Merge pull request #943 from liangyongxiang/fcitx5-gtk
[gentoo-zh.git] / sys-kernel / xanmod-hybird / files / patch-5.12.6-xanmod1-cacule
bloba85bb7280a3107798632891b1a8feb3ef41eac23
1 diff --git a/.config b/.config
2 new file mode 100644
3 index 000000000000..17bfc3494ad9
4 --- /dev/null
5 +++ b/.config
6 @@ -0,0 +1,11069 @@
7 +#
8 +# Automatically generated file; DO NOT EDIT.
9 +# Linux/x86 5.12.6 Kernel Configuration
11 +CONFIG_CC_VERSION_TEXT="gcc-11 (Debian 11.1.0-1) 11.1.0"
12 +CONFIG_CC_IS_GCC=y
13 +CONFIG_GCC_VERSION=110100
14 +CONFIG_CLANG_VERSION=0
15 +CONFIG_LD_IS_BFD=y
16 +CONFIG_LD_VERSION=23502
17 +CONFIG_LLD_VERSION=0
18 +CONFIG_CC_CAN_LINK=y
19 +CONFIG_CC_CAN_LINK_STATIC=y
20 +CONFIG_CC_HAS_ASM_GOTO=y
21 +CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y
22 +CONFIG_CC_HAS_ASM_INLINE=y
23 +CONFIG_IRQ_WORK=y
24 +CONFIG_BUILDTIME_TABLE_SORT=y
25 +CONFIG_THREAD_INFO_IN_TASK=y
28 +# General setup
30 +CONFIG_INIT_ENV_ARG_LIMIT=32
31 +# CONFIG_COMPILE_TEST is not set
32 +CONFIG_LOCALVERSION=""
33 +# CONFIG_LOCALVERSION_AUTO is not set
34 +CONFIG_BUILD_SALT=""
35 +CONFIG_HAVE_KERNEL_GZIP=y
36 +CONFIG_HAVE_KERNEL_BZIP2=y
37 +CONFIG_HAVE_KERNEL_LZMA=y
38 +CONFIG_HAVE_KERNEL_XZ=y
39 +CONFIG_HAVE_KERNEL_LZO=y
40 +CONFIG_HAVE_KERNEL_LZ4=y
41 +CONFIG_HAVE_KERNEL_ZSTD=y
42 +# CONFIG_KERNEL_GZIP is not set
43 +# CONFIG_KERNEL_BZIP2 is not set
44 +# CONFIG_KERNEL_LZMA is not set
45 +# CONFIG_KERNEL_XZ is not set
46 +# CONFIG_KERNEL_LZO is not set
47 +# CONFIG_KERNEL_LZ4 is not set
48 +CONFIG_KERNEL_ZSTD=y
49 +CONFIG_DEFAULT_INIT=""
50 +CONFIG_DEFAULT_HOSTNAME="(none)"
51 +CONFIG_SWAP=y
52 +CONFIG_SYSVIPC=y
53 +CONFIG_SYSVIPC_SYSCTL=y
54 +CONFIG_POSIX_MQUEUE=y
55 +CONFIG_POSIX_MQUEUE_SYSCTL=y
56 +CONFIG_WATCH_QUEUE=y
57 +CONFIG_CROSS_MEMORY_ATTACH=y
58 +CONFIG_USELIB=y
59 +CONFIG_AUDIT=y
60 +CONFIG_HAVE_ARCH_AUDITSYSCALL=y
61 +CONFIG_AUDITSYSCALL=y
64 +# IRQ subsystem
66 +CONFIG_GENERIC_IRQ_PROBE=y
67 +CONFIG_GENERIC_IRQ_SHOW=y
68 +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y
69 +CONFIG_GENERIC_PENDING_IRQ=y
70 +CONFIG_GENERIC_IRQ_MIGRATION=y
71 +CONFIG_HARDIRQS_SW_RESEND=y
72 +CONFIG_GENERIC_IRQ_CHIP=y
73 +CONFIG_IRQ_DOMAIN=y
74 +CONFIG_IRQ_DOMAIN_HIERARCHY=y
75 +CONFIG_GENERIC_MSI_IRQ=y
76 +CONFIG_GENERIC_MSI_IRQ_DOMAIN=y
77 +CONFIG_IRQ_MSI_IOMMU=y
78 +CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y
79 +CONFIG_GENERIC_IRQ_RESERVATION_MODE=y
80 +CONFIG_IRQ_FORCED_THREADING=y
81 +CONFIG_SPARSE_IRQ=y
82 +# CONFIG_GENERIC_IRQ_DEBUGFS is not set
83 +# end of IRQ subsystem
85 +CONFIG_CLOCKSOURCE_WATCHDOG=y
86 +CONFIG_ARCH_CLOCKSOURCE_INIT=y
87 +CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y
88 +CONFIG_GENERIC_TIME_VSYSCALL=y
89 +CONFIG_GENERIC_CLOCKEVENTS=y
90 +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
91 +CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y
92 +CONFIG_GENERIC_CMOS_UPDATE=y
93 +CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK=y
94 +CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y
97 +# Timers subsystem
99 +CONFIG_TICK_ONESHOT=y
100 +CONFIG_NO_HZ_COMMON=y
101 +# CONFIG_HZ_PERIODIC is not set
102 +# CONFIG_NO_HZ_IDLE is not set
103 +CONFIG_NO_HZ_FULL=y
104 +CONFIG_CONTEXT_TRACKING=y
105 +# CONFIG_CONTEXT_TRACKING_FORCE is not set
106 +# CONFIG_NO_HZ is not set
107 +CONFIG_HIGH_RES_TIMERS=y
108 +# end of Timers subsystem
110 +# CONFIG_PREEMPT_NONE is not set
111 +# CONFIG_PREEMPT_VOLUNTARY is not set
112 +CONFIG_PREEMPT=y
113 +CONFIG_PREEMPT_COUNT=y
114 +CONFIG_PREEMPTION=y
115 +CONFIG_PREEMPT_DYNAMIC=y
118 +# CPU/Task time and stats accounting
120 +CONFIG_VIRT_CPU_ACCOUNTING=y
121 +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y
122 +# CONFIG_IRQ_TIME_ACCOUNTING is not set
123 +CONFIG_BSD_PROCESS_ACCT=y
124 +CONFIG_BSD_PROCESS_ACCT_V3=y
125 +CONFIG_TASKSTATS=y
126 +CONFIG_TASK_DELAY_ACCT=y
127 +CONFIG_TASK_XACCT=y
128 +CONFIG_TASK_IO_ACCOUNTING=y
129 +CONFIG_PSI=y
130 +CONFIG_PSI_DEFAULT_DISABLED=y
131 +# end of CPU/Task time and stats accounting
133 +CONFIG_CPU_ISOLATION=y
136 +# RCU Subsystem
138 +CONFIG_TREE_RCU=y
139 +CONFIG_PREEMPT_RCU=y
140 +CONFIG_RCU_EXPERT=y
141 +CONFIG_SRCU=y
142 +CONFIG_TREE_SRCU=y
143 +CONFIG_TASKS_RCU_GENERIC=y
144 +CONFIG_TASKS_RCU=y
145 +CONFIG_TASKS_TRACE_RCU=y
146 +CONFIG_RCU_STALL_COMMON=y
147 +CONFIG_RCU_NEED_SEGCBLIST=y
148 +CONFIG_RCU_FANOUT=64
149 +CONFIG_RCU_FANOUT_LEAF=16
150 +# CONFIG_RCU_FAST_NO_HZ is not set
151 +CONFIG_RCU_BOOST=y
152 +CONFIG_RCU_BOOST_DELAY=0
153 +CONFIG_RCU_NOCB_CPU=y
154 +# CONFIG_TASKS_TRACE_RCU_READ_MB is not set
155 +# end of RCU Subsystem
157 +CONFIG_BUILD_BIN2C=y
158 +# CONFIG_IKCONFIG is not set
159 +CONFIG_IKHEADERS=m
160 +CONFIG_LOG_BUF_SHIFT=18
161 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12
162 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13
163 +CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
166 +# Scheduler features
168 +CONFIG_UCLAMP_TASK=y
169 +CONFIG_UCLAMP_BUCKETS_COUNT=5
170 +# end of Scheduler features
172 +CONFIG_CACULE_SCHED=y
173 +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
174 +CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y
175 +CONFIG_CC_HAS_INT128=y
176 +CONFIG_ARCH_SUPPORTS_INT128=y
177 +CONFIG_NUMA_BALANCING=y
178 +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y
179 +CONFIG_CGROUPS=y
180 +CONFIG_PAGE_COUNTER=y
181 +CONFIG_MEMCG=y
182 +CONFIG_MEMCG_SWAP=y
183 +CONFIG_MEMCG_KMEM=y
184 +CONFIG_BLK_CGROUP=y
185 +CONFIG_CGROUP_WRITEBACK=y
186 +CONFIG_CGROUP_SCHED=y
187 +CONFIG_FAIR_GROUP_SCHED=y
188 +CONFIG_CFS_BANDWIDTH=y
189 +# CONFIG_RT_GROUP_SCHED is not set
190 +CONFIG_UCLAMP_TASK_GROUP=y
191 +CONFIG_CGROUP_PIDS=y
192 +CONFIG_CGROUP_RDMA=y
193 +CONFIG_CGROUP_FREEZER=y
194 +CONFIG_CGROUP_HUGETLB=y
195 +CONFIG_CPUSETS=y
196 +CONFIG_PROC_PID_CPUSET=y
197 +CONFIG_CGROUP_DEVICE=y
198 +CONFIG_CGROUP_CPUACCT=y
199 +CONFIG_CGROUP_PERF=y
200 +CONFIG_CGROUP_BPF=y
201 +# CONFIG_CGROUP_DEBUG is not set
202 +CONFIG_SOCK_CGROUP_DATA=y
203 +CONFIG_NAMESPACES=y
204 +CONFIG_UTS_NS=y
205 +CONFIG_TIME_NS=y
206 +CONFIG_IPC_NS=y
207 +CONFIG_USER_NS=y
208 +CONFIG_PID_NS=y
209 +CONFIG_NET_NS=y
210 +CONFIG_CHECKPOINT_RESTORE=y
211 +CONFIG_SCHED_AUTOGROUP=y
212 +CONFIG_SCHED_AUTOGROUP_DEFAULT_ENABLED=y
213 +# CONFIG_SYSFS_DEPRECATED is not set
214 +CONFIG_RELAY=y
215 +CONFIG_BLK_DEV_INITRD=y
216 +CONFIG_INITRAMFS_SOURCE=""
217 +CONFIG_RD_GZIP=y
218 +CONFIG_RD_BZIP2=y
219 +CONFIG_RD_LZMA=y
220 +CONFIG_RD_XZ=y
221 +CONFIG_RD_LZO=y
222 +CONFIG_RD_LZ4=y
223 +CONFIG_RD_ZSTD=y
224 +CONFIG_BOOT_CONFIG=y
225 +# CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE is not set
226 +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3=y
227 +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
228 +CONFIG_LD_ORPHAN_WARN=y
229 +CONFIG_SYSCTL=y
230 +CONFIG_HAVE_UID16=y
231 +CONFIG_SYSCTL_EXCEPTION_TRACE=y
232 +CONFIG_HAVE_PCSPKR_PLATFORM=y
233 +CONFIG_BPF=y
234 +CONFIG_EXPERT=y
235 +CONFIG_UID16=y
236 +CONFIG_MULTIUSER=y
237 +CONFIG_SGETMASK_SYSCALL=y
238 +CONFIG_SYSFS_SYSCALL=y
239 +CONFIG_FHANDLE=y
240 +CONFIG_POSIX_TIMERS=y
241 +CONFIG_PRINTK=y
242 +CONFIG_PRINTK_NMI=y
243 +CONFIG_BUG=y
244 +CONFIG_ELF_CORE=y
245 +CONFIG_PCSPKR_PLATFORM=y
246 +CONFIG_BASE_FULL=y
247 +CONFIG_FUTEX=y
248 +CONFIG_FUTEX2=y
249 +CONFIG_FUTEX_PI=y
250 +CONFIG_EPOLL=y
251 +CONFIG_SIGNALFD=y
252 +CONFIG_TIMERFD=y
253 +CONFIG_EVENTFD=y
254 +CONFIG_SHMEM=y
255 +CONFIG_AIO=y
256 +CONFIG_IO_URING=y
257 +CONFIG_ADVISE_SYSCALLS=y
258 +CONFIG_HAVE_ARCH_USERFAULTFD_WP=y
259 +CONFIG_MEMBARRIER=y
260 +CONFIG_KALLSYMS=y
261 +CONFIG_KALLSYMS_ALL=y
262 +CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y
263 +CONFIG_KALLSYMS_BASE_RELATIVE=y
264 +CONFIG_BPF_SYSCALL=y
265 +CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y
266 +CONFIG_BPF_JIT_ALWAYS_ON=y
267 +CONFIG_BPF_JIT_DEFAULT_ON=y
268 +CONFIG_USERMODE_DRIVER=y
269 +# CONFIG_BPF_PRELOAD is not set
270 +CONFIG_USERFAULTFD=y
271 +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y
272 +CONFIG_KCMP=y
273 +CONFIG_RSEQ=y
274 +# CONFIG_DEBUG_RSEQ is not set
275 +# CONFIG_EMBEDDED is not set
276 +CONFIG_HAVE_PERF_EVENTS=y
277 +CONFIG_PC104=y
280 +# Kernel Performance Events And Counters
282 +CONFIG_PERF_EVENTS=y
283 +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
284 +# end of Kernel Performance Events And Counters
286 +CONFIG_VM_EVENT_COUNTERS=y
287 +CONFIG_SLUB_DEBUG=y
288 +# CONFIG_COMPAT_BRK is not set
289 +# CONFIG_SLAB is not set
290 +CONFIG_SLUB=y
291 +# CONFIG_SLOB is not set
292 +CONFIG_SLAB_MERGE_DEFAULT=y
293 +CONFIG_SLAB_FREELIST_RANDOM=y
294 +CONFIG_SLAB_FREELIST_HARDENED=y
295 +CONFIG_SHUFFLE_PAGE_ALLOCATOR=y
296 +CONFIG_SLUB_CPU_PARTIAL=y
297 +CONFIG_SYSTEM_DATA_VERIFICATION=y
298 +CONFIG_PROFILING=y
299 +# end of General setup
301 +CONFIG_64BIT=y
302 +CONFIG_X86_64=y
303 +CONFIG_X86=y
304 +CONFIG_INSTRUCTION_DECODER=y
305 +CONFIG_OUTPUT_FORMAT="elf64-x86-64"
306 +CONFIG_LOCKDEP_SUPPORT=y
307 +CONFIG_STACKTRACE_SUPPORT=y
308 +CONFIG_MMU=y
309 +CONFIG_ARCH_MMAP_RND_BITS_MIN=28
310 +CONFIG_ARCH_MMAP_RND_BITS_MAX=32
311 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8
312 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16
313 +CONFIG_GENERIC_ISA_DMA=y
314 +CONFIG_GENERIC_BUG=y
315 +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
316 +CONFIG_ARCH_MAY_HAVE_PC_FDC=y
317 +CONFIG_GENERIC_CALIBRATE_DELAY=y
318 +CONFIG_ARCH_HAS_CPU_RELAX=y
319 +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
320 +CONFIG_ARCH_HAS_FILTER_PGPROT=y
321 +CONFIG_HAVE_SETUP_PER_CPU_AREA=y
322 +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
323 +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
324 +CONFIG_ARCH_HIBERNATION_POSSIBLE=y
325 +CONFIG_ARCH_SUSPEND_POSSIBLE=y
326 +CONFIG_ARCH_WANT_GENERAL_HUGETLB=y
327 +CONFIG_ZONE_DMA32=y
328 +CONFIG_AUDIT_ARCH=y
329 +CONFIG_HAVE_INTEL_TXT=y
330 +CONFIG_X86_64_SMP=y
331 +CONFIG_ARCH_SUPPORTS_UPROBES=y
332 +CONFIG_FIX_EARLYCON_MEM=y
333 +CONFIG_DYNAMIC_PHYSICAL_MASK=y
334 +CONFIG_PGTABLE_LEVELS=5
335 +CONFIG_CC_HAS_SANE_STACKPROTECTOR=y
338 +# Processor type and features
340 +CONFIG_ZONE_DMA=y
341 +CONFIG_SMP=y
342 +CONFIG_X86_FEATURE_NAMES=y
343 +CONFIG_X86_X2APIC=y
344 +CONFIG_X86_MPPARSE=y
345 +# CONFIG_GOLDFISH is not set
346 +CONFIG_RETPOLINE=y
347 +CONFIG_X86_CPU_RESCTRL=y
348 +CONFIG_X86_EXTENDED_PLATFORM=y
349 +CONFIG_X86_NUMACHIP=y
350 +# CONFIG_X86_VSMP is not set
351 +CONFIG_X86_UV=y
352 +# CONFIG_X86_GOLDFISH is not set
353 +# CONFIG_X86_INTEL_MID is not set
354 +CONFIG_X86_INTEL_LPSS=y
355 +CONFIG_X86_AMD_PLATFORM_DEVICE=y
356 +CONFIG_IOSF_MBI=y
357 +CONFIG_IOSF_MBI_DEBUG=y
358 +CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y
359 +CONFIG_SCHED_OMIT_FRAME_POINTER=y
360 +CONFIG_HYPERVISOR_GUEST=y
361 +CONFIG_PARAVIRT=y
362 +CONFIG_PARAVIRT_XXL=y
363 +# CONFIG_PARAVIRT_DEBUG is not set
364 +CONFIG_PARAVIRT_SPINLOCKS=y
365 +CONFIG_X86_HV_CALLBACK_VECTOR=y
366 +CONFIG_XEN=y
367 +CONFIG_XEN_PV=y
368 +CONFIG_XEN_512GB=y
369 +CONFIG_XEN_PV_SMP=y
370 +CONFIG_XEN_DOM0=y
371 +CONFIG_XEN_PVHVM=y
372 +CONFIG_XEN_PVHVM_SMP=y
373 +CONFIG_XEN_PVHVM_GUEST=y
374 +CONFIG_XEN_SAVE_RESTORE=y
375 +# CONFIG_XEN_DEBUG_FS is not set
376 +CONFIG_XEN_PVH=y
377 +CONFIG_KVM_GUEST=y
378 +CONFIG_ARCH_CPUIDLE_HALTPOLL=y
379 +CONFIG_PVH=y
380 +# CONFIG_PARAVIRT_TIME_ACCOUNTING is not set
381 +CONFIG_PARAVIRT_CLOCK=y
382 +CONFIG_JAILHOUSE_GUEST=y
383 +CONFIG_ACRN_GUEST=y
384 +# CONFIG_MK8 is not set
385 +# CONFIG_MK8SSE3 is not set
386 +# CONFIG_MK10 is not set
387 +# CONFIG_MBARCELONA is not set
388 +# CONFIG_MBOBCAT is not set
389 +# CONFIG_MJAGUAR is not set
390 +# CONFIG_MBULLDOZER is not set
391 +# CONFIG_MPILEDRIVER is not set
392 +# CONFIG_MSTEAMROLLER is not set
393 +# CONFIG_MEXCAVATOR is not set
394 +# CONFIG_MZEN is not set
395 +# CONFIG_MZEN2 is not set
396 +# CONFIG_MZEN3 is not set
397 +# CONFIG_MPSC is not set
398 +# CONFIG_MCORE2 is not set
399 +# CONFIG_MATOM is not set
400 +# CONFIG_MNEHALEM is not set
401 +# CONFIG_MWESTMERE is not set
402 +# CONFIG_MSILVERMONT is not set
403 +# CONFIG_MGOLDMONT is not set
404 +# CONFIG_MGOLDMONTPLUS is not set
405 +# CONFIG_MSANDYBRIDGE is not set
406 +# CONFIG_MIVYBRIDGE is not set
407 +# CONFIG_MHASWELL is not set
408 +# CONFIG_MBROADWELL is not set
409 +# CONFIG_MSKYLAKE is not set
410 +# CONFIG_MSKYLAKEX is not set
411 +# CONFIG_MCANNONLAKE is not set
412 +# CONFIG_MICELAKE is not set
413 +# CONFIG_MCASCADELAKE is not set
414 +# CONFIG_MCOOPERLAKE is not set
415 +# CONFIG_MTIGERLAKE is not set
416 +# CONFIG_MSAPPHIRERAPIDS is not set
417 +# CONFIG_MROCKETLAKE is not set
418 +# CONFIG_MALDERLAKE is not set
419 +CONFIG_GENERIC_CPU=y
420 +# CONFIG_GENERIC_CPU2 is not set
421 +# CONFIG_GENERIC_CPU3 is not set
422 +# CONFIG_GENERIC_CPU4 is not set
423 +# CONFIG_MNATIVE_INTEL is not set
424 +# CONFIG_MNATIVE_AMD is not set
425 +CONFIG_X86_INTERNODE_CACHE_SHIFT=6
426 +CONFIG_X86_L1_CACHE_SHIFT=6
427 +CONFIG_X86_TSC=y
428 +CONFIG_X86_CMPXCHG64=y
429 +CONFIG_X86_CMOV=y
430 +CONFIG_X86_MINIMUM_CPU_FAMILY=64
431 +CONFIG_X86_DEBUGCTLMSR=y
432 +CONFIG_IA32_FEAT_CTL=y
433 +CONFIG_X86_VMX_FEATURE_NAMES=y
434 +CONFIG_PROCESSOR_SELECT=y
435 +CONFIG_CPU_SUP_INTEL=y
436 +CONFIG_CPU_SUP_AMD=y
437 +CONFIG_CPU_SUP_HYGON=y
438 +CONFIG_CPU_SUP_CENTAUR=y
439 +CONFIG_CPU_SUP_ZHAOXIN=y
440 +CONFIG_HPET_TIMER=y
441 +CONFIG_HPET_EMULATE_RTC=y
442 +CONFIG_DMI=y
443 +CONFIG_GART_IOMMU=y
444 +# CONFIG_MAXSMP is not set
445 +CONFIG_NR_CPUS_RANGE_BEGIN=2
446 +CONFIG_NR_CPUS_RANGE_END=512
447 +CONFIG_NR_CPUS_DEFAULT=64
448 +CONFIG_NR_CPUS=512
449 +CONFIG_SCHED_SMT=y
450 +CONFIG_SCHED_MC=y
451 +CONFIG_SCHED_MC_PRIO=y
452 +CONFIG_X86_LOCAL_APIC=y
453 +CONFIG_X86_IO_APIC=y
454 +CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
455 +CONFIG_X86_MCE=y
456 +CONFIG_X86_MCELOG_LEGACY=y
457 +CONFIG_X86_MCE_INTEL=y
458 +CONFIG_X86_MCE_AMD=y
459 +CONFIG_X86_MCE_THRESHOLD=y
460 +CONFIG_X86_MCE_INJECT=m
463 +# Performance monitoring
465 +CONFIG_PERF_EVENTS_INTEL_UNCORE=y
466 +CONFIG_PERF_EVENTS_INTEL_RAPL=m
467 +CONFIG_PERF_EVENTS_INTEL_CSTATE=m
468 +# CONFIG_PERF_EVENTS_AMD_POWER is not set
469 +# end of Performance monitoring
471 +CONFIG_X86_16BIT=y
472 +CONFIG_X86_ESPFIX64=y
473 +CONFIG_X86_VSYSCALL_EMULATION=y
474 +CONFIG_X86_IOPL_IOPERM=y
475 +CONFIG_I8K=m
476 +CONFIG_MICROCODE=y
477 +CONFIG_MICROCODE_INTEL=y
478 +CONFIG_MICROCODE_AMD=y
479 +CONFIG_MICROCODE_OLD_INTERFACE=y
480 +CONFIG_X86_MSR=m
481 +CONFIG_X86_CPUID=m
482 +CONFIG_X86_5LEVEL=y
483 +CONFIG_X86_DIRECT_GBPAGES=y
484 +# CONFIG_X86_CPA_STATISTICS is not set
485 +CONFIG_AMD_MEM_ENCRYPT=y
486 +# CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is not set
487 +CONFIG_NUMA=y
488 +CONFIG_AMD_NUMA=y
489 +CONFIG_X86_64_ACPI_NUMA=y
490 +# CONFIG_NUMA_EMU is not set
491 +CONFIG_NODES_SHIFT=10
492 +CONFIG_ARCH_SPARSEMEM_ENABLE=y
493 +CONFIG_ARCH_SPARSEMEM_DEFAULT=y
494 +CONFIG_ARCH_SELECT_MEMORY_MODEL=y
495 +CONFIG_ARCH_MEMORY_PROBE=y
496 +CONFIG_ARCH_PROC_KCORE_TEXT=y
497 +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000
498 +CONFIG_X86_PMEM_LEGACY_DEVICE=y
499 +CONFIG_X86_PMEM_LEGACY=y
500 +CONFIG_X86_CHECK_BIOS_CORRUPTION=y
501 +CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y
502 +CONFIG_X86_RESERVE_LOW=64
503 +CONFIG_MTRR=y
504 +CONFIG_MTRR_SANITIZER=y
505 +CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1
506 +CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1
507 +CONFIG_X86_PAT=y
508 +CONFIG_ARCH_USES_PG_UNCACHED=y
509 +CONFIG_ARCH_RANDOM=y
510 +CONFIG_X86_SMAP=y
511 +CONFIG_X86_UMIP=y
512 +CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y
513 +# CONFIG_X86_INTEL_TSX_MODE_OFF is not set
514 +# CONFIG_X86_INTEL_TSX_MODE_ON is not set
515 +CONFIG_X86_INTEL_TSX_MODE_AUTO=y
516 +CONFIG_X86_SGX=y
517 +CONFIG_EFI=y
518 +CONFIG_EFI_STUB=y
519 +CONFIG_EFI_MIXED=y
520 +# CONFIG_HZ_100 is not set
521 +# CONFIG_HZ_250 is not set
522 +# CONFIG_HZ_300 is not set
523 +CONFIG_HZ_500=y
524 +# CONFIG_HZ_1000 is not set
525 +CONFIG_HZ=500
526 +CONFIG_SCHED_HRTICK=y
527 +CONFIG_KEXEC=y
528 +CONFIG_KEXEC_FILE=y
529 +CONFIG_ARCH_HAS_KEXEC_PURGATORY=y
530 +CONFIG_KEXEC_SIG=y
531 +# CONFIG_KEXEC_SIG_FORCE is not set
532 +CONFIG_KEXEC_BZIMAGE_VERIFY_SIG=y
533 +CONFIG_CRASH_DUMP=y
534 +CONFIG_KEXEC_JUMP=y
535 +CONFIG_PHYSICAL_START=0x1000000
536 +CONFIG_RELOCATABLE=y
537 +CONFIG_RANDOMIZE_BASE=y
538 +CONFIG_X86_NEED_RELOCS=y
539 +CONFIG_PHYSICAL_ALIGN=0x200000
540 +CONFIG_DYNAMIC_MEMORY_LAYOUT=y
541 +CONFIG_RANDOMIZE_MEMORY=y
542 +CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0xa
543 +CONFIG_HOTPLUG_CPU=y
544 +# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set
545 +# CONFIG_DEBUG_HOTPLUG_CPU0 is not set
546 +# CONFIG_COMPAT_VDSO is not set
547 +# CONFIG_LEGACY_VSYSCALL_EMULATE is not set
548 +CONFIG_LEGACY_VSYSCALL_XONLY=y
549 +# CONFIG_LEGACY_VSYSCALL_NONE is not set
550 +# CONFIG_CMDLINE_BOOL is not set
551 +CONFIG_MODIFY_LDT_SYSCALL=y
552 +CONFIG_HAVE_LIVEPATCH=y
553 +# end of Processor type and features
555 +CONFIG_ARCH_HAS_ADD_PAGES=y
556 +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
557 +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
558 +CONFIG_USE_PERCPU_NUMA_NODE_ID=y
559 +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y
560 +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y
561 +CONFIG_ARCH_ENABLE_THP_MIGRATION=y
564 +# Power management and ACPI options
566 +CONFIG_ARCH_HIBERNATION_HEADER=y
567 +CONFIG_SUSPEND=y
568 +CONFIG_SUSPEND_FREEZER=y
569 +# CONFIG_SUSPEND_SKIP_SYNC is not set
570 +CONFIG_HIBERNATE_CALLBACKS=y
571 +CONFIG_HIBERNATION=y
572 +CONFIG_HIBERNATION_SNAPSHOT_DEV=y
573 +CONFIG_PM_STD_PARTITION=""
574 +CONFIG_PM_SLEEP=y
575 +CONFIG_PM_SLEEP_SMP=y
576 +# CONFIG_PM_AUTOSLEEP is not set
577 +CONFIG_PM_WAKELOCKS=y
578 +CONFIG_PM_WAKELOCKS_LIMIT=100
579 +CONFIG_PM_WAKELOCKS_GC=y
580 +CONFIG_PM=y
581 +CONFIG_PM_DEBUG=y
582 +CONFIG_PM_ADVANCED_DEBUG=y
583 +# CONFIG_PM_TEST_SUSPEND is not set
584 +CONFIG_PM_SLEEP_DEBUG=y
585 +# CONFIG_DPM_WATCHDOG is not set
586 +CONFIG_PM_TRACE=y
587 +CONFIG_PM_TRACE_RTC=y
588 +CONFIG_PM_CLK=y
589 +CONFIG_PM_GENERIC_DOMAINS=y
590 +CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
591 +CONFIG_PM_GENERIC_DOMAINS_SLEEP=y
592 +CONFIG_ENERGY_MODEL=y
593 +CONFIG_ARCH_SUPPORTS_ACPI=y
594 +CONFIG_ACPI=y
595 +CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y
596 +CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y
597 +CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y
598 +CONFIG_ACPI_DEBUGGER=y
599 +CONFIG_ACPI_DEBUGGER_USER=y
600 +CONFIG_ACPI_SPCR_TABLE=y
601 +CONFIG_ACPI_FPDT=y
602 +CONFIG_ACPI_LPIT=y
603 +CONFIG_ACPI_SLEEP=y
604 +CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y
605 +CONFIG_ACPI_EC_DEBUGFS=m
606 +CONFIG_ACPI_AC=y
607 +CONFIG_ACPI_BATTERY=y
608 +CONFIG_ACPI_BUTTON=y
609 +CONFIG_ACPI_VIDEO=m
610 +CONFIG_ACPI_FAN=y
611 +CONFIG_ACPI_TAD=m
612 +CONFIG_ACPI_DOCK=y
613 +CONFIG_ACPI_CPU_FREQ_PSS=y
614 +CONFIG_ACPI_PROCESSOR_CSTATE=y
615 +CONFIG_ACPI_PROCESSOR_IDLE=y
616 +CONFIG_ACPI_CPPC_LIB=y
617 +CONFIG_ACPI_PROCESSOR=y
618 +CONFIG_ACPI_IPMI=m
619 +CONFIG_ACPI_HOTPLUG_CPU=y
620 +CONFIG_ACPI_PROCESSOR_AGGREGATOR=m
621 +CONFIG_ACPI_THERMAL=y
622 +CONFIG_ACPI_PLATFORM_PROFILE=m
623 +CONFIG_ACPI_CUSTOM_DSDT_FILE=""
624 +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y
625 +CONFIG_ACPI_TABLE_UPGRADE=y
626 +CONFIG_ACPI_DEBUG=y
627 +CONFIG_ACPI_PCI_SLOT=y
628 +CONFIG_ACPI_CONTAINER=y
629 +CONFIG_ACPI_HOTPLUG_MEMORY=y
630 +CONFIG_ACPI_HOTPLUG_IOAPIC=y
631 +CONFIG_ACPI_SBS=m
632 +CONFIG_ACPI_HED=y
633 +# CONFIG_ACPI_CUSTOM_METHOD is not set
634 +CONFIG_ACPI_BGRT=y
635 +# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set
636 +CONFIG_ACPI_NFIT=m
637 +# CONFIG_NFIT_SECURITY_DEBUG is not set
638 +CONFIG_ACPI_NUMA=y
639 +CONFIG_ACPI_HMAT=y
640 +CONFIG_HAVE_ACPI_APEI=y
641 +CONFIG_HAVE_ACPI_APEI_NMI=y
642 +CONFIG_ACPI_APEI=y
643 +CONFIG_ACPI_APEI_GHES=y
644 +CONFIG_ACPI_APEI_PCIEAER=y
645 +CONFIG_ACPI_APEI_MEMORY_FAILURE=y
646 +CONFIG_ACPI_APEI_EINJ=m
647 +# CONFIG_ACPI_APEI_ERST_DEBUG is not set
648 +CONFIG_ACPI_DPTF=y
649 +CONFIG_DPTF_POWER=m
650 +CONFIG_DPTF_PCH_FIVR=m
651 +CONFIG_ACPI_WATCHDOG=y
652 +CONFIG_ACPI_EXTLOG=m
653 +CONFIG_ACPI_ADXL=y
654 +CONFIG_ACPI_CONFIGFS=m
655 +CONFIG_PMIC_OPREGION=y
656 +CONFIG_BYTCRC_PMIC_OPREGION=y
657 +CONFIG_CHTCRC_PMIC_OPREGION=y
658 +CONFIG_XPOWER_PMIC_OPREGION=y
659 +CONFIG_BXT_WC_PMIC_OPREGION=y
660 +CONFIG_CHT_WC_PMIC_OPREGION=y
661 +CONFIG_CHT_DC_TI_PMIC_OPREGION=y
662 +CONFIG_TPS68470_PMIC_OPREGION=y
663 +CONFIG_X86_PM_TIMER=y
666 +# CPU Frequency scaling
668 +CONFIG_CPU_FREQ=y
669 +CONFIG_CPU_FREQ_GOV_ATTR_SET=y
670 +CONFIG_CPU_FREQ_GOV_COMMON=y
671 +CONFIG_CPU_FREQ_STAT=y
672 +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
673 +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
674 +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
675 +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set
676 +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
677 +CONFIG_CPU_FREQ_GOV_POWERSAVE=y
678 +CONFIG_CPU_FREQ_GOV_USERSPACE=y
679 +CONFIG_CPU_FREQ_GOV_ONDEMAND=y
680 +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
681 +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
684 +# CPU frequency scaling drivers
686 +CONFIG_X86_INTEL_PSTATE=y
687 +CONFIG_X86_PCC_CPUFREQ=y
688 +CONFIG_X86_ACPI_CPUFREQ=y
689 +CONFIG_X86_ACPI_CPUFREQ_CPB=y
690 +CONFIG_X86_POWERNOW_K8=y
691 +CONFIG_X86_AMD_FREQ_SENSITIVITY=m
692 +CONFIG_X86_SPEEDSTEP_CENTRINO=y
693 +CONFIG_X86_P4_CLOCKMOD=m
696 +# shared options
698 +CONFIG_X86_SPEEDSTEP_LIB=m
699 +# end of CPU Frequency scaling
702 +# CPU Idle
704 +CONFIG_CPU_IDLE=y
705 +CONFIG_CPU_IDLE_GOV_LADDER=y
706 +CONFIG_CPU_IDLE_GOV_MENU=y
707 +CONFIG_CPU_IDLE_GOV_TEO=y
708 +CONFIG_CPU_IDLE_GOV_HALTPOLL=y
709 +CONFIG_HALTPOLL_CPUIDLE=m
710 +# end of CPU Idle
712 +CONFIG_INTEL_IDLE=y
713 +# end of Power management and ACPI options
716 +# Bus options (PCI etc.)
718 +CONFIG_PCI_DIRECT=y
719 +CONFIG_PCI_MMCONFIG=y
720 +CONFIG_PCI_XEN=y
721 +CONFIG_MMCONF_FAM10H=y
722 +# CONFIG_PCI_CNB20LE_QUIRK is not set
723 +CONFIG_ISA_BUS=y
724 +CONFIG_ISA_DMA_API=y
725 +CONFIG_AMD_NB=y
726 +# CONFIG_X86_SYSFB is not set
727 +# end of Bus options (PCI etc.)
730 +# Binary Emulations
732 +CONFIG_IA32_EMULATION=y
733 +CONFIG_X86_X32=y
734 +CONFIG_COMPAT_32=y
735 +CONFIG_COMPAT=y
736 +CONFIG_COMPAT_FOR_U64_ALIGNMENT=y
737 +CONFIG_SYSVIPC_COMPAT=y
738 +# end of Binary Emulations
741 +# Firmware Drivers
743 +CONFIG_EDD=y
744 +CONFIG_EDD_OFF=y
745 +CONFIG_FIRMWARE_MEMMAP=y
746 +CONFIG_DMIID=y
747 +CONFIG_DMI_SYSFS=m
748 +CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y
749 +CONFIG_ISCSI_IBFT_FIND=y
750 +CONFIG_ISCSI_IBFT=m
751 +CONFIG_FW_CFG_SYSFS=m
752 +# CONFIG_FW_CFG_SYSFS_CMDLINE is not set
753 +# CONFIG_GOOGLE_FIRMWARE is not set
756 +# EFI (Extensible Firmware Interface) Support
758 +CONFIG_EFI_VARS=y
759 +CONFIG_EFI_ESRT=y
760 +CONFIG_EFI_VARS_PSTORE=m
761 +# CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE is not set
762 +CONFIG_EFI_RUNTIME_MAP=y
763 +# CONFIG_EFI_FAKE_MEMMAP is not set
764 +CONFIG_EFI_SOFT_RESERVE=y
765 +CONFIG_EFI_RUNTIME_WRAPPERS=y
766 +CONFIG_EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER=y
767 +CONFIG_EFI_BOOTLOADER_CONTROL=m
768 +CONFIG_EFI_CAPSULE_LOADER=m
769 +CONFIG_EFI_TEST=m
770 +CONFIG_APPLE_PROPERTIES=y
771 +CONFIG_RESET_ATTACK_MITIGATION=y
772 +CONFIG_EFI_RCI2_TABLE=y
773 +# CONFIG_EFI_DISABLE_PCI_DMA is not set
774 +# end of EFI (Extensible Firmware Interface) Support
776 +CONFIG_EFI_EMBEDDED_FIRMWARE=y
777 +CONFIG_UEFI_CPER=y
778 +CONFIG_UEFI_CPER_X86=y
779 +CONFIG_EFI_DEV_PATH_PARSER=y
780 +CONFIG_EFI_EARLYCON=y
781 +CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y
784 +# Tegra firmware driver
786 +# end of Tegra firmware driver
787 +# end of Firmware Drivers
789 +CONFIG_HAVE_KVM=y
790 +CONFIG_HAVE_KVM_IRQCHIP=y
791 +CONFIG_HAVE_KVM_IRQFD=y
792 +CONFIG_HAVE_KVM_IRQ_ROUTING=y
793 +CONFIG_HAVE_KVM_EVENTFD=y
794 +CONFIG_KVM_MMIO=y
795 +CONFIG_KVM_ASYNC_PF=y
796 +CONFIG_HAVE_KVM_MSI=y
797 +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y
798 +CONFIG_KVM_VFIO=y
799 +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y
800 +CONFIG_KVM_COMPAT=y
801 +CONFIG_HAVE_KVM_IRQ_BYPASS=y
802 +CONFIG_HAVE_KVM_NO_POLL=y
803 +CONFIG_KVM_XFER_TO_GUEST_WORK=y
804 +CONFIG_VIRTUALIZATION=y
805 +CONFIG_KVM=m
806 +CONFIG_KVM_WERROR=y
807 +CONFIG_KVM_INTEL=m
808 +CONFIG_KVM_AMD=m
809 +CONFIG_KVM_AMD_SEV=y
810 +CONFIG_KVM_XEN=y
811 +CONFIG_AS_AVX512=y
812 +CONFIG_AS_SHA1_NI=y
813 +CONFIG_AS_SHA256_NI=y
814 +CONFIG_AS_TPAUSE=y
817 +# General architecture-dependent options
819 +CONFIG_CRASH_CORE=y
820 +CONFIG_KEXEC_CORE=y
821 +CONFIG_HOTPLUG_SMT=y
822 +CONFIG_GENERIC_ENTRY=y
823 +CONFIG_KPROBES=y
824 +CONFIG_JUMP_LABEL=y
825 +# CONFIG_STATIC_KEYS_SELFTEST is not set
826 +# CONFIG_STATIC_CALL_SELFTEST is not set
827 +CONFIG_OPTPROBES=y
828 +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
829 +CONFIG_ARCH_USE_BUILTIN_BSWAP=y
830 +CONFIG_KRETPROBES=y
831 +CONFIG_USER_RETURN_NOTIFIER=y
832 +CONFIG_HAVE_IOREMAP_PROT=y
833 +CONFIG_HAVE_KPROBES=y
834 +CONFIG_HAVE_KRETPROBES=y
835 +CONFIG_HAVE_OPTPROBES=y
836 +CONFIG_HAVE_KPROBES_ON_FTRACE=y
837 +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y
838 +CONFIG_HAVE_NMI=y
839 +CONFIG_HAVE_ARCH_TRACEHOOK=y
840 +CONFIG_HAVE_DMA_CONTIGUOUS=y
841 +CONFIG_GENERIC_SMP_IDLE_THREAD=y
842 +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y
843 +CONFIG_ARCH_HAS_SET_MEMORY=y
844 +CONFIG_ARCH_HAS_SET_DIRECT_MAP=y
845 +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y
846 +CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y
847 +CONFIG_HAVE_ASM_MODVERSIONS=y
848 +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
849 +CONFIG_HAVE_RSEQ=y
850 +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y
851 +CONFIG_HAVE_HW_BREAKPOINT=y
852 +CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y
853 +CONFIG_HAVE_USER_RETURN_NOTIFIER=y
854 +CONFIG_HAVE_PERF_EVENTS_NMI=y
855 +CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y
856 +CONFIG_HAVE_PERF_REGS=y
857 +CONFIG_HAVE_PERF_USER_STACK_DUMP=y
858 +CONFIG_HAVE_ARCH_JUMP_LABEL=y
859 +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y
860 +CONFIG_MMU_GATHER_TABLE_FREE=y
861 +CONFIG_MMU_GATHER_RCU_TABLE_FREE=y
862 +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y
863 +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
864 +CONFIG_HAVE_CMPXCHG_LOCAL=y
865 +CONFIG_HAVE_CMPXCHG_DOUBLE=y
866 +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y
867 +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y
868 +CONFIG_HAVE_ARCH_SECCOMP=y
869 +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
870 +CONFIG_SECCOMP=y
871 +CONFIG_SECCOMP_FILTER=y
872 +# CONFIG_SECCOMP_CACHE_DEBUG is not set
873 +CONFIG_HAVE_ARCH_STACKLEAK=y
874 +CONFIG_HAVE_STACKPROTECTOR=y
875 +CONFIG_STACKPROTECTOR=y
876 +CONFIG_STACKPROTECTOR_STRONG=y
877 +CONFIG_ARCH_SUPPORTS_LTO_CLANG=y
878 +CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y
879 +CONFIG_LTO_NONE=y
880 +CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y
881 +CONFIG_HAVE_CONTEXT_TRACKING=y
882 +CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK=y
883 +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y
884 +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
885 +CONFIG_HAVE_MOVE_PUD=y
886 +CONFIG_HAVE_MOVE_PMD=y
887 +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
888 +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y
889 +CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG=y
890 +CONFIG_HAVE_ARCH_HUGE_VMAP=y
891 +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y
892 +CONFIG_HAVE_ARCH_SOFT_DIRTY=y
893 +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y
894 +CONFIG_MODULES_USE_ELF_RELA=y
895 +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y
896 +CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK=y
897 +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y
898 +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y
899 +CONFIG_HAVE_EXIT_THREAD=y
900 +CONFIG_ARCH_MMAP_RND_BITS=28
901 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y
902 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8
903 +CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y
904 +CONFIG_HAVE_STACK_VALIDATION=y
905 +CONFIG_HAVE_RELIABLE_STACKTRACE=y
906 +CONFIG_ISA_BUS_API=y
907 +CONFIG_OLD_SIGSUSPEND3=y
908 +CONFIG_COMPAT_OLD_SIGACTION=y
909 +CONFIG_COMPAT_32BIT_TIME=y
910 +CONFIG_HAVE_ARCH_VMAP_STACK=y
911 +CONFIG_VMAP_STACK=y
912 +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y
913 +CONFIG_STRICT_KERNEL_RWX=y
914 +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
915 +CONFIG_STRICT_MODULE_RWX=y
916 +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y
917 +CONFIG_ARCH_USE_MEMREMAP_PROT=y
918 +# CONFIG_LOCK_EVENT_COUNTS is not set
919 +CONFIG_ARCH_HAS_MEM_ENCRYPT=y
920 +CONFIG_HAVE_STATIC_CALL=y
921 +CONFIG_HAVE_STATIC_CALL_INLINE=y
922 +CONFIG_HAVE_PREEMPT_DYNAMIC=y
923 +CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y
924 +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
925 +CONFIG_ARCH_HAS_ELFCORE_COMPAT=y
928 +# GCOV-based kernel profiling
930 +# CONFIG_GCOV_KERNEL is not set
931 +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y
932 +# end of GCOV-based kernel profiling
934 +CONFIG_HAVE_GCC_PLUGINS=y
935 +# end of General architecture-dependent options
937 +CONFIG_RT_MUTEXES=y
938 +CONFIG_BASE_SMALL=0
939 +CONFIG_MODULE_SIG_FORMAT=y
940 +CONFIG_MODULES=y
941 +# CONFIG_MODULE_FORCE_LOAD is not set
942 +CONFIG_MODULE_UNLOAD=y
943 +# CONFIG_MODULE_FORCE_UNLOAD is not set
944 +CONFIG_MODVERSIONS=y
945 +CONFIG_ASM_MODVERSIONS=y
946 +CONFIG_MODULE_SRCVERSION_ALL=y
947 +CONFIG_MODULE_SIG=y
948 +# CONFIG_MODULE_SIG_FORCE is not set
949 +CONFIG_MODULE_SIG_ALL=y
950 +# CONFIG_MODULE_SIG_SHA1 is not set
951 +# CONFIG_MODULE_SIG_SHA224 is not set
952 +# CONFIG_MODULE_SIG_SHA256 is not set
953 +# CONFIG_MODULE_SIG_SHA384 is not set
954 +CONFIG_MODULE_SIG_SHA512=y
955 +CONFIG_MODULE_SIG_HASH="sha512"
956 +# CONFIG_MODULE_COMPRESS is not set
957 +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set
958 +# CONFIG_TRIM_UNUSED_KSYMS is not set
959 +CONFIG_MODULES_TREE_LOOKUP=y
960 +CONFIG_BLOCK=y
961 +CONFIG_BLK_SCSI_REQUEST=y
962 +CONFIG_BLK_CGROUP_RWSTAT=y
963 +CONFIG_BLK_DEV_BSG=y
964 +CONFIG_BLK_DEV_BSGLIB=y
965 +CONFIG_BLK_DEV_INTEGRITY=y
966 +CONFIG_BLK_DEV_INTEGRITY_T10=y
967 +CONFIG_BLK_DEV_ZONED=y
968 +CONFIG_BLK_DEV_THROTTLING=y
969 +# CONFIG_BLK_DEV_THROTTLING_LOW is not set
970 +CONFIG_BLK_CMDLINE_PARSER=y
971 +CONFIG_BLK_WBT=y
972 +CONFIG_BLK_CGROUP_IOLATENCY=y
973 +# CONFIG_BLK_CGROUP_IOCOST is not set
974 +CONFIG_BLK_WBT_MQ=y
975 +CONFIG_BLK_DEBUG_FS=y
976 +CONFIG_BLK_DEBUG_FS_ZONED=y
977 +CONFIG_BLK_SED_OPAL=y
978 +CONFIG_BLK_INLINE_ENCRYPTION=y
979 +CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
982 +# Partition Types
984 +CONFIG_PARTITION_ADVANCED=y
985 +# CONFIG_ACORN_PARTITION is not set
986 +CONFIG_AIX_PARTITION=y
987 +CONFIG_OSF_PARTITION=y
988 +CONFIG_AMIGA_PARTITION=y
989 +CONFIG_ATARI_PARTITION=y
990 +CONFIG_MAC_PARTITION=y
991 +CONFIG_MSDOS_PARTITION=y
992 +CONFIG_BSD_DISKLABEL=y
993 +CONFIG_MINIX_SUBPARTITION=y
994 +CONFIG_SOLARIS_X86_PARTITION=y
995 +CONFIG_UNIXWARE_DISKLABEL=y
996 +CONFIG_LDM_PARTITION=y
997 +# CONFIG_LDM_DEBUG is not set
998 +CONFIG_SGI_PARTITION=y
999 +CONFIG_ULTRIX_PARTITION=y
1000 +CONFIG_SUN_PARTITION=y
1001 +CONFIG_KARMA_PARTITION=y
1002 +CONFIG_EFI_PARTITION=y
1003 +CONFIG_SYSV68_PARTITION=y
1004 +CONFIG_CMDLINE_PARTITION=y
1005 +# end of Partition Types
1007 +CONFIG_BLOCK_COMPAT=y
1008 +CONFIG_BLK_MQ_PCI=y
1009 +CONFIG_BLK_MQ_VIRTIO=y
1010 +CONFIG_BLK_MQ_RDMA=y
1011 +CONFIG_BLK_PM=y
1014 +# IO Schedulers
1016 +CONFIG_MQ_IOSCHED_DEADLINE=m
1017 +CONFIG_MQ_IOSCHED_KYBER=m
1018 +CONFIG_IOSCHED_BFQ=y
1019 +CONFIG_BFQ_GROUP_IOSCHED=y
1020 +# CONFIG_BFQ_CGROUP_DEBUG is not set
1021 +# end of IO Schedulers
1023 +CONFIG_PREEMPT_NOTIFIERS=y
1024 +CONFIG_PADATA=y
1025 +CONFIG_ASN1=y
1026 +CONFIG_UNINLINE_SPIN_UNLOCK=y
1027 +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
1028 +CONFIG_MUTEX_SPIN_ON_OWNER=y
1029 +CONFIG_RWSEM_SPIN_ON_OWNER=y
1030 +CONFIG_LOCK_SPIN_ON_OWNER=y
1031 +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y
1032 +CONFIG_QUEUED_SPINLOCKS=y
1033 +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y
1034 +CONFIG_QUEUED_RWLOCKS=y
1035 +CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y
1036 +CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y
1037 +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y
1038 +CONFIG_FREEZER=y
1041 +# Executable file formats
1043 +CONFIG_BINFMT_ELF=y
1044 +CONFIG_COMPAT_BINFMT_ELF=y
1045 +CONFIG_ELFCORE=y
1046 +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
1047 +CONFIG_BINFMT_SCRIPT=y
1048 +CONFIG_BINFMT_MISC=m
1049 +CONFIG_COREDUMP=y
1050 +# end of Executable file formats
1053 +# Memory Management options
1055 +CONFIG_SELECT_MEMORY_MODEL=y
1056 +CONFIG_SPARSEMEM_MANUAL=y
1057 +CONFIG_SPARSEMEM=y
1058 +CONFIG_NEED_MULTIPLE_NODES=y
1059 +CONFIG_SPARSEMEM_EXTREME=y
1060 +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
1061 +CONFIG_SPARSEMEM_VMEMMAP=y
1062 +CONFIG_CLEAN_LOW_KBYTES=524288
1063 +CONFIG_CLEAN_MIN_KBYTES=0
1064 +CONFIG_HAVE_FAST_GUP=y
1065 +CONFIG_NUMA_KEEP_MEMINFO=y
1066 +CONFIG_MEMORY_ISOLATION=y
1067 +CONFIG_HAVE_BOOTMEM_INFO_NODE=y
1068 +CONFIG_MEMORY_HOTPLUG=y
1069 +CONFIG_MEMORY_HOTPLUG_SPARSE=y
1070 +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
1071 +CONFIG_MEMORY_HOTREMOVE=y
1072 +CONFIG_SPLIT_PTLOCK_CPUS=4
1073 +CONFIG_MEMORY_BALLOON=y
1074 +CONFIG_BALLOON_COMPACTION=y
1075 +CONFIG_COMPACTION=y
1076 +CONFIG_PAGE_REPORTING=y
1077 +CONFIG_MIGRATION=y
1078 +CONFIG_CONTIG_ALLOC=y
1079 +CONFIG_PHYS_ADDR_T_64BIT=y
1080 +CONFIG_BOUNCE=y
1081 +CONFIG_VIRT_TO_BUS=y
1082 +CONFIG_MMU_NOTIFIER=y
1083 +CONFIG_KSM=y
1084 +CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
1085 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y
1086 +CONFIG_MEMORY_FAILURE=y
1087 +CONFIG_HWPOISON_INJECT=m
1088 +CONFIG_TRANSPARENT_HUGEPAGE=y
1089 +# CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS is not set
1090 +CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
1091 +CONFIG_ARCH_WANTS_THP_SWAP=y
1092 +CONFIG_THP_SWAP=y
1093 +CONFIG_CLEANCACHE=y
1094 +CONFIG_FRONTSWAP=y
1095 +# CONFIG_CMA is not set
1096 +CONFIG_MEM_SOFT_DIRTY=y
1097 +CONFIG_ZSWAP=y
1098 +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set
1099 +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO is not set
1100 +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set
1101 +CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4=y
1102 +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set
1103 +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set
1104 +CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lz4"
1105 +# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD is not set
1106 +CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD=y
1107 +# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set
1108 +CONFIG_ZSWAP_ZPOOL_DEFAULT="z3fold"
1109 +# CONFIG_ZSWAP_DEFAULT_ON is not set
1110 +CONFIG_ZPOOL=y
1111 +CONFIG_ZBUD=m
1112 +CONFIG_Z3FOLD=y
1113 +CONFIG_ZSMALLOC=m
1114 +# CONFIG_ZSMALLOC_STAT is not set
1115 +CONFIG_GENERIC_EARLY_IOREMAP=y
1116 +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set
1117 +CONFIG_IDLE_PAGE_TRACKING=y
1118 +CONFIG_ARCH_HAS_PTE_DEVMAP=y
1119 +CONFIG_ZONE_DEVICE=y
1120 +CONFIG_DEV_PAGEMAP_OPS=y
1121 +CONFIG_HMM_MIRROR=y
1122 +CONFIG_DEVICE_PRIVATE=y
1123 +CONFIG_VMAP_PFN=y
1124 +CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y
1125 +CONFIG_ARCH_HAS_PKEYS=y
1126 +# CONFIG_PERCPU_STATS is not set
1127 +# CONFIG_GUP_TEST is not set
1128 +# CONFIG_READ_ONLY_THP_FOR_FS is not set
1129 +CONFIG_ARCH_HAS_PTE_SPECIAL=y
1130 +CONFIG_MAPPING_DIRTY_HELPERS=y
1131 +CONFIG_LRU_GEN=y
1132 +CONFIG_NR_LRU_GENS=4
1133 +CONFIG_TIERS_PER_GEN=2
1134 +# CONFIG_LRU_GEN_ENABLED is not set
1135 +# CONFIG_LRU_GEN_STATS is not set
1136 +# end of Memory Management options
1138 +CONFIG_NET=y
1139 +CONFIG_WANT_COMPAT_NETLINK_MESSAGES=y
1140 +CONFIG_COMPAT_NETLINK_MESSAGES=y
1141 +CONFIG_NET_INGRESS=y
1142 +CONFIG_NET_EGRESS=y
1143 +CONFIG_NET_REDIRECT=y
1144 +CONFIG_SKB_EXTENSIONS=y
1147 +# Networking options
1149 +CONFIG_PACKET=y
1150 +CONFIG_PACKET_DIAG=m
1151 +CONFIG_UNIX=y
1152 +CONFIG_UNIX_SCM=y
1153 +CONFIG_UNIX_DIAG=m
1154 +CONFIG_TLS=m
1155 +CONFIG_TLS_DEVICE=y
1156 +# CONFIG_TLS_TOE is not set
1157 +CONFIG_XFRM=y
1158 +CONFIG_XFRM_OFFLOAD=y
1159 +CONFIG_XFRM_ALGO=m
1160 +CONFIG_XFRM_USER=m
1161 +CONFIG_XFRM_USER_COMPAT=m
1162 +CONFIG_XFRM_INTERFACE=m
1163 +# CONFIG_XFRM_SUB_POLICY is not set
1164 +# CONFIG_XFRM_MIGRATE is not set
1165 +CONFIG_XFRM_STATISTICS=y
1166 +CONFIG_XFRM_AH=m
1167 +CONFIG_XFRM_ESP=m
1168 +CONFIG_XFRM_IPCOMP=m
1169 +CONFIG_NET_KEY=m
1170 +# CONFIG_NET_KEY_MIGRATE is not set
1171 +CONFIG_XFRM_ESPINTCP=y
1172 +CONFIG_SMC=m
1173 +CONFIG_SMC_DIAG=m
1174 +CONFIG_XDP_SOCKETS=y
1175 +CONFIG_XDP_SOCKETS_DIAG=m
1176 +CONFIG_INET=y
1177 +CONFIG_IP_MULTICAST=y
1178 +CONFIG_IP_ADVANCED_ROUTER=y
1179 +CONFIG_IP_FIB_TRIE_STATS=y
1180 +CONFIG_IP_MULTIPLE_TABLES=y
1181 +CONFIG_IP_ROUTE_MULTIPATH=y
1182 +CONFIG_IP_ROUTE_VERBOSE=y
1183 +CONFIG_IP_ROUTE_CLASSID=y
1184 +# CONFIG_IP_PNP is not set
1185 +CONFIG_NET_IPIP=m
1186 +CONFIG_NET_IPGRE_DEMUX=m
1187 +CONFIG_NET_IP_TUNNEL=m
1188 +CONFIG_NET_IPGRE=m
1189 +CONFIG_NET_IPGRE_BROADCAST=y
1190 +CONFIG_IP_MROUTE_COMMON=y
1191 +CONFIG_IP_MROUTE=y
1192 +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
1193 +CONFIG_IP_PIMSM_V1=y
1194 +CONFIG_IP_PIMSM_V2=y
1195 +CONFIG_SYN_COOKIES=y
1196 +CONFIG_NET_IPVTI=m
1197 +CONFIG_NET_UDP_TUNNEL=m
1198 +CONFIG_NET_FOU=m
1199 +CONFIG_NET_FOU_IP_TUNNELS=y
1200 +CONFIG_INET_AH=m
1201 +CONFIG_INET_ESP=m
1202 +CONFIG_INET_ESP_OFFLOAD=m
1203 +CONFIG_INET_ESPINTCP=y
1204 +CONFIG_INET_IPCOMP=m
1205 +CONFIG_INET_XFRM_TUNNEL=m
1206 +CONFIG_INET_TUNNEL=m
1207 +CONFIG_INET_DIAG=m
1208 +CONFIG_INET_TCP_DIAG=m
1209 +CONFIG_INET_UDP_DIAG=m
1210 +CONFIG_INET_RAW_DIAG=m
1211 +CONFIG_INET_DIAG_DESTROY=y
1212 +CONFIG_TCP_CONG_ADVANCED=y
1213 +CONFIG_TCP_CONG_BIC=m
1214 +CONFIG_TCP_CONG_CUBIC=m
1215 +CONFIG_TCP_CONG_WESTWOOD=m
1216 +CONFIG_TCP_CONG_HTCP=m
1217 +CONFIG_TCP_CONG_HSTCP=m
1218 +CONFIG_TCP_CONG_HYBLA=m
1219 +CONFIG_TCP_CONG_VEGAS=m
1220 +CONFIG_TCP_CONG_NV=m
1221 +CONFIG_TCP_CONG_SCALABLE=m
1222 +CONFIG_TCP_CONG_LP=m
1223 +CONFIG_TCP_CONG_VENO=m
1224 +CONFIG_TCP_CONG_YEAH=m
1225 +CONFIG_TCP_CONG_ILLINOIS=m
1226 +CONFIG_TCP_CONG_DCTCP=m
1227 +CONFIG_TCP_CONG_CDG=m
1228 +CONFIG_TCP_CONG_BBR=m
1229 +CONFIG_TCP_CONG_BBR2=y
1230 +CONFIG_DEFAULT_BBR2=y
1231 +# CONFIG_DEFAULT_RENO is not set
1232 +CONFIG_DEFAULT_TCP_CONG="bbr2"
1233 +CONFIG_TCP_MD5SIG=y
1234 +CONFIG_IPV6=y
1235 +CONFIG_IPV6_ROUTER_PREF=y
1236 +CONFIG_IPV6_ROUTE_INFO=y
1237 +# CONFIG_IPV6_OPTIMISTIC_DAD is not set
1238 +CONFIG_INET6_AH=m
1239 +CONFIG_INET6_ESP=m
1240 +CONFIG_INET6_ESP_OFFLOAD=m
1241 +CONFIG_INET6_ESPINTCP=y
1242 +CONFIG_INET6_IPCOMP=m
1243 +CONFIG_IPV6_MIP6=m
1244 +CONFIG_IPV6_ILA=m
1245 +CONFIG_INET6_XFRM_TUNNEL=m
1246 +CONFIG_INET6_TUNNEL=m
1247 +CONFIG_IPV6_VTI=m
1248 +CONFIG_IPV6_SIT=m
1249 +CONFIG_IPV6_SIT_6RD=y
1250 +CONFIG_IPV6_NDISC_NODETYPE=y
1251 +CONFIG_IPV6_TUNNEL=m
1252 +CONFIG_IPV6_GRE=m
1253 +CONFIG_IPV6_FOU=m
1254 +CONFIG_IPV6_FOU_TUNNEL=m
1255 +CONFIG_IPV6_MULTIPLE_TABLES=y
1256 +CONFIG_IPV6_SUBTREES=y
1257 +CONFIG_IPV6_MROUTE=y
1258 +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
1259 +CONFIG_IPV6_PIMSM_V2=y
1260 +CONFIG_IPV6_SEG6_LWTUNNEL=y
1261 +CONFIG_IPV6_SEG6_HMAC=y
1262 +CONFIG_IPV6_SEG6_BPF=y
1263 +# CONFIG_IPV6_RPL_LWTUNNEL is not set
1264 +CONFIG_NETLABEL=y
1265 +CONFIG_MPTCP=y
1266 +CONFIG_INET_MPTCP_DIAG=m
1267 +CONFIG_MPTCP_IPV6=y
1268 +CONFIG_NETWORK_SECMARK=y
1269 +CONFIG_NET_PTP_CLASSIFY=y
1270 +CONFIG_NETWORK_PHY_TIMESTAMPING=y
1271 +CONFIG_NETFILTER=y
1272 +CONFIG_NETFILTER_ADVANCED=y
1273 +CONFIG_BRIDGE_NETFILTER=m
1276 +# Core Netfilter Configuration
1278 +CONFIG_NETFILTER_INGRESS=y
1279 +CONFIG_NETFILTER_NETLINK=m
1280 +CONFIG_NETFILTER_FAMILY_BRIDGE=y
1281 +CONFIG_NETFILTER_FAMILY_ARP=y
1282 +CONFIG_NETFILTER_NETLINK_ACCT=m
1283 +CONFIG_NETFILTER_NETLINK_QUEUE=m
1284 +CONFIG_NETFILTER_NETLINK_LOG=m
1285 +CONFIG_NETFILTER_NETLINK_OSF=m
1286 +CONFIG_NF_CONNTRACK=m
1287 +CONFIG_NF_LOG_COMMON=m
1288 +CONFIG_NF_LOG_NETDEV=m
1289 +CONFIG_NETFILTER_CONNCOUNT=m
1290 +CONFIG_NF_CONNTRACK_MARK=y
1291 +CONFIG_NF_CONNTRACK_SECMARK=y
1292 +CONFIG_NF_CONNTRACK_ZONES=y
1293 +# CONFIG_NF_CONNTRACK_PROCFS is not set
1294 +CONFIG_NF_CONNTRACK_EVENTS=y
1295 +CONFIG_NF_CONNTRACK_TIMEOUT=y
1296 +CONFIG_NF_CONNTRACK_TIMESTAMP=y
1297 +CONFIG_NF_CONNTRACK_LABELS=y
1298 +CONFIG_NF_CT_PROTO_DCCP=y
1299 +CONFIG_NF_CT_PROTO_GRE=y
1300 +CONFIG_NF_CT_PROTO_SCTP=y
1301 +CONFIG_NF_CT_PROTO_UDPLITE=y
1302 +CONFIG_NF_CONNTRACK_AMANDA=m
1303 +CONFIG_NF_CONNTRACK_FTP=m
1304 +CONFIG_NF_CONNTRACK_H323=m
1305 +CONFIG_NF_CONNTRACK_IRC=m
1306 +CONFIG_NF_CONNTRACK_BROADCAST=m
1307 +CONFIG_NF_CONNTRACK_NETBIOS_NS=m
1308 +CONFIG_NF_CONNTRACK_SNMP=m
1309 +CONFIG_NF_CONNTRACK_PPTP=m
1310 +CONFIG_NF_CONNTRACK_SANE=m
1311 +CONFIG_NF_CONNTRACK_SIP=m
1312 +CONFIG_NF_CONNTRACK_TFTP=m
1313 +CONFIG_NF_CT_NETLINK=m
1314 +CONFIG_NF_CT_NETLINK_TIMEOUT=m
1315 +CONFIG_NF_CT_NETLINK_HELPER=m
1316 +CONFIG_NETFILTER_NETLINK_GLUE_CT=y
1317 +CONFIG_NF_NAT=m
1318 +CONFIG_NF_NAT_AMANDA=m
1319 +CONFIG_NF_NAT_FTP=m
1320 +CONFIG_NF_NAT_IRC=m
1321 +CONFIG_NF_NAT_SIP=m
1322 +CONFIG_NF_NAT_TFTP=m
1323 +CONFIG_NF_NAT_REDIRECT=y
1324 +CONFIG_NF_NAT_MASQUERADE=y
1325 +CONFIG_NETFILTER_SYNPROXY=m
1326 +CONFIG_NF_TABLES=m
1327 +CONFIG_NF_TABLES_INET=y
1328 +CONFIG_NF_TABLES_NETDEV=y
1329 +CONFIG_NFT_NUMGEN=m
1330 +CONFIG_NFT_CT=m
1331 +CONFIG_NFT_FLOW_OFFLOAD=m
1332 +CONFIG_NFT_COUNTER=m
1333 +CONFIG_NFT_CONNLIMIT=m
1334 +CONFIG_NFT_LOG=m
1335 +CONFIG_NFT_LIMIT=m
1336 +CONFIG_NFT_MASQ=m
1337 +CONFIG_NFT_REDIR=m
1338 +CONFIG_NFT_NAT=m
1339 +CONFIG_NFT_TUNNEL=m
1340 +CONFIG_NFT_OBJREF=m
1341 +CONFIG_NFT_QUEUE=m
1342 +CONFIG_NFT_QUOTA=m
1343 +CONFIG_NFT_REJECT=m
1344 +CONFIG_NFT_REJECT_INET=m
1345 +CONFIG_NFT_COMPAT=m
1346 +CONFIG_NFT_HASH=m
1347 +CONFIG_NFT_FIB=m
1348 +CONFIG_NFT_FIB_INET=m
1349 +CONFIG_NFT_XFRM=m
1350 +CONFIG_NFT_SOCKET=m
1351 +CONFIG_NFT_OSF=m
1352 +CONFIG_NFT_TPROXY=m
1353 +CONFIG_NFT_SYNPROXY=m
1354 +CONFIG_NF_DUP_NETDEV=m
1355 +CONFIG_NFT_DUP_NETDEV=m
1356 +CONFIG_NFT_FWD_NETDEV=m
1357 +CONFIG_NFT_FIB_NETDEV=m
1358 +CONFIG_NFT_REJECT_NETDEV=m
1359 +CONFIG_NF_FLOW_TABLE_INET=m
1360 +CONFIG_NF_FLOW_TABLE=m
1361 +CONFIG_NETFILTER_XTABLES=m
1364 +# Xtables combined modules
1366 +CONFIG_NETFILTER_XT_MARK=m
1367 +CONFIG_NETFILTER_XT_CONNMARK=m
1368 +CONFIG_NETFILTER_XT_SET=m
1371 +# Xtables targets
1373 +CONFIG_NETFILTER_XT_TARGET_AUDIT=m
1374 +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
1375 +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
1376 +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
1377 +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
1378 +CONFIG_NETFILTER_XT_TARGET_CT=m
1379 +CONFIG_NETFILTER_XT_TARGET_DSCP=m
1380 +CONFIG_NETFILTER_XT_TARGET_HL=m
1381 +CONFIG_NETFILTER_XT_TARGET_HMARK=m
1382 +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
1383 +CONFIG_NETFILTER_XT_TARGET_LED=m
1384 +CONFIG_NETFILTER_XT_TARGET_LOG=m
1385 +CONFIG_NETFILTER_XT_TARGET_MARK=m
1386 +CONFIG_NETFILTER_XT_NAT=m
1387 +CONFIG_NETFILTER_XT_TARGET_NETMAP=m
1388 +CONFIG_NETFILTER_XT_TARGET_NFLOG=m
1389 +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
1390 +# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set
1391 +CONFIG_NETFILTER_XT_TARGET_RATEEST=m
1392 +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
1393 +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m
1394 +CONFIG_NETFILTER_XT_TARGET_TEE=m
1395 +CONFIG_NETFILTER_XT_TARGET_TPROXY=m
1396 +CONFIG_NETFILTER_XT_TARGET_TRACE=m
1397 +CONFIG_NETFILTER_XT_TARGET_SECMARK=m
1398 +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
1399 +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
1402 +# Xtables matches
1404 +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
1405 +CONFIG_NETFILTER_XT_MATCH_BPF=m
1406 +CONFIG_NETFILTER_XT_MATCH_CGROUP=m
1407 +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
1408 +CONFIG_NETFILTER_XT_MATCH_COMMENT=m
1409 +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
1410 +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
1411 +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
1412 +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
1413 +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
1414 +CONFIG_NETFILTER_XT_MATCH_CPU=m
1415 +CONFIG_NETFILTER_XT_MATCH_DCCP=m
1416 +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
1417 +CONFIG_NETFILTER_XT_MATCH_DSCP=m
1418 +CONFIG_NETFILTER_XT_MATCH_ECN=m
1419 +CONFIG_NETFILTER_XT_MATCH_ESP=m
1420 +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
1421 +CONFIG_NETFILTER_XT_MATCH_HELPER=m
1422 +CONFIG_NETFILTER_XT_MATCH_HL=m
1423 +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
1424 +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
1425 +CONFIG_NETFILTER_XT_MATCH_IPVS=m
1426 +CONFIG_NETFILTER_XT_MATCH_L2TP=m
1427 +CONFIG_NETFILTER_XT_MATCH_LENGTH=m
1428 +CONFIG_NETFILTER_XT_MATCH_LIMIT=m
1429 +CONFIG_NETFILTER_XT_MATCH_MAC=m
1430 +CONFIG_NETFILTER_XT_MATCH_MARK=m
1431 +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
1432 +CONFIG_NETFILTER_XT_MATCH_NFACCT=m
1433 +CONFIG_NETFILTER_XT_MATCH_OSF=m
1434 +CONFIG_NETFILTER_XT_MATCH_OWNER=m
1435 +CONFIG_NETFILTER_XT_MATCH_POLICY=m
1436 +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
1437 +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
1438 +CONFIG_NETFILTER_XT_MATCH_QUOTA=m
1439 +CONFIG_NETFILTER_XT_MATCH_RATEEST=m
1440 +CONFIG_NETFILTER_XT_MATCH_REALM=m
1441 +CONFIG_NETFILTER_XT_MATCH_RECENT=m
1442 +CONFIG_NETFILTER_XT_MATCH_SCTP=m
1443 +CONFIG_NETFILTER_XT_MATCH_SOCKET=m
1444 +CONFIG_NETFILTER_XT_MATCH_STATE=m
1445 +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
1446 +CONFIG_NETFILTER_XT_MATCH_STRING=m
1447 +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
1448 +CONFIG_NETFILTER_XT_MATCH_TIME=m
1449 +CONFIG_NETFILTER_XT_MATCH_U32=m
1450 +# end of Core Netfilter Configuration
1452 +CONFIG_IP_SET=m
1453 +CONFIG_IP_SET_MAX=256
1454 +CONFIG_IP_SET_BITMAP_IP=m
1455 +CONFIG_IP_SET_BITMAP_IPMAC=m
1456 +CONFIG_IP_SET_BITMAP_PORT=m
1457 +CONFIG_IP_SET_HASH_IP=m
1458 +CONFIG_IP_SET_HASH_IPMARK=m
1459 +CONFIG_IP_SET_HASH_IPPORT=m
1460 +CONFIG_IP_SET_HASH_IPPORTIP=m
1461 +CONFIG_IP_SET_HASH_IPPORTNET=m
1462 +CONFIG_IP_SET_HASH_IPMAC=m
1463 +CONFIG_IP_SET_HASH_MAC=m
1464 +CONFIG_IP_SET_HASH_NETPORTNET=m
1465 +CONFIG_IP_SET_HASH_NET=m
1466 +CONFIG_IP_SET_HASH_NETNET=m
1467 +CONFIG_IP_SET_HASH_NETPORT=m
1468 +CONFIG_IP_SET_HASH_NETIFACE=m
1469 +CONFIG_IP_SET_LIST_SET=m
1470 +CONFIG_IP_VS=m
1471 +CONFIG_IP_VS_IPV6=y
1472 +# CONFIG_IP_VS_DEBUG is not set
1473 +CONFIG_IP_VS_TAB_BITS=12
1476 +# IPVS transport protocol load balancing support
1478 +CONFIG_IP_VS_PROTO_TCP=y
1479 +CONFIG_IP_VS_PROTO_UDP=y
1480 +CONFIG_IP_VS_PROTO_AH_ESP=y
1481 +CONFIG_IP_VS_PROTO_ESP=y
1482 +CONFIG_IP_VS_PROTO_AH=y
1483 +CONFIG_IP_VS_PROTO_SCTP=y
1486 +# IPVS scheduler
1488 +CONFIG_IP_VS_RR=m
1489 +CONFIG_IP_VS_WRR=m
1490 +CONFIG_IP_VS_LC=m
1491 +CONFIG_IP_VS_WLC=m
1492 +CONFIG_IP_VS_FO=m
1493 +CONFIG_IP_VS_OVF=m
1494 +CONFIG_IP_VS_LBLC=m
1495 +CONFIG_IP_VS_LBLCR=m
1496 +CONFIG_IP_VS_DH=m
1497 +CONFIG_IP_VS_SH=m
1498 +CONFIG_IP_VS_MH=m
1499 +CONFIG_IP_VS_SED=m
1500 +CONFIG_IP_VS_NQ=m
1501 +CONFIG_IP_VS_TWOS=m
1504 +# IPVS SH scheduler
1506 +CONFIG_IP_VS_SH_TAB_BITS=8
1509 +# IPVS MH scheduler
1511 +CONFIG_IP_VS_MH_TAB_INDEX=12
1514 +# IPVS application helper
1516 +CONFIG_IP_VS_FTP=m
1517 +CONFIG_IP_VS_NFCT=y
1518 +CONFIG_IP_VS_PE_SIP=m
1521 +# IP: Netfilter Configuration
1523 +CONFIG_NF_DEFRAG_IPV4=m
1524 +CONFIG_NF_SOCKET_IPV4=m
1525 +CONFIG_NF_TPROXY_IPV4=m
1526 +CONFIG_NF_TABLES_IPV4=y
1527 +CONFIG_NFT_REJECT_IPV4=m
1528 +CONFIG_NFT_DUP_IPV4=m
1529 +CONFIG_NFT_FIB_IPV4=m
1530 +CONFIG_NF_TABLES_ARP=y
1531 +CONFIG_NF_FLOW_TABLE_IPV4=m
1532 +CONFIG_NF_DUP_IPV4=m
1533 +CONFIG_NF_LOG_ARP=m
1534 +CONFIG_NF_LOG_IPV4=m
1535 +CONFIG_NF_REJECT_IPV4=m
1536 +CONFIG_NF_NAT_SNMP_BASIC=m
1537 +CONFIG_NF_NAT_PPTP=m
1538 +CONFIG_NF_NAT_H323=m
1539 +CONFIG_IP_NF_IPTABLES=m
1540 +CONFIG_IP_NF_MATCH_AH=m
1541 +CONFIG_IP_NF_MATCH_ECN=m
1542 +CONFIG_IP_NF_MATCH_RPFILTER=m
1543 +CONFIG_IP_NF_MATCH_TTL=m
1544 +CONFIG_IP_NF_FILTER=m
1545 +CONFIG_IP_NF_TARGET_REJECT=m
1546 +CONFIG_IP_NF_TARGET_SYNPROXY=m
1547 +CONFIG_IP_NF_NAT=m
1548 +CONFIG_IP_NF_TARGET_MASQUERADE=m
1549 +CONFIG_IP_NF_TARGET_NETMAP=m
1550 +CONFIG_IP_NF_TARGET_REDIRECT=m
1551 +CONFIG_IP_NF_MANGLE=m
1552 +CONFIG_IP_NF_TARGET_CLUSTERIP=m
1553 +CONFIG_IP_NF_TARGET_ECN=m
1554 +CONFIG_IP_NF_TARGET_TTL=m
1555 +CONFIG_IP_NF_RAW=m
1556 +CONFIG_IP_NF_SECURITY=m
1557 +CONFIG_IP_NF_ARPTABLES=m
1558 +CONFIG_IP_NF_ARPFILTER=m
1559 +CONFIG_IP_NF_ARP_MANGLE=m
1560 +# end of IP: Netfilter Configuration
1563 +# IPv6: Netfilter Configuration
1565 +CONFIG_NF_SOCKET_IPV6=m
1566 +CONFIG_NF_TPROXY_IPV6=m
1567 +CONFIG_NF_TABLES_IPV6=y
1568 +CONFIG_NFT_REJECT_IPV6=m
1569 +CONFIG_NFT_DUP_IPV6=m
1570 +CONFIG_NFT_FIB_IPV6=m
1571 +CONFIG_NF_FLOW_TABLE_IPV6=m
1572 +CONFIG_NF_DUP_IPV6=m
1573 +CONFIG_NF_REJECT_IPV6=m
1574 +CONFIG_NF_LOG_IPV6=m
1575 +CONFIG_IP6_NF_IPTABLES=m
1576 +CONFIG_IP6_NF_MATCH_AH=m
1577 +CONFIG_IP6_NF_MATCH_EUI64=m
1578 +CONFIG_IP6_NF_MATCH_FRAG=m
1579 +CONFIG_IP6_NF_MATCH_OPTS=m
1580 +CONFIG_IP6_NF_MATCH_HL=m
1581 +CONFIG_IP6_NF_MATCH_IPV6HEADER=m
1582 +CONFIG_IP6_NF_MATCH_MH=m
1583 +CONFIG_IP6_NF_MATCH_RPFILTER=m
1584 +CONFIG_IP6_NF_MATCH_RT=m
1585 +CONFIG_IP6_NF_MATCH_SRH=m
1586 +CONFIG_IP6_NF_TARGET_HL=m
1587 +CONFIG_IP6_NF_FILTER=m
1588 +CONFIG_IP6_NF_TARGET_REJECT=m
1589 +CONFIG_IP6_NF_TARGET_SYNPROXY=m
1590 +CONFIG_IP6_NF_MANGLE=m
1591 +CONFIG_IP6_NF_RAW=m
1592 +CONFIG_IP6_NF_SECURITY=m
1593 +CONFIG_IP6_NF_NAT=m
1594 +CONFIG_IP6_NF_TARGET_MASQUERADE=m
1595 +CONFIG_IP6_NF_TARGET_NPT=m
1596 +# end of IPv6: Netfilter Configuration
1598 +CONFIG_NF_DEFRAG_IPV6=m
1601 +# DECnet: Netfilter Configuration
1603 +CONFIG_DECNET_NF_GRABULATOR=m
1604 +# end of DECnet: Netfilter Configuration
1606 +CONFIG_NF_TABLES_BRIDGE=m
1607 +CONFIG_NFT_BRIDGE_META=m
1608 +CONFIG_NFT_BRIDGE_REJECT=m
1609 +CONFIG_NF_LOG_BRIDGE=m
1610 +CONFIG_NF_CONNTRACK_BRIDGE=m
1611 +CONFIG_BRIDGE_NF_EBTABLES=m
1612 +CONFIG_BRIDGE_EBT_BROUTE=m
1613 +CONFIG_BRIDGE_EBT_T_FILTER=m
1614 +CONFIG_BRIDGE_EBT_T_NAT=m
1615 +CONFIG_BRIDGE_EBT_802_3=m
1616 +CONFIG_BRIDGE_EBT_AMONG=m
1617 +CONFIG_BRIDGE_EBT_ARP=m
1618 +CONFIG_BRIDGE_EBT_IP=m
1619 +CONFIG_BRIDGE_EBT_IP6=m
1620 +CONFIG_BRIDGE_EBT_LIMIT=m
1621 +CONFIG_BRIDGE_EBT_MARK=m
1622 +CONFIG_BRIDGE_EBT_PKTTYPE=m
1623 +CONFIG_BRIDGE_EBT_STP=m
1624 +CONFIG_BRIDGE_EBT_VLAN=m
1625 +CONFIG_BRIDGE_EBT_ARPREPLY=m
1626 +CONFIG_BRIDGE_EBT_DNAT=m
1627 +CONFIG_BRIDGE_EBT_MARK_T=m
1628 +CONFIG_BRIDGE_EBT_REDIRECT=m
1629 +CONFIG_BRIDGE_EBT_SNAT=m
1630 +CONFIG_BRIDGE_EBT_LOG=m
1631 +CONFIG_BRIDGE_EBT_NFLOG=m
1632 +CONFIG_BPFILTER=y
1633 +CONFIG_BPFILTER_UMH=m
1634 +CONFIG_IP_DCCP=m
1635 +CONFIG_INET_DCCP_DIAG=m
1638 +# DCCP CCIDs Configuration
1640 +# CONFIG_IP_DCCP_CCID2_DEBUG is not set
1641 +# CONFIG_IP_DCCP_CCID3 is not set
1642 +# end of DCCP CCIDs Configuration
1645 +# DCCP Kernel Hacking
1647 +# CONFIG_IP_DCCP_DEBUG is not set
1648 +# end of DCCP Kernel Hacking
1650 +CONFIG_IP_SCTP=m
1651 +# CONFIG_SCTP_DBG_OBJCNT is not set
1652 +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set
1653 +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y
1654 +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set
1655 +CONFIG_SCTP_COOKIE_HMAC_MD5=y
1656 +CONFIG_SCTP_COOKIE_HMAC_SHA1=y
1657 +CONFIG_INET_SCTP_DIAG=m
1658 +CONFIG_RDS=m
1659 +CONFIG_RDS_RDMA=m
1660 +CONFIG_RDS_TCP=m
1661 +# CONFIG_RDS_DEBUG is not set
1662 +CONFIG_TIPC=m
1663 +CONFIG_TIPC_MEDIA_IB=y
1664 +CONFIG_TIPC_MEDIA_UDP=y
1665 +CONFIG_TIPC_CRYPTO=y
1666 +CONFIG_TIPC_DIAG=m
1667 +CONFIG_ATM=m
1668 +CONFIG_ATM_CLIP=m
1669 +# CONFIG_ATM_CLIP_NO_ICMP is not set
1670 +CONFIG_ATM_LANE=m
1671 +CONFIG_ATM_MPOA=m
1672 +CONFIG_ATM_BR2684=m
1673 +# CONFIG_ATM_BR2684_IPFILTER is not set
1674 +CONFIG_L2TP=m
1675 +CONFIG_L2TP_DEBUGFS=m
1676 +CONFIG_L2TP_V3=y
1677 +CONFIG_L2TP_IP=m
1678 +CONFIG_L2TP_ETH=m
1679 +CONFIG_STP=m
1680 +CONFIG_GARP=m
1681 +CONFIG_MRP=m
1682 +CONFIG_BRIDGE=m
1683 +CONFIG_BRIDGE_IGMP_SNOOPING=y
1684 +CONFIG_BRIDGE_VLAN_FILTERING=y
1685 +CONFIG_BRIDGE_MRP=y
1686 +CONFIG_BRIDGE_CFM=y
1687 +CONFIG_HAVE_NET_DSA=y
1688 +CONFIG_NET_DSA=m
1689 +CONFIG_NET_DSA_TAG_8021Q=m
1690 +CONFIG_NET_DSA_TAG_AR9331=m
1691 +CONFIG_NET_DSA_TAG_BRCM_COMMON=m
1692 +CONFIG_NET_DSA_TAG_BRCM=m
1693 +CONFIG_NET_DSA_TAG_BRCM_PREPEND=m
1694 +CONFIG_NET_DSA_TAG_HELLCREEK=m
1695 +CONFIG_NET_DSA_TAG_GSWIP=m
1696 +CONFIG_NET_DSA_TAG_DSA_COMMON=m
1697 +CONFIG_NET_DSA_TAG_DSA=m
1698 +CONFIG_NET_DSA_TAG_EDSA=m
1699 +CONFIG_NET_DSA_TAG_MTK=m
1700 +CONFIG_NET_DSA_TAG_KSZ=m
1701 +CONFIG_NET_DSA_TAG_RTL4_A=m
1702 +CONFIG_NET_DSA_TAG_OCELOT=m
1703 +CONFIG_NET_DSA_TAG_OCELOT_8021Q=m
1704 +CONFIG_NET_DSA_TAG_QCA=m
1705 +CONFIG_NET_DSA_TAG_LAN9303=m
1706 +CONFIG_NET_DSA_TAG_SJA1105=m
1707 +CONFIG_NET_DSA_TAG_TRAILER=m
1708 +CONFIG_NET_DSA_TAG_XRS700X=m
1709 +CONFIG_VLAN_8021Q=m
1710 +CONFIG_VLAN_8021Q_GVRP=y
1711 +CONFIG_VLAN_8021Q_MVRP=y
1712 +CONFIG_DECNET=m
1713 +# CONFIG_DECNET_ROUTER is not set
1714 +CONFIG_LLC=m
1715 +CONFIG_LLC2=m
1716 +CONFIG_ATALK=m
1717 +CONFIG_DEV_APPLETALK=m
1718 +# CONFIG_IPDDP is not set
1719 +CONFIG_X25=m
1720 +CONFIG_LAPB=m
1721 +CONFIG_PHONET=m
1722 +CONFIG_6LOWPAN=m
1723 +# CONFIG_6LOWPAN_DEBUGFS is not set
1724 +CONFIG_6LOWPAN_NHC=m
1725 +CONFIG_6LOWPAN_NHC_DEST=m
1726 +CONFIG_6LOWPAN_NHC_FRAGMENT=m
1727 +CONFIG_6LOWPAN_NHC_HOP=m
1728 +CONFIG_6LOWPAN_NHC_IPV6=m
1729 +CONFIG_6LOWPAN_NHC_MOBILITY=m
1730 +CONFIG_6LOWPAN_NHC_ROUTING=m
1731 +CONFIG_6LOWPAN_NHC_UDP=m
1732 +# CONFIG_6LOWPAN_GHC_EXT_HDR_HOP is not set
1733 +# CONFIG_6LOWPAN_GHC_UDP is not set
1734 +# CONFIG_6LOWPAN_GHC_ICMPV6 is not set
1735 +# CONFIG_6LOWPAN_GHC_EXT_HDR_DEST is not set
1736 +# CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG is not set
1737 +# CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE is not set
1738 +CONFIG_IEEE802154=m
1739 +# CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set
1740 +CONFIG_IEEE802154_SOCKET=m
1741 +CONFIG_IEEE802154_6LOWPAN=m
1742 +CONFIG_MAC802154=m
1743 +CONFIG_NET_SCHED=y
1746 +# Queueing/Scheduling
1748 +CONFIG_NET_SCH_CBQ=m
1749 +CONFIG_NET_SCH_HTB=m
1750 +CONFIG_NET_SCH_HFSC=m
1751 +CONFIG_NET_SCH_ATM=m
1752 +CONFIG_NET_SCH_PRIO=m
1753 +CONFIG_NET_SCH_MULTIQ=m
1754 +CONFIG_NET_SCH_RED=m
1755 +CONFIG_NET_SCH_SFB=m
1756 +CONFIG_NET_SCH_SFQ=m
1757 +CONFIG_NET_SCH_TEQL=m
1758 +CONFIG_NET_SCH_TBF=m
1759 +CONFIG_NET_SCH_CBS=m
1760 +CONFIG_NET_SCH_ETF=m
1761 +CONFIG_NET_SCH_TAPRIO=m
1762 +CONFIG_NET_SCH_GRED=m
1763 +CONFIG_NET_SCH_DSMARK=m
1764 +CONFIG_NET_SCH_NETEM=m
1765 +CONFIG_NET_SCH_DRR=m
1766 +CONFIG_NET_SCH_MQPRIO=m
1767 +CONFIG_NET_SCH_SKBPRIO=m
1768 +CONFIG_NET_SCH_CHOKE=m
1769 +CONFIG_NET_SCH_QFQ=m
1770 +CONFIG_NET_SCH_CODEL=m
1771 +CONFIG_NET_SCH_FQ_CODEL=m
1772 +CONFIG_NET_SCH_CAKE=m
1773 +CONFIG_NET_SCH_FQ=m
1774 +CONFIG_NET_SCH_HHF=m
1775 +CONFIG_NET_SCH_PIE=y
1776 +CONFIG_NET_SCH_FQ_PIE=y
1777 +CONFIG_NET_SCH_INGRESS=m
1778 +CONFIG_NET_SCH_PLUG=m
1779 +CONFIG_NET_SCH_ETS=m
1780 +CONFIG_NET_SCH_DEFAULT=y
1781 +# CONFIG_DEFAULT_FQ is not set
1782 +# CONFIG_DEFAULT_CODEL is not set
1783 +# CONFIG_DEFAULT_FQ_CODEL is not set
1784 +CONFIG_DEFAULT_FQ_PIE=y
1785 +# CONFIG_DEFAULT_SFQ is not set
1786 +# CONFIG_DEFAULT_PFIFO_FAST is not set
1787 +CONFIG_DEFAULT_NET_SCH="fq_pie"
1790 +# Classification
1792 +CONFIG_NET_CLS=y
1793 +CONFIG_NET_CLS_BASIC=m
1794 +CONFIG_NET_CLS_TCINDEX=m
1795 +CONFIG_NET_CLS_ROUTE4=m
1796 +CONFIG_NET_CLS_FW=m
1797 +CONFIG_NET_CLS_U32=m
1798 +# CONFIG_CLS_U32_PERF is not set
1799 +CONFIG_CLS_U32_MARK=y
1800 +CONFIG_NET_CLS_RSVP=m
1801 +CONFIG_NET_CLS_RSVP6=m
1802 +CONFIG_NET_CLS_FLOW=m
1803 +CONFIG_NET_CLS_CGROUP=m
1804 +CONFIG_NET_CLS_BPF=m
1805 +CONFIG_NET_CLS_FLOWER=m
1806 +CONFIG_NET_CLS_MATCHALL=m
1807 +CONFIG_NET_EMATCH=y
1808 +CONFIG_NET_EMATCH_STACK=32
1809 +CONFIG_NET_EMATCH_CMP=m
1810 +CONFIG_NET_EMATCH_NBYTE=m
1811 +CONFIG_NET_EMATCH_U32=m
1812 +CONFIG_NET_EMATCH_META=m
1813 +CONFIG_NET_EMATCH_TEXT=m
1814 +CONFIG_NET_EMATCH_CANID=m
1815 +CONFIG_NET_EMATCH_IPSET=m
1816 +CONFIG_NET_EMATCH_IPT=m
1817 +CONFIG_NET_CLS_ACT=y
1818 +CONFIG_NET_ACT_POLICE=m
1819 +CONFIG_NET_ACT_GACT=m
1820 +CONFIG_GACT_PROB=y
1821 +CONFIG_NET_ACT_MIRRED=m
1822 +CONFIG_NET_ACT_SAMPLE=m
1823 +CONFIG_NET_ACT_IPT=m
1824 +CONFIG_NET_ACT_NAT=m
1825 +CONFIG_NET_ACT_PEDIT=m
1826 +CONFIG_NET_ACT_SIMP=m
1827 +CONFIG_NET_ACT_SKBEDIT=m
1828 +CONFIG_NET_ACT_CSUM=m
1829 +CONFIG_NET_ACT_MPLS=m
1830 +CONFIG_NET_ACT_VLAN=m
1831 +CONFIG_NET_ACT_BPF=m
1832 +CONFIG_NET_ACT_CONNMARK=m
1833 +CONFIG_NET_ACT_CTINFO=m
1834 +CONFIG_NET_ACT_SKBMOD=m
1835 +# CONFIG_NET_ACT_IFE is not set
1836 +CONFIG_NET_ACT_TUNNEL_KEY=m
1837 +CONFIG_NET_ACT_CT=m
1838 +CONFIG_NET_ACT_GATE=m
1839 +CONFIG_NET_TC_SKB_EXT=y
1840 +CONFIG_NET_SCH_FIFO=y
1841 +CONFIG_DCB=y
1842 +CONFIG_DNS_RESOLVER=y
1843 +CONFIG_BATMAN_ADV=m
1844 +# CONFIG_BATMAN_ADV_BATMAN_V is not set
1845 +CONFIG_BATMAN_ADV_BLA=y
1846 +CONFIG_BATMAN_ADV_DAT=y
1847 +CONFIG_BATMAN_ADV_NC=y
1848 +CONFIG_BATMAN_ADV_MCAST=y
1849 +# CONFIG_BATMAN_ADV_DEBUG is not set
1850 +CONFIG_OPENVSWITCH=m
1851 +CONFIG_OPENVSWITCH_GRE=m
1852 +CONFIG_OPENVSWITCH_VXLAN=m
1853 +CONFIG_OPENVSWITCH_GENEVE=m
1854 +CONFIG_VSOCKETS=m
1855 +CONFIG_VSOCKETS_DIAG=m
1856 +CONFIG_VSOCKETS_LOOPBACK=m
1857 +CONFIG_VMWARE_VMCI_VSOCKETS=m
1858 +CONFIG_VIRTIO_VSOCKETS=m
1859 +CONFIG_VIRTIO_VSOCKETS_COMMON=m
1860 +CONFIG_HYPERV_VSOCKETS=m
1861 +CONFIG_NETLINK_DIAG=m
1862 +CONFIG_MPLS=y
1863 +CONFIG_NET_MPLS_GSO=m
1864 +CONFIG_MPLS_ROUTING=m
1865 +CONFIG_MPLS_IPTUNNEL=m
1866 +CONFIG_NET_NSH=m
1867 +CONFIG_HSR=m
1868 +CONFIG_NET_SWITCHDEV=y
1869 +CONFIG_NET_L3_MASTER_DEV=y
1870 +CONFIG_QRTR=m
1871 +CONFIG_QRTR_SMD=m
1872 +CONFIG_QRTR_TUN=m
1873 +CONFIG_QRTR_MHI=m
1874 +CONFIG_NET_NCSI=y
1875 +CONFIG_NCSI_OEM_CMD_GET_MAC=y
1876 +CONFIG_RPS=y
1877 +CONFIG_RFS_ACCEL=y
1878 +CONFIG_SOCK_RX_QUEUE_MAPPING=y
1879 +CONFIG_XPS=y
1880 +CONFIG_CGROUP_NET_PRIO=y
1881 +CONFIG_CGROUP_NET_CLASSID=y
1882 +CONFIG_NET_RX_BUSY_POLL=y
1883 +CONFIG_BQL=y
1884 +CONFIG_BPF_JIT=y
1885 +CONFIG_BPF_STREAM_PARSER=y
1886 +CONFIG_NET_FLOW_LIMIT=y
1889 +# Network testing
1891 +CONFIG_NET_PKTGEN=m
1892 +# end of Network testing
1893 +# end of Networking options
1895 +CONFIG_HAMRADIO=y
1898 +# Packet Radio protocols
1900 +CONFIG_AX25=m
1901 +CONFIG_AX25_DAMA_SLAVE=y
1902 +CONFIG_NETROM=m
1903 +CONFIG_ROSE=m
1906 +# AX.25 network device drivers
1908 +CONFIG_MKISS=m
1909 +CONFIG_6PACK=m
1910 +CONFIG_BPQETHER=m
1911 +CONFIG_BAYCOM_SER_FDX=m
1912 +CONFIG_BAYCOM_SER_HDX=m
1913 +CONFIG_BAYCOM_PAR=m
1914 +CONFIG_YAM=m
1915 +# end of AX.25 network device drivers
1917 +CONFIG_CAN=m
1918 +CONFIG_CAN_RAW=m
1919 +CONFIG_CAN_BCM=m
1920 +CONFIG_CAN_GW=m
1921 +CONFIG_CAN_J1939=m
1922 +CONFIG_CAN_ISOTP=m
1925 +# CAN Device Drivers
1927 +CONFIG_CAN_VCAN=m
1928 +CONFIG_CAN_VXCAN=m
1929 +CONFIG_CAN_SLCAN=m
1930 +CONFIG_CAN_DEV=m
1931 +CONFIG_CAN_CALC_BITTIMING=y
1932 +CONFIG_CAN_JANZ_ICAN3=m
1933 +CONFIG_CAN_KVASER_PCIEFD=m
1934 +CONFIG_CAN_C_CAN=m
1935 +CONFIG_CAN_C_CAN_PLATFORM=m
1936 +CONFIG_CAN_C_CAN_PCI=m
1937 +CONFIG_CAN_CC770=m
1938 +CONFIG_CAN_CC770_ISA=m
1939 +CONFIG_CAN_CC770_PLATFORM=m
1940 +CONFIG_CAN_IFI_CANFD=m
1941 +CONFIG_CAN_M_CAN=m
1942 +CONFIG_CAN_M_CAN_PCI=m
1943 +CONFIG_CAN_M_CAN_PLATFORM=m
1944 +CONFIG_CAN_M_CAN_TCAN4X5X=m
1945 +CONFIG_CAN_PEAK_PCIEFD=m
1946 +CONFIG_CAN_SJA1000=m
1947 +CONFIG_CAN_EMS_PCI=m
1948 +CONFIG_CAN_EMS_PCMCIA=m
1949 +CONFIG_CAN_F81601=m
1950 +CONFIG_CAN_KVASER_PCI=m
1951 +CONFIG_CAN_PEAK_PCI=m
1952 +CONFIG_CAN_PEAK_PCIEC=y
1953 +CONFIG_CAN_PEAK_PCMCIA=m
1954 +CONFIG_CAN_PLX_PCI=m
1955 +CONFIG_CAN_SJA1000_ISA=m
1956 +CONFIG_CAN_SJA1000_PLATFORM=m
1957 +CONFIG_CAN_SOFTING=m
1958 +CONFIG_CAN_SOFTING_CS=m
1961 +# CAN SPI interfaces
1963 +CONFIG_CAN_HI311X=m
1964 +CONFIG_CAN_MCP251X=m
1965 +CONFIG_CAN_MCP251XFD=m
1966 +# CONFIG_CAN_MCP251XFD_SANITY is not set
1967 +# end of CAN SPI interfaces
1970 +# CAN USB interfaces
1972 +CONFIG_CAN_8DEV_USB=m
1973 +CONFIG_CAN_EMS_USB=m
1974 +CONFIG_CAN_ESD_USB2=m
1975 +CONFIG_CAN_GS_USB=m
1976 +CONFIG_CAN_KVASER_USB=m
1977 +CONFIG_CAN_MCBA_USB=m
1978 +CONFIG_CAN_PEAK_USB=m
1979 +CONFIG_CAN_UCAN=m
1980 +# end of CAN USB interfaces
1982 +# CONFIG_CAN_DEBUG_DEVICES is not set
1983 +# end of CAN Device Drivers
1985 +CONFIG_BT=m
1986 +CONFIG_BT_BREDR=y
1987 +CONFIG_BT_RFCOMM=m
1988 +CONFIG_BT_RFCOMM_TTY=y
1989 +CONFIG_BT_BNEP=m
1990 +CONFIG_BT_BNEP_MC_FILTER=y
1991 +CONFIG_BT_BNEP_PROTO_FILTER=y
1992 +CONFIG_BT_CMTP=m
1993 +CONFIG_BT_HIDP=m
1994 +CONFIG_BT_HS=y
1995 +CONFIG_BT_LE=y
1996 +CONFIG_BT_6LOWPAN=m
1997 +CONFIG_BT_LEDS=y
1998 +CONFIG_BT_MSFTEXT=y
1999 +CONFIG_BT_DEBUGFS=y
2000 +# CONFIG_BT_SELFTEST is not set
2003 +# Bluetooth device drivers
2005 +CONFIG_BT_INTEL=m
2006 +CONFIG_BT_BCM=m
2007 +CONFIG_BT_RTL=m
2008 +CONFIG_BT_QCA=m
2009 +CONFIG_BT_HCIBTUSB=m
2010 +CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y
2011 +CONFIG_BT_HCIBTUSB_BCM=y
2012 +CONFIG_BT_HCIBTUSB_MTK=y
2013 +CONFIG_BT_HCIBTUSB_RTL=y
2014 +CONFIG_BT_HCIBTSDIO=m
2015 +CONFIG_BT_HCIUART=m
2016 +CONFIG_BT_HCIUART_SERDEV=y
2017 +CONFIG_BT_HCIUART_H4=y
2018 +CONFIG_BT_HCIUART_NOKIA=m
2019 +CONFIG_BT_HCIUART_BCSP=y
2020 +CONFIG_BT_HCIUART_ATH3K=y
2021 +CONFIG_BT_HCIUART_LL=y
2022 +CONFIG_BT_HCIUART_3WIRE=y
2023 +CONFIG_BT_HCIUART_INTEL=y
2024 +CONFIG_BT_HCIUART_BCM=y
2025 +CONFIG_BT_HCIUART_RTL=y
2026 +CONFIG_BT_HCIUART_QCA=y
2027 +CONFIG_BT_HCIUART_AG6XX=y
2028 +CONFIG_BT_HCIUART_MRVL=y
2029 +CONFIG_BT_HCIBCM203X=m
2030 +CONFIG_BT_HCIBPA10X=m
2031 +CONFIG_BT_HCIBFUSB=m
2032 +CONFIG_BT_HCIDTL1=m
2033 +CONFIG_BT_HCIBT3C=m
2034 +CONFIG_BT_HCIBLUECARD=m
2035 +CONFIG_BT_HCIVHCI=m
2036 +CONFIG_BT_MRVL=m
2037 +CONFIG_BT_MRVL_SDIO=m
2038 +CONFIG_BT_ATH3K=m
2039 +CONFIG_BT_MTKSDIO=m
2040 +CONFIG_BT_MTKUART=m
2041 +CONFIG_BT_HCIRSI=m
2042 +# end of Bluetooth device drivers
2044 +CONFIG_AF_RXRPC=m
2045 +CONFIG_AF_RXRPC_IPV6=y
2046 +# CONFIG_AF_RXRPC_INJECT_LOSS is not set
2047 +# CONFIG_AF_RXRPC_DEBUG is not set
2048 +CONFIG_RXKAD=y
2049 +CONFIG_AF_KCM=m
2050 +CONFIG_STREAM_PARSER=y
2051 +CONFIG_FIB_RULES=y
2052 +CONFIG_WIRELESS=y
2053 +CONFIG_WIRELESS_EXT=y
2054 +CONFIG_WEXT_CORE=y
2055 +CONFIG_WEXT_PROC=y
2056 +CONFIG_WEXT_SPY=y
2057 +CONFIG_WEXT_PRIV=y
2058 +CONFIG_CFG80211=m
2059 +# CONFIG_NL80211_TESTMODE is not set
2060 +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
2061 +# CONFIG_CFG80211_CERTIFICATION_ONUS is not set
2062 +CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y
2063 +CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y
2064 +CONFIG_CFG80211_DEFAULT_PS=y
2065 +CONFIG_CFG80211_DEBUGFS=y
2066 +CONFIG_CFG80211_CRDA_SUPPORT=y
2067 +CONFIG_CFG80211_WEXT=y
2068 +CONFIG_CFG80211_WEXT_EXPORT=y
2069 +CONFIG_LIB80211=m
2070 +CONFIG_LIB80211_CRYPT_WEP=m
2071 +CONFIG_LIB80211_CRYPT_CCMP=m
2072 +CONFIG_LIB80211_CRYPT_TKIP=m
2073 +# CONFIG_LIB80211_DEBUG is not set
2074 +CONFIG_MAC80211=m
2075 +CONFIG_MAC80211_HAS_RC=y
2076 +CONFIG_MAC80211_RC_MINSTREL=y
2077 +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
2078 +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht"
2079 +CONFIG_MAC80211_MESH=y
2080 +CONFIG_MAC80211_LEDS=y
2081 +CONFIG_MAC80211_DEBUGFS=y
2082 +CONFIG_MAC80211_MESSAGE_TRACING=y
2083 +# CONFIG_MAC80211_DEBUG_MENU is not set
2084 +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0
2085 +CONFIG_RFKILL=y
2086 +CONFIG_RFKILL_LEDS=y
2087 +CONFIG_RFKILL_INPUT=y
2088 +CONFIG_RFKILL_GPIO=m
2089 +CONFIG_NET_9P=m
2090 +CONFIG_NET_9P_VIRTIO=m
2091 +CONFIG_NET_9P_XEN=m
2092 +CONFIG_NET_9P_RDMA=m
2093 +# CONFIG_NET_9P_DEBUG is not set
2094 +CONFIG_CAIF=m
2095 +# CONFIG_CAIF_DEBUG is not set
2096 +CONFIG_CAIF_NETDEV=m
2097 +CONFIG_CAIF_USB=m
2098 +CONFIG_CEPH_LIB=m
2099 +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set
2100 +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y
2101 +CONFIG_NFC=m
2102 +CONFIG_NFC_DIGITAL=m
2103 +CONFIG_NFC_NCI=m
2104 +CONFIG_NFC_NCI_SPI=m
2105 +CONFIG_NFC_NCI_UART=m
2106 +CONFIG_NFC_HCI=m
2107 +CONFIG_NFC_SHDLC=y
2110 +# Near Field Communication (NFC) devices
2112 +CONFIG_NFC_TRF7970A=m
2113 +CONFIG_NFC_MEI_PHY=m
2114 +CONFIG_NFC_SIM=m
2115 +CONFIG_NFC_PORT100=m
2116 +CONFIG_NFC_VIRTUAL_NCI=m
2117 +CONFIG_NFC_FDP=m
2118 +CONFIG_NFC_FDP_I2C=m
2119 +CONFIG_NFC_PN544=m
2120 +CONFIG_NFC_PN544_I2C=m
2121 +CONFIG_NFC_PN544_MEI=m
2122 +CONFIG_NFC_PN533=m
2123 +CONFIG_NFC_PN533_USB=m
2124 +CONFIG_NFC_PN533_I2C=m
2125 +CONFIG_NFC_PN532_UART=m
2126 +CONFIG_NFC_MICROREAD=m
2127 +CONFIG_NFC_MICROREAD_I2C=m
2128 +CONFIG_NFC_MICROREAD_MEI=m
2129 +CONFIG_NFC_MRVL=m
2130 +CONFIG_NFC_MRVL_USB=m
2131 +CONFIG_NFC_MRVL_UART=m
2132 +CONFIG_NFC_MRVL_I2C=m
2133 +CONFIG_NFC_MRVL_SPI=m
2134 +CONFIG_NFC_ST21NFCA=m
2135 +CONFIG_NFC_ST21NFCA_I2C=m
2136 +CONFIG_NFC_ST_NCI=m
2137 +CONFIG_NFC_ST_NCI_I2C=m
2138 +CONFIG_NFC_ST_NCI_SPI=m
2139 +CONFIG_NFC_NXP_NCI=m
2140 +CONFIG_NFC_NXP_NCI_I2C=m
2141 +CONFIG_NFC_S3FWRN5=m
2142 +CONFIG_NFC_S3FWRN5_I2C=m
2143 +CONFIG_NFC_S3FWRN82_UART=m
2144 +CONFIG_NFC_ST95HF=m
2145 +# end of Near Field Communication (NFC) devices
2147 +CONFIG_PSAMPLE=m
2148 +CONFIG_NET_IFE=m
2149 +CONFIG_LWTUNNEL=y
2150 +CONFIG_LWTUNNEL_BPF=y
2151 +CONFIG_DST_CACHE=y
2152 +CONFIG_GRO_CELLS=y
2153 +CONFIG_SOCK_VALIDATE_XMIT=y
2154 +CONFIG_NET_SOCK_MSG=y
2155 +CONFIG_NET_DEVLINK=y
2156 +CONFIG_PAGE_POOL=y
2157 +CONFIG_FAILOVER=m
2158 +CONFIG_ETHTOOL_NETLINK=y
2159 +CONFIG_HAVE_EBPF_JIT=y
2162 +# Device Drivers
2164 +CONFIG_HAVE_EISA=y
2165 +# CONFIG_EISA is not set
2166 +CONFIG_HAVE_PCI=y
2167 +CONFIG_PCI=y
2168 +CONFIG_PCI_DOMAINS=y
2169 +CONFIG_PCIEPORTBUS=y
2170 +CONFIG_HOTPLUG_PCI_PCIE=y
2171 +CONFIG_PCIEAER=y
2172 +# CONFIG_PCIEAER_INJECT is not set
2173 +# CONFIG_PCIE_ECRC is not set
2174 +CONFIG_PCIEASPM=y
2175 +CONFIG_PCIEASPM_DEFAULT=y
2176 +# CONFIG_PCIEASPM_POWERSAVE is not set
2177 +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set
2178 +# CONFIG_PCIEASPM_PERFORMANCE is not set
2179 +CONFIG_PCIE_PME=y
2180 +CONFIG_PCIE_DPC=y
2181 +CONFIG_PCIE_PTM=y
2182 +# CONFIG_PCIE_EDR is not set
2183 +CONFIG_PCI_MSI=y
2184 +CONFIG_PCI_MSI_IRQ_DOMAIN=y
2185 +CONFIG_PCI_QUIRKS=y
2186 +# CONFIG_PCI_DEBUG is not set
2187 +CONFIG_PCI_REALLOC_ENABLE_AUTO=y
2188 +CONFIG_PCI_STUB=m
2189 +CONFIG_PCI_PF_STUB=m
2190 +CONFIG_XEN_PCIDEV_FRONTEND=m
2191 +CONFIG_PCI_ATS=y
2192 +CONFIG_PCI_LOCKLESS_CONFIG=y
2193 +CONFIG_PCI_IOV=y
2194 +CONFIG_PCI_PRI=y
2195 +CONFIG_PCI_PASID=y
2196 +# CONFIG_PCI_P2PDMA is not set
2197 +CONFIG_PCI_LABEL=y
2198 +CONFIG_PCI_HYPERV=m
2199 +# CONFIG_PCIE_BUS_TUNE_OFF is not set
2200 +CONFIG_PCIE_BUS_DEFAULT=y
2201 +# CONFIG_PCIE_BUS_SAFE is not set
2202 +# CONFIG_PCIE_BUS_PERFORMANCE is not set
2203 +# CONFIG_PCIE_BUS_PEER2PEER is not set
2204 +CONFIG_HOTPLUG_PCI=y
2205 +CONFIG_HOTPLUG_PCI_ACPI=y
2206 +CONFIG_HOTPLUG_PCI_ACPI_IBM=m
2207 +CONFIG_HOTPLUG_PCI_CPCI=y
2208 +CONFIG_HOTPLUG_PCI_CPCI_ZT5550=m
2209 +CONFIG_HOTPLUG_PCI_CPCI_GENERIC=m
2210 +CONFIG_HOTPLUG_PCI_SHPC=y
2213 +# PCI controller drivers
2215 +CONFIG_VMD=m
2216 +CONFIG_PCI_HYPERV_INTERFACE=m
2219 +# DesignWare PCI Core Support
2221 +CONFIG_PCIE_DW=y
2222 +CONFIG_PCIE_DW_HOST=y
2223 +CONFIG_PCIE_DW_EP=y
2224 +CONFIG_PCIE_DW_PLAT=y
2225 +CONFIG_PCIE_DW_PLAT_HOST=y
2226 +CONFIG_PCIE_DW_PLAT_EP=y
2227 +# CONFIG_PCI_MESON is not set
2228 +# end of DesignWare PCI Core Support
2231 +# Mobiveil PCIe Core Support
2233 +# end of Mobiveil PCIe Core Support
2236 +# Cadence PCIe controllers support
2238 +# end of Cadence PCIe controllers support
2239 +# end of PCI controller drivers
2242 +# PCI Endpoint
2244 +CONFIG_PCI_ENDPOINT=y
2245 +CONFIG_PCI_ENDPOINT_CONFIGFS=y
2246 +# CONFIG_PCI_EPF_TEST is not set
2247 +CONFIG_PCI_EPF_NTB=m
2248 +# end of PCI Endpoint
2251 +# PCI switch controller drivers
2253 +CONFIG_PCI_SW_SWITCHTEC=m
2254 +# end of PCI switch controller drivers
2256 +CONFIG_CXL_BUS=m
2257 +CONFIG_CXL_MEM=m
2258 +# CONFIG_CXL_MEM_RAW_COMMANDS is not set
2259 +CONFIG_PCCARD=m
2260 +CONFIG_PCMCIA=m
2261 +CONFIG_PCMCIA_LOAD_CIS=y
2262 +CONFIG_CARDBUS=y
2265 +# PC-card bridges
2267 +CONFIG_YENTA=m
2268 +CONFIG_YENTA_O2=y
2269 +CONFIG_YENTA_RICOH=y
2270 +CONFIG_YENTA_TI=y
2271 +CONFIG_YENTA_ENE_TUNE=y
2272 +CONFIG_YENTA_TOSHIBA=y
2273 +CONFIG_PD6729=m
2274 +CONFIG_I82092=m
2275 +CONFIG_PCCARD_NONSTATIC=y
2276 +CONFIG_RAPIDIO=y
2277 +CONFIG_RAPIDIO_TSI721=m
2278 +CONFIG_RAPIDIO_DISC_TIMEOUT=30
2279 +# CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS is not set
2280 +CONFIG_RAPIDIO_DMA_ENGINE=y
2281 +# CONFIG_RAPIDIO_DEBUG is not set
2282 +CONFIG_RAPIDIO_ENUM_BASIC=m
2283 +CONFIG_RAPIDIO_CHMAN=m
2284 +CONFIG_RAPIDIO_MPORT_CDEV=m
2287 +# RapidIO Switch drivers
2289 +CONFIG_RAPIDIO_TSI57X=m
2290 +CONFIG_RAPIDIO_CPS_XX=m
2291 +CONFIG_RAPIDIO_TSI568=m
2292 +CONFIG_RAPIDIO_CPS_GEN2=m
2293 +CONFIG_RAPIDIO_RXS_GEN3=m
2294 +# end of RapidIO Switch drivers
2297 +# Generic Driver Options
2299 +CONFIG_AUXILIARY_BUS=y
2300 +CONFIG_UEVENT_HELPER=y
2301 +CONFIG_UEVENT_HELPER_PATH=""
2302 +CONFIG_DEVTMPFS=y
2303 +CONFIG_DEVTMPFS_MOUNT=y
2304 +# CONFIG_STANDALONE is not set
2305 +CONFIG_PREVENT_FIRMWARE_BUILD=y
2308 +# Firmware loader
2310 +CONFIG_FW_LOADER=y
2311 +CONFIG_FW_LOADER_PAGED_BUF=y
2312 +CONFIG_EXTRA_FIRMWARE=""
2313 +CONFIG_FW_LOADER_USER_HELPER=y
2314 +# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set
2315 +CONFIG_FW_LOADER_COMPRESS=y
2316 +CONFIG_FW_CACHE=y
2317 +# end of Firmware loader
2319 +CONFIG_WANT_DEV_COREDUMP=y
2320 +CONFIG_ALLOW_DEV_COREDUMP=y
2321 +CONFIG_DEV_COREDUMP=y
2322 +# CONFIG_DEBUG_DRIVER is not set
2323 +# CONFIG_DEBUG_DEVRES is not set
2324 +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set
2325 +CONFIG_HMEM_REPORTING=y
2326 +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set
2327 +CONFIG_SYS_HYPERVISOR=y
2328 +CONFIG_GENERIC_CPU_AUTOPROBE=y
2329 +CONFIG_GENERIC_CPU_VULNERABILITIES=y
2330 +CONFIG_REGMAP=y
2331 +CONFIG_REGMAP_I2C=y
2332 +CONFIG_REGMAP_SLIMBUS=m
2333 +CONFIG_REGMAP_SPI=y
2334 +CONFIG_REGMAP_SPMI=m
2335 +CONFIG_REGMAP_W1=m
2336 +CONFIG_REGMAP_MMIO=y
2337 +CONFIG_REGMAP_IRQ=y
2338 +CONFIG_REGMAP_SOUNDWIRE=m
2339 +CONFIG_REGMAP_SCCB=m
2340 +CONFIG_REGMAP_I3C=m
2341 +CONFIG_REGMAP_SPI_AVMM=m
2342 +CONFIG_DMA_SHARED_BUFFER=y
2343 +# CONFIG_DMA_FENCE_TRACE is not set
2344 +# end of Generic Driver Options
2347 +# Bus devices
2349 +CONFIG_MHI_BUS=m
2350 +# CONFIG_MHI_BUS_DEBUG is not set
2351 +CONFIG_MHI_BUS_PCI_GENERIC=m
2352 +# end of Bus devices
2354 +CONFIG_CONNECTOR=y
2355 +CONFIG_PROC_EVENTS=y
2356 +CONFIG_GNSS=m
2357 +CONFIG_GNSS_SERIAL=m
2358 +CONFIG_GNSS_MTK_SERIAL=m
2359 +CONFIG_GNSS_SIRF_SERIAL=m
2360 +CONFIG_GNSS_UBX_SERIAL=m
2361 +CONFIG_MTD=m
2362 +# CONFIG_MTD_TESTS is not set
2365 +# Partition parsers
2367 +CONFIG_MTD_AR7_PARTS=m
2368 +CONFIG_MTD_CMDLINE_PARTS=m
2369 +CONFIG_MTD_REDBOOT_PARTS=m
2370 +CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1
2371 +# CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set
2372 +# CONFIG_MTD_REDBOOT_PARTS_READONLY is not set
2373 +# end of Partition parsers
2376 +# User Modules And Translation Layers
2378 +CONFIG_MTD_BLKDEVS=m
2379 +CONFIG_MTD_BLOCK=m
2380 +CONFIG_MTD_BLOCK_RO=m
2381 +CONFIG_FTL=m
2382 +CONFIG_NFTL=m
2383 +CONFIG_NFTL_RW=y
2384 +CONFIG_INFTL=m
2385 +CONFIG_RFD_FTL=m
2386 +CONFIG_SSFDC=m
2387 +CONFIG_SM_FTL=m
2388 +CONFIG_MTD_OOPS=m
2389 +CONFIG_MTD_PSTORE=m
2390 +CONFIG_MTD_SWAP=m
2391 +# CONFIG_MTD_PARTITIONED_MASTER is not set
2394 +# RAM/ROM/Flash chip drivers
2396 +CONFIG_MTD_CFI=m
2397 +CONFIG_MTD_JEDECPROBE=m
2398 +CONFIG_MTD_GEN_PROBE=m
2399 +# CONFIG_MTD_CFI_ADV_OPTIONS is not set
2400 +CONFIG_MTD_MAP_BANK_WIDTH_1=y
2401 +CONFIG_MTD_MAP_BANK_WIDTH_2=y
2402 +CONFIG_MTD_MAP_BANK_WIDTH_4=y
2403 +CONFIG_MTD_CFI_I1=y
2404 +CONFIG_MTD_CFI_I2=y
2405 +CONFIG_MTD_CFI_INTELEXT=m
2406 +CONFIG_MTD_CFI_AMDSTD=m
2407 +CONFIG_MTD_CFI_STAA=m
2408 +CONFIG_MTD_CFI_UTIL=m
2409 +CONFIG_MTD_RAM=m
2410 +CONFIG_MTD_ROM=m
2411 +CONFIG_MTD_ABSENT=m
2412 +# end of RAM/ROM/Flash chip drivers
2415 +# Mapping drivers for chip access
2417 +CONFIG_MTD_COMPLEX_MAPPINGS=y
2418 +CONFIG_MTD_PHYSMAP=m
2419 +# CONFIG_MTD_PHYSMAP_COMPAT is not set
2420 +CONFIG_MTD_PHYSMAP_GPIO_ADDR=y
2421 +CONFIG_MTD_SBC_GXX=m
2422 +CONFIG_MTD_AMD76XROM=m
2423 +CONFIG_MTD_ICHXROM=m
2424 +CONFIG_MTD_ESB2ROM=m
2425 +CONFIG_MTD_CK804XROM=m
2426 +CONFIG_MTD_SCB2_FLASH=m
2427 +CONFIG_MTD_NETtel=m
2428 +CONFIG_MTD_L440GX=m
2429 +CONFIG_MTD_PCI=m
2430 +CONFIG_MTD_PCMCIA=m
2431 +# CONFIG_MTD_PCMCIA_ANONYMOUS is not set
2432 +CONFIG_MTD_INTEL_VR_NOR=m
2433 +CONFIG_MTD_PLATRAM=m
2434 +# end of Mapping drivers for chip access
2437 +# Self-contained MTD device drivers
2439 +CONFIG_MTD_PMC551=m
2440 +# CONFIG_MTD_PMC551_BUGFIX is not set
2441 +# CONFIG_MTD_PMC551_DEBUG is not set
2442 +CONFIG_MTD_DATAFLASH=m
2443 +# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
2444 +CONFIG_MTD_DATAFLASH_OTP=y
2445 +CONFIG_MTD_MCHP23K256=m
2446 +CONFIG_MTD_SST25L=m
2447 +CONFIG_MTD_SLRAM=m
2448 +CONFIG_MTD_PHRAM=m
2449 +CONFIG_MTD_MTDRAM=m
2450 +CONFIG_MTDRAM_TOTAL_SIZE=4096
2451 +CONFIG_MTDRAM_ERASE_SIZE=128
2452 +CONFIG_MTD_BLOCK2MTD=m
2455 +# Disk-On-Chip Device Drivers
2457 +# CONFIG_MTD_DOCG3 is not set
2458 +# end of Self-contained MTD device drivers
2461 +# NAND
2463 +CONFIG_MTD_NAND_CORE=m
2464 +CONFIG_MTD_ONENAND=m
2465 +CONFIG_MTD_ONENAND_VERIFY_WRITE=y
2466 +CONFIG_MTD_ONENAND_GENERIC=m
2467 +# CONFIG_MTD_ONENAND_OTP is not set
2468 +CONFIG_MTD_ONENAND_2X_PROGRAM=y
2469 +CONFIG_MTD_RAW_NAND=m
2472 +# Raw/parallel NAND flash controllers
2474 +CONFIG_MTD_NAND_DENALI=m
2475 +CONFIG_MTD_NAND_DENALI_PCI=m
2476 +CONFIG_MTD_NAND_CAFE=m
2477 +CONFIG_MTD_NAND_MXIC=m
2478 +CONFIG_MTD_NAND_GPIO=m
2479 +CONFIG_MTD_NAND_PLATFORM=m
2480 +CONFIG_MTD_NAND_ARASAN=m
2483 +# Misc
2485 +CONFIG_MTD_SM_COMMON=m
2486 +CONFIG_MTD_NAND_NANDSIM=m
2487 +CONFIG_MTD_NAND_RICOH=m
2488 +CONFIG_MTD_NAND_DISKONCHIP=m
2489 +# CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED is not set
2490 +CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0
2491 +# CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE is not set
2492 +CONFIG_MTD_SPI_NAND=m
2495 +# ECC engine support
2497 +CONFIG_MTD_NAND_ECC=y
2498 +CONFIG_MTD_NAND_ECC_SW_HAMMING=y
2499 +# CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC is not set
2500 +CONFIG_MTD_NAND_ECC_SW_BCH=y
2501 +# end of ECC engine support
2502 +# end of NAND
2505 +# LPDDR & LPDDR2 PCM memory drivers
2507 +CONFIG_MTD_LPDDR=m
2508 +CONFIG_MTD_QINFO_PROBE=m
2509 +# end of LPDDR & LPDDR2 PCM memory drivers
2511 +CONFIG_MTD_SPI_NOR=m
2512 +CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y
2513 +# CONFIG_MTD_SPI_NOR_SWP_DISABLE is not set
2514 +CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE=y
2515 +# CONFIG_MTD_SPI_NOR_SWP_KEEP is not set
2516 +# CONFIG_SPI_INTEL_SPI_PCI is not set
2517 +# CONFIG_SPI_INTEL_SPI_PLATFORM is not set
2518 +CONFIG_MTD_UBI=m
2519 +CONFIG_MTD_UBI_WL_THRESHOLD=4096
2520 +CONFIG_MTD_UBI_BEB_LIMIT=20
2521 +CONFIG_MTD_UBI_FASTMAP=y
2522 +CONFIG_MTD_UBI_GLUEBI=m
2523 +CONFIG_MTD_UBI_BLOCK=y
2524 +CONFIG_MTD_HYPERBUS=m
2525 +# CONFIG_OF is not set
2526 +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y
2527 +CONFIG_PARPORT=m
2528 +CONFIG_PARPORT_PC=m
2529 +CONFIG_PARPORT_SERIAL=m
2530 +CONFIG_PARPORT_PC_FIFO=y
2531 +# CONFIG_PARPORT_PC_SUPERIO is not set
2532 +CONFIG_PARPORT_PC_PCMCIA=m
2533 +CONFIG_PARPORT_AX88796=m
2534 +CONFIG_PARPORT_1284=y
2535 +CONFIG_PARPORT_NOT_PC=y
2536 +CONFIG_PNP=y
2537 +# CONFIG_PNP_DEBUG_MESSAGES is not set
2540 +# Protocols
2542 +CONFIG_PNPACPI=y
2543 +CONFIG_BLK_DEV=y
2544 +CONFIG_BLK_DEV_NULL_BLK=m
2545 +CONFIG_BLK_DEV_FD=m
2546 +CONFIG_CDROM=y
2547 +CONFIG_PARIDE=m
2550 +# Parallel IDE high-level drivers
2552 +CONFIG_PARIDE_PD=m
2553 +CONFIG_PARIDE_PCD=m
2554 +CONFIG_PARIDE_PF=m
2555 +CONFIG_PARIDE_PT=m
2556 +CONFIG_PARIDE_PG=m
2559 +# Parallel IDE protocol modules
2561 +CONFIG_PARIDE_ATEN=m
2562 +CONFIG_PARIDE_BPCK=m
2563 +CONFIG_PARIDE_COMM=m
2564 +CONFIG_PARIDE_DSTR=m
2565 +CONFIG_PARIDE_FIT2=m
2566 +CONFIG_PARIDE_FIT3=m
2567 +CONFIG_PARIDE_EPAT=m
2568 +CONFIG_PARIDE_EPATC8=y
2569 +CONFIG_PARIDE_EPIA=m
2570 +CONFIG_PARIDE_FRIQ=m
2571 +CONFIG_PARIDE_FRPW=m
2572 +CONFIG_PARIDE_KBIC=m
2573 +CONFIG_PARIDE_KTTI=m
2574 +CONFIG_PARIDE_ON20=m
2575 +CONFIG_PARIDE_ON26=m
2576 +CONFIG_BLK_DEV_PCIESSD_MTIP32XX=m
2577 +CONFIG_ZRAM=m
2578 +CONFIG_ZRAM_DEF_COMP_LZORLE=y
2579 +# CONFIG_ZRAM_DEF_COMP_ZSTD is not set
2580 +# CONFIG_ZRAM_DEF_COMP_LZ4 is not set
2581 +# CONFIG_ZRAM_DEF_COMP_LZO is not set
2582 +# CONFIG_ZRAM_DEF_COMP_LZ4HC is not set
2583 +# CONFIG_ZRAM_DEF_COMP_842 is not set
2584 +CONFIG_ZRAM_DEF_COMP="lzo-rle"
2585 +CONFIG_ZRAM_WRITEBACK=y
2586 +CONFIG_ZRAM_MEMORY_TRACKING=y
2587 +CONFIG_BLK_DEV_UMEM=m
2588 +CONFIG_BLK_DEV_LOOP=y
2589 +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
2590 +CONFIG_BLK_DEV_CRYPTOLOOP=m
2591 +CONFIG_BLK_DEV_DRBD=m
2592 +# CONFIG_DRBD_FAULT_INJECTION is not set
2593 +CONFIG_BLK_DEV_NBD=m
2594 +CONFIG_BLK_DEV_SX8=m
2595 +CONFIG_BLK_DEV_RAM=m
2596 +CONFIG_BLK_DEV_RAM_COUNT=16
2597 +CONFIG_BLK_DEV_RAM_SIZE=65536
2598 +CONFIG_CDROM_PKTCDVD=m
2599 +CONFIG_CDROM_PKTCDVD_BUFFERS=8
2600 +# CONFIG_CDROM_PKTCDVD_WCACHE is not set
2601 +CONFIG_ATA_OVER_ETH=m
2602 +CONFIG_XEN_BLKDEV_FRONTEND=y
2603 +CONFIG_XEN_BLKDEV_BACKEND=m
2604 +CONFIG_VIRTIO_BLK=m
2605 +CONFIG_BLK_DEV_RBD=m
2606 +CONFIG_BLK_DEV_RSXX=m
2607 +CONFIG_BLK_DEV_RNBD=y
2608 +CONFIG_BLK_DEV_RNBD_CLIENT=m
2609 +CONFIG_BLK_DEV_RNBD_SERVER=m
2612 +# NVME Support
2614 +CONFIG_NVME_CORE=m
2615 +CONFIG_BLK_DEV_NVME=m
2616 +CONFIG_NVME_MULTIPATH=y
2617 +CONFIG_NVME_HWMON=y
2618 +CONFIG_NVME_FABRICS=m
2619 +CONFIG_NVME_RDMA=m
2620 +CONFIG_NVME_FC=m
2621 +CONFIG_NVME_TCP=m
2622 +CONFIG_NVME_TARGET=m
2623 +CONFIG_NVME_TARGET_PASSTHRU=y
2624 +CONFIG_NVME_TARGET_LOOP=m
2625 +CONFIG_NVME_TARGET_RDMA=m
2626 +CONFIG_NVME_TARGET_FC=m
2627 +# CONFIG_NVME_TARGET_FCLOOP is not set
2628 +CONFIG_NVME_TARGET_TCP=m
2629 +# end of NVME Support
2632 +# Misc devices
2634 +CONFIG_SENSORS_LIS3LV02D=m
2635 +CONFIG_AD525X_DPOT=m
2636 +CONFIG_AD525X_DPOT_I2C=m
2637 +CONFIG_AD525X_DPOT_SPI=m
2638 +CONFIG_DUMMY_IRQ=m
2639 +CONFIG_IBM_ASM=m
2640 +CONFIG_PHANTOM=m
2641 +CONFIG_TIFM_CORE=m
2642 +CONFIG_TIFM_7XX1=m
2643 +CONFIG_ICS932S401=m
2644 +CONFIG_ENCLOSURE_SERVICES=m
2645 +CONFIG_SGI_XP=m
2646 +CONFIG_HP_ILO=m
2647 +CONFIG_SGI_GRU=m
2648 +# CONFIG_SGI_GRU_DEBUG is not set
2649 +CONFIG_APDS9802ALS=m
2650 +CONFIG_ISL29003=m
2651 +CONFIG_ISL29020=m
2652 +CONFIG_SENSORS_TSL2550=m
2653 +CONFIG_SENSORS_BH1770=m
2654 +CONFIG_SENSORS_APDS990X=m
2655 +CONFIG_HMC6352=m
2656 +CONFIG_DS1682=m
2657 +CONFIG_VMWARE_BALLOON=m
2658 +CONFIG_LATTICE_ECP3_CONFIG=m
2659 +CONFIG_SRAM=y
2660 +# CONFIG_PCI_ENDPOINT_TEST is not set
2661 +CONFIG_XILINX_SDFEC=m
2662 +CONFIG_MISC_RTSX=m
2663 +CONFIG_PVPANIC=m
2664 +CONFIG_C2PORT=m
2665 +CONFIG_C2PORT_DURAMAR_2150=m
2668 +# EEPROM support
2670 +CONFIG_EEPROM_AT24=m
2671 +CONFIG_EEPROM_AT25=m
2672 +CONFIG_EEPROM_LEGACY=m
2673 +CONFIG_EEPROM_MAX6875=m
2674 +CONFIG_EEPROM_93CX6=m
2675 +CONFIG_EEPROM_93XX46=m
2676 +CONFIG_EEPROM_IDT_89HPESX=m
2677 +CONFIG_EEPROM_EE1004=m
2678 +# end of EEPROM support
2680 +CONFIG_CB710_CORE=m
2681 +# CONFIG_CB710_DEBUG is not set
2682 +CONFIG_CB710_DEBUG_ASSUMPTIONS=y
2685 +# Texas Instruments shared transport line discipline
2687 +CONFIG_TI_ST=m
2688 +# end of Texas Instruments shared transport line discipline
2690 +CONFIG_SENSORS_LIS3_I2C=m
2691 +CONFIG_ALTERA_STAPL=m
2692 +CONFIG_INTEL_MEI=m
2693 +CONFIG_INTEL_MEI_ME=m
2694 +CONFIG_INTEL_MEI_TXE=m
2695 +CONFIG_INTEL_MEI_HDCP=m
2696 +CONFIG_VMWARE_VMCI=m
2697 +CONFIG_GENWQE=m
2698 +CONFIG_GENWQE_PLATFORM_ERROR_RECOVERY=0
2699 +CONFIG_ECHO=m
2700 +CONFIG_BCM_VK=m
2701 +CONFIG_BCM_VK_TTY=y
2702 +CONFIG_MISC_ALCOR_PCI=m
2703 +CONFIG_MISC_RTSX_PCI=m
2704 +CONFIG_MISC_RTSX_USB=m
2705 +CONFIG_HABANA_AI=m
2706 +CONFIG_UACCE=m
2707 +# end of Misc devices
2709 +CONFIG_HAVE_IDE=y
2710 +# CONFIG_IDE is not set
2713 +# SCSI device support
2715 +CONFIG_SCSI_MOD=y
2716 +CONFIG_RAID_ATTRS=m
2717 +CONFIG_SCSI=y
2718 +CONFIG_SCSI_DMA=y
2719 +CONFIG_SCSI_NETLINK=y
2720 +CONFIG_SCSI_PROC_FS=y
2723 +# SCSI support type (disk, tape, CD-ROM)
2725 +CONFIG_BLK_DEV_SD=y
2726 +CONFIG_CHR_DEV_ST=m
2727 +CONFIG_BLK_DEV_SR=y
2728 +CONFIG_CHR_DEV_SG=y
2729 +CONFIG_CHR_DEV_SCH=m
2730 +CONFIG_SCSI_ENCLOSURE=m
2731 +CONFIG_SCSI_CONSTANTS=y
2732 +CONFIG_SCSI_LOGGING=y
2733 +CONFIG_SCSI_SCAN_ASYNC=y
2736 +# SCSI Transports
2738 +CONFIG_SCSI_SPI_ATTRS=m
2739 +CONFIG_SCSI_FC_ATTRS=m
2740 +CONFIG_SCSI_ISCSI_ATTRS=m
2741 +CONFIG_SCSI_SAS_ATTRS=m
2742 +CONFIG_SCSI_SAS_LIBSAS=m
2743 +CONFIG_SCSI_SAS_ATA=y
2744 +CONFIG_SCSI_SAS_HOST_SMP=y
2745 +CONFIG_SCSI_SRP_ATTRS=m
2746 +# end of SCSI Transports
2748 +CONFIG_SCSI_LOWLEVEL=y
2749 +CONFIG_ISCSI_TCP=m
2750 +CONFIG_ISCSI_BOOT_SYSFS=m
2751 +CONFIG_SCSI_CXGB3_ISCSI=m
2752 +CONFIG_SCSI_CXGB4_ISCSI=m
2753 +CONFIG_SCSI_BNX2_ISCSI=m
2754 +CONFIG_SCSI_BNX2X_FCOE=m
2755 +CONFIG_BE2ISCSI=m
2756 +CONFIG_BLK_DEV_3W_XXXX_RAID=m
2757 +CONFIG_SCSI_HPSA=m
2758 +CONFIG_SCSI_3W_9XXX=m
2759 +CONFIG_SCSI_3W_SAS=m
2760 +CONFIG_SCSI_ACARD=m
2761 +CONFIG_SCSI_AACRAID=m
2762 +CONFIG_SCSI_AIC7XXX=m
2763 +CONFIG_AIC7XXX_CMDS_PER_DEVICE=8
2764 +CONFIG_AIC7XXX_RESET_DELAY_MS=5000
2765 +# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
2766 +CONFIG_AIC7XXX_DEBUG_MASK=0
2767 +CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
2768 +CONFIG_SCSI_AIC79XX=m
2769 +CONFIG_AIC79XX_CMDS_PER_DEVICE=32
2770 +CONFIG_AIC79XX_RESET_DELAY_MS=5000
2771 +# CONFIG_AIC79XX_DEBUG_ENABLE is not set
2772 +CONFIG_AIC79XX_DEBUG_MASK=0
2773 +CONFIG_AIC79XX_REG_PRETTY_PRINT=y
2774 +CONFIG_SCSI_AIC94XX=m
2775 +# CONFIG_AIC94XX_DEBUG is not set
2776 +CONFIG_SCSI_MVSAS=m
2777 +# CONFIG_SCSI_MVSAS_DEBUG is not set
2778 +# CONFIG_SCSI_MVSAS_TASKLET is not set
2779 +CONFIG_SCSI_MVUMI=m
2780 +CONFIG_SCSI_DPT_I2O=m
2781 +CONFIG_SCSI_ADVANSYS=m
2782 +CONFIG_SCSI_ARCMSR=m
2783 +CONFIG_SCSI_ESAS2R=m
2784 +CONFIG_MEGARAID_NEWGEN=y
2785 +CONFIG_MEGARAID_MM=m
2786 +CONFIG_MEGARAID_MAILBOX=m
2787 +CONFIG_MEGARAID_LEGACY=m
2788 +CONFIG_MEGARAID_SAS=m
2789 +CONFIG_SCSI_MPT3SAS=m
2790 +CONFIG_SCSI_MPT2SAS_MAX_SGE=128
2791 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128
2792 +CONFIG_SCSI_MPT2SAS=m
2793 +CONFIG_SCSI_SMARTPQI=m
2794 +CONFIG_SCSI_UFSHCD=m
2795 +CONFIG_SCSI_UFSHCD_PCI=m
2796 +CONFIG_SCSI_UFS_DWC_TC_PCI=m
2797 +CONFIG_SCSI_UFSHCD_PLATFORM=m
2798 +CONFIG_SCSI_UFS_CDNS_PLATFORM=m
2799 +CONFIG_SCSI_UFS_DWC_TC_PLATFORM=m
2800 +CONFIG_SCSI_UFS_BSG=y
2801 +CONFIG_SCSI_UFS_CRYPTO=y
2802 +CONFIG_SCSI_HPTIOP=m
2803 +CONFIG_SCSI_BUSLOGIC=m
2804 +CONFIG_SCSI_FLASHPOINT=y
2805 +CONFIG_SCSI_MYRB=m
2806 +CONFIG_SCSI_MYRS=m
2807 +CONFIG_VMWARE_PVSCSI=m
2808 +CONFIG_XEN_SCSI_FRONTEND=m
2809 +CONFIG_HYPERV_STORAGE=m
2810 +CONFIG_LIBFC=m
2811 +CONFIG_LIBFCOE=m
2812 +CONFIG_FCOE=m
2813 +CONFIG_FCOE_FNIC=m
2814 +CONFIG_SCSI_SNIC=m
2815 +# CONFIG_SCSI_SNIC_DEBUG_FS is not set
2816 +CONFIG_SCSI_DMX3191D=m
2817 +CONFIG_SCSI_FDOMAIN=m
2818 +CONFIG_SCSI_FDOMAIN_PCI=m
2819 +CONFIG_SCSI_ISCI=m
2820 +CONFIG_SCSI_IPS=m
2821 +CONFIG_SCSI_INITIO=m
2822 +CONFIG_SCSI_INIA100=m
2823 +CONFIG_SCSI_PPA=m
2824 +CONFIG_SCSI_IMM=m
2825 +# CONFIG_SCSI_IZIP_EPP16 is not set
2826 +# CONFIG_SCSI_IZIP_SLOW_CTR is not set
2827 +CONFIG_SCSI_STEX=m
2828 +CONFIG_SCSI_SYM53C8XX_2=m
2829 +CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1
2830 +CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16
2831 +CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
2832 +CONFIG_SCSI_SYM53C8XX_MMIO=y
2833 +CONFIG_SCSI_IPR=m
2834 +CONFIG_SCSI_IPR_TRACE=y
2835 +CONFIG_SCSI_IPR_DUMP=y
2836 +CONFIG_SCSI_QLOGIC_1280=m
2837 +CONFIG_SCSI_QLA_FC=m
2838 +CONFIG_TCM_QLA2XXX=m
2839 +# CONFIG_TCM_QLA2XXX_DEBUG is not set
2840 +CONFIG_SCSI_QLA_ISCSI=m
2841 +CONFIG_QEDI=m
2842 +CONFIG_QEDF=m
2843 +CONFIG_SCSI_LPFC=m
2844 +# CONFIG_SCSI_LPFC_DEBUG_FS is not set
2845 +CONFIG_SCSI_DC395x=m
2846 +CONFIG_SCSI_AM53C974=m
2847 +CONFIG_SCSI_WD719X=m
2848 +CONFIG_SCSI_DEBUG=m
2849 +CONFIG_SCSI_PMCRAID=m
2850 +CONFIG_SCSI_PM8001=m
2851 +CONFIG_SCSI_BFA_FC=m
2852 +CONFIG_SCSI_VIRTIO=m
2853 +CONFIG_SCSI_CHELSIO_FCOE=m
2854 +CONFIG_SCSI_LOWLEVEL_PCMCIA=y
2855 +CONFIG_PCMCIA_AHA152X=m
2856 +CONFIG_PCMCIA_FDOMAIN=m
2857 +CONFIG_PCMCIA_QLOGIC=m
2858 +CONFIG_PCMCIA_SYM53C500=m
2859 +CONFIG_SCSI_DH=y
2860 +CONFIG_SCSI_DH_RDAC=m
2861 +CONFIG_SCSI_DH_HP_SW=m
2862 +CONFIG_SCSI_DH_EMC=m
2863 +CONFIG_SCSI_DH_ALUA=m
2864 +# end of SCSI device support
2866 +CONFIG_ATA=y
2867 +CONFIG_SATA_HOST=y
2868 +CONFIG_PATA_TIMINGS=y
2869 +CONFIG_ATA_VERBOSE_ERROR=y
2870 +CONFIG_ATA_FORCE=y
2871 +CONFIG_ATA_ACPI=y
2872 +CONFIG_SATA_ZPODD=y
2873 +CONFIG_SATA_PMP=y
2876 +# Controllers with non-SFF native interface
2878 +CONFIG_SATA_AHCI=m
2879 +CONFIG_SATA_MOBILE_LPM_POLICY=3
2880 +CONFIG_SATA_AHCI_PLATFORM=m
2881 +CONFIG_SATA_INIC162X=m
2882 +CONFIG_SATA_ACARD_AHCI=m
2883 +CONFIG_SATA_SIL24=m
2884 +CONFIG_ATA_SFF=y
2887 +# SFF controllers with custom DMA interface
2889 +CONFIG_PDC_ADMA=m
2890 +CONFIG_SATA_QSTOR=m
2891 +CONFIG_SATA_SX4=m
2892 +CONFIG_ATA_BMDMA=y
2895 +# SATA SFF controllers with BMDMA
2897 +CONFIG_ATA_PIIX=y
2898 +CONFIG_SATA_DWC=m
2899 +CONFIG_SATA_DWC_OLD_DMA=y
2900 +# CONFIG_SATA_DWC_DEBUG is not set
2901 +CONFIG_SATA_MV=m
2902 +CONFIG_SATA_NV=m
2903 +CONFIG_SATA_PROMISE=m
2904 +CONFIG_SATA_SIL=m
2905 +CONFIG_SATA_SIS=m
2906 +CONFIG_SATA_SVW=m
2907 +CONFIG_SATA_ULI=m
2908 +CONFIG_SATA_VIA=m
2909 +CONFIG_SATA_VITESSE=m
2912 +# PATA SFF controllers with BMDMA
2914 +CONFIG_PATA_ALI=m
2915 +CONFIG_PATA_AMD=m
2916 +CONFIG_PATA_ARTOP=m
2917 +CONFIG_PATA_ATIIXP=m
2918 +CONFIG_PATA_ATP867X=m
2919 +CONFIG_PATA_CMD64X=m
2920 +CONFIG_PATA_CYPRESS=m
2921 +CONFIG_PATA_EFAR=m
2922 +CONFIG_PATA_HPT366=m
2923 +CONFIG_PATA_HPT37X=m
2924 +CONFIG_PATA_HPT3X2N=m
2925 +CONFIG_PATA_HPT3X3=m
2926 +# CONFIG_PATA_HPT3X3_DMA is not set
2927 +CONFIG_PATA_IT8213=m
2928 +CONFIG_PATA_IT821X=m
2929 +CONFIG_PATA_JMICRON=m
2930 +CONFIG_PATA_MARVELL=m
2931 +CONFIG_PATA_NETCELL=m
2932 +CONFIG_PATA_NINJA32=m
2933 +CONFIG_PATA_NS87415=m
2934 +CONFIG_PATA_OLDPIIX=m
2935 +CONFIG_PATA_OPTIDMA=m
2936 +CONFIG_PATA_PDC2027X=m
2937 +CONFIG_PATA_PDC_OLD=m
2938 +CONFIG_PATA_RADISYS=m
2939 +CONFIG_PATA_RDC=m
2940 +CONFIG_PATA_SCH=m
2941 +CONFIG_PATA_SERVERWORKS=m
2942 +CONFIG_PATA_SIL680=m
2943 +CONFIG_PATA_SIS=y
2944 +CONFIG_PATA_TOSHIBA=m
2945 +CONFIG_PATA_TRIFLEX=m
2946 +CONFIG_PATA_VIA=m
2947 +CONFIG_PATA_WINBOND=m
2950 +# PIO-only SFF controllers
2952 +CONFIG_PATA_CMD640_PCI=m
2953 +CONFIG_PATA_MPIIX=m
2954 +CONFIG_PATA_NS87410=m
2955 +CONFIG_PATA_OPTI=m
2956 +CONFIG_PATA_PCMCIA=m
2957 +CONFIG_PATA_PLATFORM=m
2958 +CONFIG_PATA_RZ1000=m
2961 +# Generic fallback / legacy drivers
2963 +CONFIG_PATA_ACPI=m
2964 +CONFIG_ATA_GENERIC=y
2965 +CONFIG_PATA_LEGACY=m
2966 +CONFIG_MD=y
2967 +CONFIG_BLK_DEV_MD=y
2968 +CONFIG_MD_AUTODETECT=y
2969 +CONFIG_MD_LINEAR=m
2970 +CONFIG_MD_RAID0=m
2971 +CONFIG_MD_RAID1=m
2972 +CONFIG_MD_RAID10=m
2973 +CONFIG_MD_RAID456=m
2974 +CONFIG_MD_MULTIPATH=m
2975 +CONFIG_MD_FAULTY=m
2976 +CONFIG_MD_CLUSTER=m
2977 +CONFIG_BCACHE=m
2978 +# CONFIG_BCACHE_DEBUG is not set
2979 +# CONFIG_BCACHE_CLOSURES_DEBUG is not set
2980 +CONFIG_BCACHE_ASYNC_REGISTRATION=y
2981 +CONFIG_BLK_DEV_DM_BUILTIN=y
2982 +CONFIG_BLK_DEV_DM=y
2983 +# CONFIG_DM_DEBUG is not set
2984 +CONFIG_DM_BUFIO=m
2985 +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set
2986 +CONFIG_DM_BIO_PRISON=m
2987 +CONFIG_DM_PERSISTENT_DATA=m
2988 +CONFIG_DM_UNSTRIPED=m
2989 +CONFIG_DM_CRYPT=m
2990 +CONFIG_DM_SNAPSHOT=m
2991 +CONFIG_DM_THIN_PROVISIONING=m
2992 +CONFIG_DM_CACHE=m
2993 +CONFIG_DM_CACHE_SMQ=m
2994 +CONFIG_DM_WRITECACHE=m
2995 +CONFIG_DM_EBS=m
2996 +CONFIG_DM_ERA=m
2997 +CONFIG_DM_CLONE=m
2998 +CONFIG_DM_MIRROR=m
2999 +CONFIG_DM_LOG_USERSPACE=m
3000 +CONFIG_DM_RAID=m
3001 +CONFIG_DM_ZERO=m
3002 +CONFIG_DM_MULTIPATH=m
3003 +CONFIG_DM_MULTIPATH_QL=m
3004 +CONFIG_DM_MULTIPATH_ST=m
3005 +CONFIG_DM_MULTIPATH_HST=m
3006 +CONFIG_DM_MULTIPATH_IOA=m
3007 +CONFIG_DM_DELAY=m
3008 +# CONFIG_DM_DUST is not set
3009 +CONFIG_DM_INIT=y
3010 +CONFIG_DM_UEVENT=y
3011 +CONFIG_DM_FLAKEY=m
3012 +CONFIG_DM_VERITY=m
3013 +CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y
3014 +# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG_SECONDARY_KEYRING is not set
3015 +# CONFIG_DM_VERITY_FEC is not set
3016 +CONFIG_DM_SWITCH=m
3017 +CONFIG_DM_LOG_WRITES=m
3018 +CONFIG_DM_INTEGRITY=m
3019 +CONFIG_DM_ZONED=m
3020 +CONFIG_TARGET_CORE=m
3021 +CONFIG_TCM_IBLOCK=m
3022 +CONFIG_TCM_FILEIO=m
3023 +CONFIG_TCM_PSCSI=m
3024 +CONFIG_TCM_USER2=m
3025 +CONFIG_LOOPBACK_TARGET=m
3026 +CONFIG_TCM_FC=m
3027 +CONFIG_ISCSI_TARGET=m
3028 +CONFIG_ISCSI_TARGET_CXGB4=m
3029 +CONFIG_SBP_TARGET=m
3030 +CONFIG_FUSION=y
3031 +CONFIG_FUSION_SPI=m
3032 +CONFIG_FUSION_FC=m
3033 +CONFIG_FUSION_SAS=m
3034 +CONFIG_FUSION_MAX_SGE=128
3035 +CONFIG_FUSION_CTL=m
3036 +CONFIG_FUSION_LAN=m
3037 +CONFIG_FUSION_LOGGING=y
3040 +# IEEE 1394 (FireWire) support
3042 +CONFIG_FIREWIRE=m
3043 +CONFIG_FIREWIRE_OHCI=m
3044 +CONFIG_FIREWIRE_SBP2=m
3045 +CONFIG_FIREWIRE_NET=m
3046 +CONFIG_FIREWIRE_NOSY=m
3047 +# end of IEEE 1394 (FireWire) support
3049 +CONFIG_MACINTOSH_DRIVERS=y
3050 +CONFIG_MAC_EMUMOUSEBTN=m
3051 +CONFIG_NETDEVICES=y
3052 +CONFIG_MII=m
3053 +CONFIG_NET_CORE=y
3054 +CONFIG_BONDING=m
3055 +CONFIG_DUMMY=m
3056 +CONFIG_WIREGUARD=m
3057 +# CONFIG_WIREGUARD_DEBUG is not set
3058 +CONFIG_EQUALIZER=m
3059 +CONFIG_NET_FC=y
3060 +CONFIG_IFB=m
3061 +CONFIG_NET_TEAM=m
3062 +CONFIG_NET_TEAM_MODE_BROADCAST=m
3063 +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
3064 +CONFIG_NET_TEAM_MODE_RANDOM=m
3065 +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
3066 +CONFIG_NET_TEAM_MODE_LOADBALANCE=m
3067 +CONFIG_MACVLAN=m
3068 +CONFIG_MACVTAP=m
3069 +CONFIG_IPVLAN_L3S=y
3070 +CONFIG_IPVLAN=m
3071 +CONFIG_IPVTAP=m
3072 +CONFIG_VXLAN=m
3073 +CONFIG_GENEVE=m
3074 +CONFIG_BAREUDP=m
3075 +CONFIG_GTP=m
3076 +CONFIG_MACSEC=m
3077 +CONFIG_NETCONSOLE=m
3078 +CONFIG_NETCONSOLE_DYNAMIC=y
3079 +CONFIG_NETPOLL=y
3080 +CONFIG_NET_POLL_CONTROLLER=y
3081 +CONFIG_NTB_NETDEV=m
3082 +CONFIG_RIONET=m
3083 +CONFIG_RIONET_TX_SIZE=128
3084 +CONFIG_RIONET_RX_SIZE=128
3085 +CONFIG_TUN=y
3086 +CONFIG_TAP=m
3087 +# CONFIG_TUN_VNET_CROSS_LE is not set
3088 +CONFIG_VETH=m
3089 +CONFIG_VIRTIO_NET=m
3090 +CONFIG_NLMON=m
3091 +CONFIG_NET_VRF=m
3092 +CONFIG_VSOCKMON=m
3093 +CONFIG_MHI_NET=m
3094 +CONFIG_SUNGEM_PHY=m
3095 +CONFIG_ARCNET=m
3096 +CONFIG_ARCNET_1201=m
3097 +CONFIG_ARCNET_1051=m
3098 +CONFIG_ARCNET_RAW=m
3099 +CONFIG_ARCNET_CAP=m
3100 +CONFIG_ARCNET_COM90xx=m
3101 +CONFIG_ARCNET_COM90xxIO=m
3102 +CONFIG_ARCNET_RIM_I=m
3103 +CONFIG_ARCNET_COM20020=m
3104 +CONFIG_ARCNET_COM20020_PCI=m
3105 +CONFIG_ARCNET_COM20020_CS=m
3106 +CONFIG_ATM_DRIVERS=y
3107 +CONFIG_ATM_DUMMY=m
3108 +CONFIG_ATM_TCP=m
3109 +CONFIG_ATM_LANAI=m
3110 +CONFIG_ATM_ENI=m
3111 +# CONFIG_ATM_ENI_DEBUG is not set
3112 +# CONFIG_ATM_ENI_TUNE_BURST is not set
3113 +CONFIG_ATM_FIRESTREAM=m
3114 +CONFIG_ATM_ZATM=m
3115 +# CONFIG_ATM_ZATM_DEBUG is not set
3116 +CONFIG_ATM_NICSTAR=m
3117 +# CONFIG_ATM_NICSTAR_USE_SUNI is not set
3118 +# CONFIG_ATM_NICSTAR_USE_IDT77105 is not set
3119 +CONFIG_ATM_IDT77252=m
3120 +# CONFIG_ATM_IDT77252_DEBUG is not set
3121 +# CONFIG_ATM_IDT77252_RCV_ALL is not set
3122 +CONFIG_ATM_IDT77252_USE_SUNI=y
3123 +CONFIG_ATM_AMBASSADOR=m
3124 +# CONFIG_ATM_AMBASSADOR_DEBUG is not set
3125 +CONFIG_ATM_HORIZON=m
3126 +# CONFIG_ATM_HORIZON_DEBUG is not set
3127 +CONFIG_ATM_IA=m
3128 +# CONFIG_ATM_IA_DEBUG is not set
3129 +CONFIG_ATM_FORE200E=m
3130 +# CONFIG_ATM_FORE200E_USE_TASKLET is not set
3131 +CONFIG_ATM_FORE200E_TX_RETRY=16
3132 +CONFIG_ATM_FORE200E_DEBUG=0
3133 +CONFIG_ATM_HE=m
3134 +CONFIG_ATM_HE_USE_SUNI=y
3135 +CONFIG_ATM_SOLOS=m
3136 +CONFIG_CAIF_DRIVERS=y
3137 +CONFIG_CAIF_TTY=m
3138 +CONFIG_CAIF_HSI=m
3139 +CONFIG_CAIF_VIRTIO=m
3142 +# Distributed Switch Architecture drivers
3144 +CONFIG_B53=m
3145 +CONFIG_B53_SPI_DRIVER=m
3146 +CONFIG_B53_MDIO_DRIVER=m
3147 +CONFIG_B53_MMAP_DRIVER=m
3148 +CONFIG_B53_SRAB_DRIVER=m
3149 +CONFIG_B53_SERDES=m
3150 +CONFIG_NET_DSA_BCM_SF2=m
3151 +# CONFIG_NET_DSA_LOOP is not set
3152 +CONFIG_NET_DSA_HIRSCHMANN_HELLCREEK=m
3153 +CONFIG_NET_DSA_LANTIQ_GSWIP=m
3154 +CONFIG_NET_DSA_MT7530=m
3155 +CONFIG_NET_DSA_MV88E6060=m
3156 +CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON=m
3157 +CONFIG_NET_DSA_MICROCHIP_KSZ9477=m
3158 +CONFIG_NET_DSA_MICROCHIP_KSZ9477_I2C=m
3159 +CONFIG_NET_DSA_MICROCHIP_KSZ9477_SPI=m
3160 +CONFIG_NET_DSA_MICROCHIP_KSZ8795=m
3161 +CONFIG_NET_DSA_MICROCHIP_KSZ8795_SPI=m
3162 +CONFIG_NET_DSA_MV88E6XXX=m
3163 +CONFIG_NET_DSA_MV88E6XXX_PTP=y
3164 +CONFIG_NET_DSA_MSCC_SEVILLE=m
3165 +CONFIG_NET_DSA_AR9331=m
3166 +CONFIG_NET_DSA_SJA1105=m
3167 +CONFIG_NET_DSA_SJA1105_PTP=y
3168 +CONFIG_NET_DSA_SJA1105_TAS=y
3169 +CONFIG_NET_DSA_SJA1105_VL=y
3170 +CONFIG_NET_DSA_XRS700X=m
3171 +CONFIG_NET_DSA_XRS700X_I2C=m
3172 +CONFIG_NET_DSA_XRS700X_MDIO=m
3173 +CONFIG_NET_DSA_QCA8K=m
3174 +CONFIG_NET_DSA_REALTEK_SMI=m
3175 +CONFIG_NET_DSA_SMSC_LAN9303=m
3176 +CONFIG_NET_DSA_SMSC_LAN9303_I2C=m
3177 +CONFIG_NET_DSA_SMSC_LAN9303_MDIO=m
3178 +CONFIG_NET_DSA_VITESSE_VSC73XX=m
3179 +CONFIG_NET_DSA_VITESSE_VSC73XX_SPI=m
3180 +CONFIG_NET_DSA_VITESSE_VSC73XX_PLATFORM=m
3181 +# end of Distributed Switch Architecture drivers
3183 +CONFIG_ETHERNET=y
3184 +CONFIG_MDIO=m
3185 +CONFIG_NET_VENDOR_3COM=y
3186 +CONFIG_PCMCIA_3C574=m
3187 +CONFIG_PCMCIA_3C589=m
3188 +CONFIG_VORTEX=m
3189 +CONFIG_TYPHOON=m
3190 +CONFIG_NET_VENDOR_ADAPTEC=y
3191 +CONFIG_ADAPTEC_STARFIRE=m
3192 +CONFIG_NET_VENDOR_AGERE=y
3193 +CONFIG_ET131X=m
3194 +CONFIG_NET_VENDOR_ALACRITECH=y
3195 +CONFIG_SLICOSS=m
3196 +CONFIG_NET_VENDOR_ALTEON=y
3197 +CONFIG_ACENIC=m
3198 +# CONFIG_ACENIC_OMIT_TIGON_I is not set
3199 +CONFIG_ALTERA_TSE=m
3200 +CONFIG_NET_VENDOR_AMAZON=y
3201 +CONFIG_ENA_ETHERNET=m
3202 +CONFIG_NET_VENDOR_AMD=y
3203 +CONFIG_AMD8111_ETH=m
3204 +CONFIG_PCNET32=m
3205 +CONFIG_PCMCIA_NMCLAN=m
3206 +CONFIG_AMD_XGBE=m
3207 +CONFIG_AMD_XGBE_DCB=y
3208 +CONFIG_AMD_XGBE_HAVE_ECC=y
3209 +CONFIG_NET_VENDOR_AQUANTIA=y
3210 +CONFIG_AQTION=m
3211 +CONFIG_NET_VENDOR_ARC=y
3212 +CONFIG_NET_VENDOR_ATHEROS=y
3213 +CONFIG_ATL2=m
3214 +CONFIG_ATL1=m
3215 +CONFIG_ATL1E=m
3216 +CONFIG_ATL1C=m
3217 +CONFIG_ALX=m
3218 +CONFIG_NET_VENDOR_BROADCOM=y
3219 +CONFIG_B44=m
3220 +CONFIG_B44_PCI_AUTOSELECT=y
3221 +CONFIG_B44_PCICORE_AUTOSELECT=y
3222 +CONFIG_B44_PCI=y
3223 +CONFIG_BCMGENET=m
3224 +CONFIG_BNX2=m
3225 +CONFIG_CNIC=m
3226 +CONFIG_TIGON3=m
3227 +CONFIG_TIGON3_HWMON=y
3228 +CONFIG_BNX2X=m
3229 +CONFIG_BNX2X_SRIOV=y
3230 +CONFIG_SYSTEMPORT=m
3231 +CONFIG_BNXT=m
3232 +CONFIG_BNXT_SRIOV=y
3233 +CONFIG_BNXT_FLOWER_OFFLOAD=y
3234 +CONFIG_BNXT_DCB=y
3235 +CONFIG_BNXT_HWMON=y
3236 +CONFIG_NET_VENDOR_BROCADE=y
3237 +CONFIG_BNA=m
3238 +CONFIG_NET_VENDOR_CADENCE=y
3239 +CONFIG_MACB=m
3240 +CONFIG_MACB_USE_HWSTAMP=y
3241 +CONFIG_MACB_PCI=m
3242 +CONFIG_NET_VENDOR_CAVIUM=y
3243 +CONFIG_THUNDER_NIC_PF=m
3244 +CONFIG_THUNDER_NIC_VF=m
3245 +CONFIG_THUNDER_NIC_BGX=m
3246 +CONFIG_THUNDER_NIC_RGX=m
3247 +CONFIG_CAVIUM_PTP=m
3248 +CONFIG_LIQUIDIO=m
3249 +CONFIG_LIQUIDIO_VF=m
3250 +CONFIG_NET_VENDOR_CHELSIO=y
3251 +CONFIG_CHELSIO_T1=m
3252 +CONFIG_CHELSIO_T1_1G=y
3253 +CONFIG_CHELSIO_T3=m
3254 +CONFIG_CHELSIO_T4=m
3255 +CONFIG_CHELSIO_T4_DCB=y
3256 +CONFIG_CHELSIO_T4_FCOE=y
3257 +CONFIG_CHELSIO_T4VF=m
3258 +CONFIG_CHELSIO_LIB=m
3259 +CONFIG_CHELSIO_INLINE_CRYPTO=y
3260 +CONFIG_CHELSIO_IPSEC_INLINE=m
3261 +CONFIG_CHELSIO_TLS_DEVICE=m
3262 +CONFIG_NET_VENDOR_CISCO=y
3263 +CONFIG_ENIC=m
3264 +CONFIG_NET_VENDOR_CORTINA=y
3265 +CONFIG_CX_ECAT=m
3266 +CONFIG_DNET=m
3267 +CONFIG_NET_VENDOR_DEC=y
3268 +CONFIG_NET_TULIP=y
3269 +CONFIG_DE2104X=m
3270 +CONFIG_DE2104X_DSL=0
3271 +CONFIG_TULIP=m
3272 +# CONFIG_TULIP_MWI is not set
3273 +# CONFIG_TULIP_MMIO is not set
3274 +# CONFIG_TULIP_NAPI is not set
3275 +CONFIG_DE4X5=m
3276 +CONFIG_WINBOND_840=m
3277 +CONFIG_DM9102=m
3278 +CONFIG_ULI526X=m
3279 +CONFIG_PCMCIA_XIRCOM=m
3280 +CONFIG_NET_VENDOR_DLINK=y
3281 +CONFIG_DL2K=m
3282 +CONFIG_SUNDANCE=m
3283 +# CONFIG_SUNDANCE_MMIO is not set
3284 +CONFIG_NET_VENDOR_EMULEX=y
3285 +CONFIG_BE2NET=m
3286 +CONFIG_BE2NET_HWMON=y
3287 +CONFIG_BE2NET_BE2=y
3288 +CONFIG_BE2NET_BE3=y
3289 +CONFIG_BE2NET_LANCER=y
3290 +CONFIG_BE2NET_SKYHAWK=y
3291 +CONFIG_NET_VENDOR_EZCHIP=y
3292 +CONFIG_NET_VENDOR_FUJITSU=y
3293 +CONFIG_PCMCIA_FMVJ18X=m
3294 +CONFIG_NET_VENDOR_GOOGLE=y
3295 +CONFIG_GVE=m
3296 +CONFIG_NET_VENDOR_HUAWEI=y
3297 +CONFIG_HINIC=m
3298 +CONFIG_NET_VENDOR_I825XX=y
3299 +CONFIG_NET_VENDOR_INTEL=y
3300 +CONFIG_E100=m
3301 +CONFIG_E1000=m
3302 +CONFIG_E1000E=m
3303 +CONFIG_E1000E_HWTS=y
3304 +CONFIG_IGB=m
3305 +CONFIG_IGB_HWMON=y
3306 +CONFIG_IGB_DCA=y
3307 +CONFIG_IGBVF=m
3308 +CONFIG_IXGB=m
3309 +CONFIG_IXGBE=m
3310 +CONFIG_IXGBE_HWMON=y
3311 +CONFIG_IXGBE_DCA=y
3312 +CONFIG_IXGBE_DCB=y
3313 +CONFIG_IXGBE_IPSEC=y
3314 +CONFIG_IXGBEVF=m
3315 +CONFIG_IXGBEVF_IPSEC=y
3316 +CONFIG_I40E=m
3317 +CONFIG_I40E_DCB=y
3318 +CONFIG_IAVF=m
3319 +CONFIG_I40EVF=m
3320 +CONFIG_ICE=m
3321 +CONFIG_FM10K=m
3322 +CONFIG_IGC=m
3323 +CONFIG_JME=m
3324 +CONFIG_NET_VENDOR_MARVELL=y
3325 +CONFIG_MVMDIO=m
3326 +CONFIG_SKGE=m
3327 +# CONFIG_SKGE_DEBUG is not set
3328 +CONFIG_SKGE_GENESIS=y
3329 +CONFIG_SKY2=m
3330 +# CONFIG_SKY2_DEBUG is not set
3331 +CONFIG_PRESTERA=m
3332 +CONFIG_PRESTERA_PCI=m
3333 +CONFIG_NET_VENDOR_MELLANOX=y
3334 +CONFIG_MLX4_EN=m
3335 +CONFIG_MLX4_EN_DCB=y
3336 +CONFIG_MLX4_CORE=m
3337 +CONFIG_MLX4_DEBUG=y
3338 +CONFIG_MLX4_CORE_GEN2=y
3339 +CONFIG_MLX5_CORE=m
3340 +CONFIG_MLX5_ACCEL=y
3341 +CONFIG_MLX5_FPGA=y
3342 +CONFIG_MLX5_CORE_EN=y
3343 +CONFIG_MLX5_EN_ARFS=y
3344 +CONFIG_MLX5_EN_RXNFC=y
3345 +CONFIG_MLX5_MPFS=y
3346 +CONFIG_MLX5_ESWITCH=y
3347 +CONFIG_MLX5_CLS_ACT=y
3348 +CONFIG_MLX5_TC_CT=y
3349 +CONFIG_MLX5_CORE_EN_DCB=y
3350 +CONFIG_MLX5_CORE_IPOIB=y
3351 +CONFIG_MLX5_FPGA_IPSEC=y
3352 +CONFIG_MLX5_IPSEC=y
3353 +CONFIG_MLX5_EN_IPSEC=y
3354 +CONFIG_MLX5_FPGA_TLS=y
3355 +CONFIG_MLX5_TLS=y
3356 +CONFIG_MLX5_EN_TLS=y
3357 +CONFIG_MLX5_SW_STEERING=y
3358 +CONFIG_MLX5_SF=y
3359 +CONFIG_MLX5_SF_MANAGER=y
3360 +CONFIG_MLXSW_CORE=m
3361 +CONFIG_MLXSW_CORE_HWMON=y
3362 +CONFIG_MLXSW_CORE_THERMAL=y
3363 +CONFIG_MLXSW_PCI=m
3364 +CONFIG_MLXSW_I2C=m
3365 +CONFIG_MLXSW_SWITCHIB=m
3366 +CONFIG_MLXSW_SWITCHX2=m
3367 +CONFIG_MLXSW_SPECTRUM=m
3368 +CONFIG_MLXSW_SPECTRUM_DCB=y
3369 +CONFIG_MLXSW_MINIMAL=m
3370 +CONFIG_MLXFW=m
3371 +CONFIG_NET_VENDOR_MICREL=y
3372 +CONFIG_KS8842=m
3373 +CONFIG_KS8851=m
3374 +CONFIG_KS8851_MLL=m
3375 +CONFIG_KSZ884X_PCI=m
3376 +CONFIG_NET_VENDOR_MICROCHIP=y
3377 +CONFIG_ENC28J60=m
3378 +# CONFIG_ENC28J60_WRITEVERIFY is not set
3379 +CONFIG_ENCX24J600=m
3380 +CONFIG_LAN743X=m
3381 +CONFIG_NET_VENDOR_MICROSEMI=y
3382 +CONFIG_MSCC_OCELOT_SWITCH_LIB=m
3383 +CONFIG_NET_VENDOR_MYRI=y
3384 +CONFIG_MYRI10GE=m
3385 +CONFIG_MYRI10GE_DCA=y
3386 +CONFIG_FEALNX=m
3387 +CONFIG_NET_VENDOR_NATSEMI=y
3388 +CONFIG_NATSEMI=m
3389 +CONFIG_NS83820=m
3390 +CONFIG_NET_VENDOR_NETERION=y
3391 +CONFIG_S2IO=m
3392 +CONFIG_VXGE=m
3393 +# CONFIG_VXGE_DEBUG_TRACE_ALL is not set
3394 +CONFIG_NET_VENDOR_NETRONOME=y
3395 +CONFIG_NFP=m
3396 +CONFIG_NFP_APP_FLOWER=y
3397 +CONFIG_NFP_APP_ABM_NIC=y
3398 +# CONFIG_NFP_DEBUG is not set
3399 +CONFIG_NET_VENDOR_NI=y
3400 +CONFIG_NI_XGE_MANAGEMENT_ENET=m
3401 +CONFIG_NET_VENDOR_8390=y
3402 +CONFIG_PCMCIA_AXNET=m
3403 +CONFIG_NE2K_PCI=m
3404 +CONFIG_PCMCIA_PCNET=m
3405 +CONFIG_NET_VENDOR_NVIDIA=y
3406 +CONFIG_FORCEDETH=m
3407 +CONFIG_NET_VENDOR_OKI=y
3408 +CONFIG_ETHOC=m
3409 +CONFIG_NET_VENDOR_PACKET_ENGINES=y
3410 +CONFIG_HAMACHI=m
3411 +CONFIG_YELLOWFIN=m
3412 +CONFIG_NET_VENDOR_PENSANDO=y
3413 +CONFIG_IONIC=m
3414 +CONFIG_NET_VENDOR_QLOGIC=y
3415 +CONFIG_QLA3XXX=m
3416 +CONFIG_QLCNIC=m
3417 +CONFIG_QLCNIC_SRIOV=y
3418 +CONFIG_QLCNIC_DCB=y
3419 +CONFIG_QLCNIC_HWMON=y
3420 +CONFIG_NETXEN_NIC=m
3421 +CONFIG_QED=m
3422 +CONFIG_QED_LL2=y
3423 +CONFIG_QED_SRIOV=y
3424 +CONFIG_QEDE=m
3425 +CONFIG_QED_RDMA=y
3426 +CONFIG_QED_ISCSI=y
3427 +CONFIG_QED_FCOE=y
3428 +CONFIG_QED_OOO=y
3429 +CONFIG_NET_VENDOR_QUALCOMM=y
3430 +CONFIG_QCOM_EMAC=m
3431 +CONFIG_RMNET=m
3432 +CONFIG_NET_VENDOR_RDC=y
3433 +CONFIG_R6040=m
3434 +CONFIG_NET_VENDOR_REALTEK=y
3435 +CONFIG_ATP=m
3436 +CONFIG_8139CP=m
3437 +CONFIG_8139TOO=m
3438 +CONFIG_8139TOO_PIO=y
3439 +# CONFIG_8139TOO_TUNE_TWISTER is not set
3440 +CONFIG_8139TOO_8129=y
3441 +# CONFIG_8139_OLD_RX_RESET is not set
3442 +CONFIG_R8169=m
3443 +CONFIG_NET_VENDOR_RENESAS=y
3444 +CONFIG_NET_VENDOR_ROCKER=y
3445 +CONFIG_ROCKER=m
3446 +CONFIG_NET_VENDOR_SAMSUNG=y
3447 +CONFIG_SXGBE_ETH=m
3448 +CONFIG_NET_VENDOR_SEEQ=y
3449 +CONFIG_NET_VENDOR_SOLARFLARE=y
3450 +CONFIG_SFC=m
3451 +CONFIG_SFC_MTD=y
3452 +CONFIG_SFC_MCDI_MON=y
3453 +CONFIG_SFC_SRIOV=y
3454 +CONFIG_SFC_MCDI_LOGGING=y
3455 +CONFIG_SFC_FALCON=m
3456 +CONFIG_SFC_FALCON_MTD=y
3457 +CONFIG_NET_VENDOR_SILAN=y
3458 +CONFIG_SC92031=m
3459 +CONFIG_NET_VENDOR_SIS=y
3460 +CONFIG_SIS900=m
3461 +CONFIG_SIS190=m
3462 +CONFIG_NET_VENDOR_SMSC=y
3463 +CONFIG_PCMCIA_SMC91C92=m
3464 +CONFIG_EPIC100=m
3465 +CONFIG_SMSC911X=m
3466 +CONFIG_SMSC9420=m
3467 +CONFIG_NET_VENDOR_SOCIONEXT=y
3468 +CONFIG_NET_VENDOR_STMICRO=y
3469 +CONFIG_STMMAC_ETH=m
3470 +# CONFIG_STMMAC_SELFTESTS is not set
3471 +CONFIG_STMMAC_PLATFORM=m
3472 +CONFIG_DWMAC_GENERIC=m
3473 +CONFIG_DWMAC_INTEL=m
3474 +CONFIG_STMMAC_PCI=m
3475 +CONFIG_NET_VENDOR_SUN=y
3476 +CONFIG_HAPPYMEAL=m
3477 +CONFIG_SUNGEM=m
3478 +CONFIG_CASSINI=m
3479 +CONFIG_NIU=m
3480 +CONFIG_NET_VENDOR_SYNOPSYS=y
3481 +CONFIG_DWC_XLGMAC=m
3482 +CONFIG_DWC_XLGMAC_PCI=m
3483 +CONFIG_NET_VENDOR_TEHUTI=y
3484 +CONFIG_TEHUTI=m
3485 +CONFIG_NET_VENDOR_TI=y
3486 +# CONFIG_TI_CPSW_PHY_SEL is not set
3487 +CONFIG_TLAN=m
3488 +CONFIG_NET_VENDOR_VIA=y
3489 +CONFIG_VIA_RHINE=m
3490 +CONFIG_VIA_RHINE_MMIO=y
3491 +CONFIG_VIA_VELOCITY=m
3492 +CONFIG_NET_VENDOR_WIZNET=y
3493 +CONFIG_WIZNET_W5100=m
3494 +CONFIG_WIZNET_W5300=m
3495 +# CONFIG_WIZNET_BUS_DIRECT is not set
3496 +# CONFIG_WIZNET_BUS_INDIRECT is not set
3497 +CONFIG_WIZNET_BUS_ANY=y
3498 +CONFIG_WIZNET_W5100_SPI=m
3499 +CONFIG_NET_VENDOR_XILINX=y
3500 +CONFIG_XILINX_EMACLITE=m
3501 +CONFIG_XILINX_AXI_EMAC=m
3502 +CONFIG_XILINX_LL_TEMAC=m
3503 +CONFIG_NET_VENDOR_XIRCOM=y
3504 +CONFIG_PCMCIA_XIRC2PS=m
3505 +CONFIG_FDDI=y
3506 +CONFIG_DEFXX=m
3507 +# CONFIG_DEFXX_MMIO is not set
3508 +CONFIG_SKFP=m
3509 +# CONFIG_HIPPI is not set
3510 +CONFIG_NET_SB1000=m
3511 +CONFIG_PHYLINK=m
3512 +CONFIG_PHYLIB=m
3513 +CONFIG_SWPHY=y
3514 +CONFIG_LED_TRIGGER_PHY=y
3515 +CONFIG_FIXED_PHY=m
3516 +CONFIG_SFP=m
3519 +# MII PHY device drivers
3521 +CONFIG_AMD_PHY=m
3522 +CONFIG_ADIN_PHY=m
3523 +CONFIG_AQUANTIA_PHY=m
3524 +CONFIG_AX88796B_PHY=m
3525 +CONFIG_BROADCOM_PHY=m
3526 +CONFIG_BCM54140_PHY=m
3527 +CONFIG_BCM7XXX_PHY=m
3528 +CONFIG_BCM84881_PHY=m
3529 +CONFIG_BCM87XX_PHY=m
3530 +CONFIG_BCM_NET_PHYLIB=m
3531 +CONFIG_CICADA_PHY=m
3532 +CONFIG_CORTINA_PHY=m
3533 +CONFIG_DAVICOM_PHY=m
3534 +CONFIG_ICPLUS_PHY=m
3535 +CONFIG_LXT_PHY=m
3536 +CONFIG_INTEL_XWAY_PHY=m
3537 +CONFIG_LSI_ET1011C_PHY=m
3538 +CONFIG_MARVELL_PHY=m
3539 +CONFIG_MARVELL_10G_PHY=m
3540 +CONFIG_MICREL_PHY=m
3541 +CONFIG_MICROCHIP_PHY=m
3542 +CONFIG_MICROCHIP_T1_PHY=m
3543 +CONFIG_MICROSEMI_PHY=m
3544 +CONFIG_NATIONAL_PHY=m
3545 +CONFIG_NXP_TJA11XX_PHY=m
3546 +CONFIG_AT803X_PHY=m
3547 +CONFIG_QSEMI_PHY=m
3548 +CONFIG_REALTEK_PHY=m
3549 +CONFIG_RENESAS_PHY=m
3550 +CONFIG_ROCKCHIP_PHY=m
3551 +CONFIG_SMSC_PHY=m
3552 +CONFIG_STE10XP=m
3553 +CONFIG_TERANETICS_PHY=m
3554 +CONFIG_DP83822_PHY=m
3555 +CONFIG_DP83TC811_PHY=m
3556 +CONFIG_DP83848_PHY=m
3557 +CONFIG_DP83867_PHY=m
3558 +CONFIG_DP83869_PHY=m
3559 +CONFIG_VITESSE_PHY=m
3560 +CONFIG_XILINX_GMII2RGMII=m
3561 +CONFIG_MICREL_KS8995MA=m
3562 +CONFIG_MDIO_DEVICE=m
3563 +CONFIG_MDIO_BUS=m
3564 +CONFIG_MDIO_DEVRES=m
3565 +CONFIG_MDIO_BITBANG=m
3566 +CONFIG_MDIO_BCM_UNIMAC=m
3567 +CONFIG_MDIO_CAVIUM=m
3568 +CONFIG_MDIO_GPIO=m
3569 +CONFIG_MDIO_I2C=m
3570 +CONFIG_MDIO_MVUSB=m
3571 +CONFIG_MDIO_MSCC_MIIM=m
3572 +CONFIG_MDIO_THUNDER=m
3575 +# MDIO Multiplexers
3579 +# PCS device drivers
3581 +CONFIG_PCS_XPCS=m
3582 +CONFIG_PCS_LYNX=m
3583 +# end of PCS device drivers
3585 +CONFIG_PLIP=m
3586 +CONFIG_PPP=y
3587 +CONFIG_PPP_BSDCOMP=m
3588 +CONFIG_PPP_DEFLATE=m
3589 +CONFIG_PPP_FILTER=y
3590 +CONFIG_PPP_MPPE=m
3591 +CONFIG_PPP_MULTILINK=y
3592 +CONFIG_PPPOATM=m
3593 +CONFIG_PPPOE=m
3594 +CONFIG_PPTP=m
3595 +CONFIG_PPPOL2TP=m
3596 +CONFIG_PPP_ASYNC=m
3597 +CONFIG_PPP_SYNC_TTY=m
3598 +CONFIG_SLIP=m
3599 +CONFIG_SLHC=y
3600 +CONFIG_SLIP_COMPRESSED=y
3601 +CONFIG_SLIP_SMART=y
3602 +CONFIG_SLIP_MODE_SLIP6=y
3603 +CONFIG_USB_NET_DRIVERS=m
3604 +CONFIG_USB_CATC=m
3605 +CONFIG_USB_KAWETH=m
3606 +CONFIG_USB_PEGASUS=m
3607 +CONFIG_USB_RTL8150=m
3608 +CONFIG_USB_RTL8152=m
3609 +CONFIG_USB_LAN78XX=m
3610 +CONFIG_USB_USBNET=m
3611 +CONFIG_USB_NET_AX8817X=m
3612 +CONFIG_USB_NET_AX88179_178A=m
3613 +CONFIG_USB_NET_CDCETHER=m
3614 +CONFIG_USB_NET_CDC_EEM=m
3615 +CONFIG_USB_NET_CDC_NCM=m
3616 +CONFIG_USB_NET_HUAWEI_CDC_NCM=m
3617 +CONFIG_USB_NET_CDC_MBIM=m
3618 +CONFIG_USB_NET_DM9601=m
3619 +CONFIG_USB_NET_SR9700=m
3620 +CONFIG_USB_NET_SR9800=m
3621 +CONFIG_USB_NET_SMSC75XX=m
3622 +CONFIG_USB_NET_SMSC95XX=m
3623 +CONFIG_USB_NET_GL620A=m
3624 +CONFIG_USB_NET_NET1080=m
3625 +CONFIG_USB_NET_PLUSB=m
3626 +CONFIG_USB_NET_MCS7830=m
3627 +CONFIG_USB_NET_RNDIS_HOST=m
3628 +CONFIG_USB_NET_CDC_SUBSET_ENABLE=m
3629 +CONFIG_USB_NET_CDC_SUBSET=m
3630 +CONFIG_USB_ALI_M5632=y
3631 +CONFIG_USB_AN2720=y
3632 +CONFIG_USB_BELKIN=y
3633 +CONFIG_USB_ARMLINUX=y
3634 +CONFIG_USB_EPSON2888=y
3635 +CONFIG_USB_KC2190=y
3636 +CONFIG_USB_NET_ZAURUS=m
3637 +CONFIG_USB_NET_CX82310_ETH=m
3638 +CONFIG_USB_NET_KALMIA=m
3639 +CONFIG_USB_NET_QMI_WWAN=m
3640 +CONFIG_USB_HSO=m
3641 +CONFIG_USB_NET_INT51X1=m
3642 +CONFIG_USB_CDC_PHONET=m
3643 +CONFIG_USB_IPHETH=m
3644 +CONFIG_USB_SIERRA_NET=m
3645 +CONFIG_USB_VL600=m
3646 +CONFIG_USB_NET_CH9200=m
3647 +CONFIG_USB_NET_AQC111=m
3648 +CONFIG_USB_RTL8153_ECM=m
3649 +CONFIG_WLAN=y
3650 +CONFIG_WLAN_VENDOR_ADMTEK=y
3651 +CONFIG_ADM8211=m
3652 +CONFIG_ATH_COMMON=m
3653 +CONFIG_WLAN_VENDOR_ATH=y
3654 +# CONFIG_ATH_DEBUG is not set
3655 +CONFIG_ATH5K=m
3656 +# CONFIG_ATH5K_DEBUG is not set
3657 +CONFIG_ATH5K_PCI=y
3658 +CONFIG_ATH9K_HW=m
3659 +CONFIG_ATH9K_COMMON=m
3660 +CONFIG_ATH9K_COMMON_DEBUG=y
3661 +CONFIG_ATH9K_BTCOEX_SUPPORT=y
3662 +CONFIG_ATH9K=m
3663 +CONFIG_ATH9K_PCI=y
3664 +CONFIG_ATH9K_AHB=y
3665 +CONFIG_ATH9K_DEBUGFS=y
3666 +CONFIG_ATH9K_STATION_STATISTICS=y
3667 +# CONFIG_ATH9K_DYNACK is not set
3668 +CONFIG_ATH9K_WOW=y
3669 +CONFIG_ATH9K_RFKILL=y
3670 +CONFIG_ATH9K_CHANNEL_CONTEXT=y
3671 +CONFIG_ATH9K_PCOEM=y
3672 +CONFIG_ATH9K_PCI_NO_EEPROM=m
3673 +CONFIG_ATH9K_HTC=m
3674 +CONFIG_ATH9K_HTC_DEBUGFS=y
3675 +CONFIG_ATH9K_HWRNG=y
3676 +CONFIG_ATH9K_COMMON_SPECTRAL=y
3677 +CONFIG_CARL9170=m
3678 +CONFIG_CARL9170_LEDS=y
3679 +# CONFIG_CARL9170_DEBUGFS is not set
3680 +CONFIG_CARL9170_WPC=y
3681 +CONFIG_CARL9170_HWRNG=y
3682 +CONFIG_ATH6KL=m
3683 +CONFIG_ATH6KL_SDIO=m
3684 +CONFIG_ATH6KL_USB=m
3685 +# CONFIG_ATH6KL_DEBUG is not set
3686 +CONFIG_AR5523=m
3687 +CONFIG_WIL6210=m
3688 +CONFIG_WIL6210_ISR_COR=y
3689 +CONFIG_WIL6210_DEBUGFS=y
3690 +CONFIG_ATH10K=m
3691 +CONFIG_ATH10K_CE=y
3692 +CONFIG_ATH10K_PCI=m
3693 +CONFIG_ATH10K_SDIO=m
3694 +CONFIG_ATH10K_USB=m
3695 +# CONFIG_ATH10K_DEBUG is not set
3696 +CONFIG_ATH10K_DEBUGFS=y
3697 +CONFIG_ATH10K_SPECTRAL=y
3698 +CONFIG_WCN36XX=m
3699 +# CONFIG_WCN36XX_DEBUGFS is not set
3700 +CONFIG_ATH11K=m
3701 +CONFIG_ATH11K_AHB=m
3702 +CONFIG_ATH11K_PCI=m
3703 +# CONFIG_ATH11K_DEBUG is not set
3704 +CONFIG_ATH11K_DEBUGFS=y
3705 +CONFIG_ATH11K_SPECTRAL=y
3706 +CONFIG_WLAN_VENDOR_ATMEL=y
3707 +CONFIG_ATMEL=m
3708 +CONFIG_PCI_ATMEL=m
3709 +CONFIG_PCMCIA_ATMEL=m
3710 +CONFIG_AT76C50X_USB=m
3711 +CONFIG_WLAN_VENDOR_BROADCOM=y
3712 +CONFIG_B43=m
3713 +CONFIG_B43_BCMA=y
3714 +CONFIG_B43_SSB=y
3715 +CONFIG_B43_BUSES_BCMA_AND_SSB=y
3716 +# CONFIG_B43_BUSES_BCMA is not set
3717 +# CONFIG_B43_BUSES_SSB is not set
3718 +CONFIG_B43_PCI_AUTOSELECT=y
3719 +CONFIG_B43_PCICORE_AUTOSELECT=y
3720 +# CONFIG_B43_SDIO is not set
3721 +CONFIG_B43_BCMA_PIO=y
3722 +CONFIG_B43_PIO=y
3723 +CONFIG_B43_PHY_G=y
3724 +CONFIG_B43_PHY_N=y
3725 +CONFIG_B43_PHY_LP=y
3726 +CONFIG_B43_PHY_HT=y
3727 +CONFIG_B43_LEDS=y
3728 +CONFIG_B43_HWRNG=y
3729 +# CONFIG_B43_DEBUG is not set
3730 +CONFIG_B43LEGACY=m
3731 +CONFIG_B43LEGACY_PCI_AUTOSELECT=y
3732 +CONFIG_B43LEGACY_PCICORE_AUTOSELECT=y
3733 +CONFIG_B43LEGACY_LEDS=y
3734 +CONFIG_B43LEGACY_HWRNG=y
3735 +# CONFIG_B43LEGACY_DEBUG is not set
3736 +CONFIG_B43LEGACY_DMA=y
3737 +CONFIG_B43LEGACY_PIO=y
3738 +CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
3739 +# CONFIG_B43LEGACY_DMA_MODE is not set
3740 +# CONFIG_B43LEGACY_PIO_MODE is not set
3741 +CONFIG_BRCMUTIL=m
3742 +CONFIG_BRCMSMAC=m
3743 +CONFIG_BRCMFMAC=m
3744 +CONFIG_BRCMFMAC_PROTO_BCDC=y
3745 +CONFIG_BRCMFMAC_PROTO_MSGBUF=y
3746 +CONFIG_BRCMFMAC_SDIO=y
3747 +CONFIG_BRCMFMAC_USB=y
3748 +CONFIG_BRCMFMAC_PCIE=y
3749 +CONFIG_BRCM_TRACING=y
3750 +# CONFIG_BRCMDBG is not set
3751 +CONFIG_WLAN_VENDOR_CISCO=y
3752 +CONFIG_AIRO=m
3753 +CONFIG_AIRO_CS=m
3754 +CONFIG_WLAN_VENDOR_INTEL=y
3755 +CONFIG_IPW2100=m
3756 +CONFIG_IPW2100_MONITOR=y
3757 +# CONFIG_IPW2100_DEBUG is not set
3758 +CONFIG_IPW2200=m
3759 +CONFIG_IPW2200_MONITOR=y
3760 +CONFIG_IPW2200_RADIOTAP=y
3761 +CONFIG_IPW2200_PROMISCUOUS=y
3762 +CONFIG_IPW2200_QOS=y
3763 +# CONFIG_IPW2200_DEBUG is not set
3764 +CONFIG_LIBIPW=m
3765 +# CONFIG_LIBIPW_DEBUG is not set
3766 +CONFIG_IWLEGACY=m
3767 +CONFIG_IWL4965=m
3768 +CONFIG_IWL3945=m
3771 +# iwl3945 / iwl4965 Debugging Options
3773 +# CONFIG_IWLEGACY_DEBUG is not set
3774 +CONFIG_IWLEGACY_DEBUGFS=y
3775 +# end of iwl3945 / iwl4965 Debugging Options
3777 +CONFIG_IWLWIFI=m
3778 +CONFIG_IWLWIFI_LEDS=y
3779 +CONFIG_IWLDVM=m
3780 +CONFIG_IWLMVM=m
3781 +CONFIG_IWLWIFI_OPMODE_MODULAR=y
3782 +# CONFIG_IWLWIFI_BCAST_FILTERING is not set
3785 +# Debugging Options
3787 +# CONFIG_IWLWIFI_DEBUG is not set
3788 +CONFIG_IWLWIFI_DEBUGFS=y
3789 +# end of Debugging Options
3791 +CONFIG_WLAN_VENDOR_INTERSIL=y
3792 +CONFIG_HOSTAP=m
3793 +CONFIG_HOSTAP_FIRMWARE=y
3794 +CONFIG_HOSTAP_FIRMWARE_NVRAM=y
3795 +CONFIG_HOSTAP_PLX=m
3796 +CONFIG_HOSTAP_PCI=m
3797 +CONFIG_HOSTAP_CS=m
3798 +CONFIG_HERMES=m
3799 +# CONFIG_HERMES_PRISM is not set
3800 +CONFIG_HERMES_CACHE_FW_ON_INIT=y
3801 +CONFIG_PLX_HERMES=m
3802 +CONFIG_TMD_HERMES=m
3803 +CONFIG_NORTEL_HERMES=m
3804 +CONFIG_PCMCIA_HERMES=m
3805 +CONFIG_PCMCIA_SPECTRUM=m
3806 +CONFIG_ORINOCO_USB=m
3807 +CONFIG_P54_COMMON=m
3808 +CONFIG_P54_USB=m
3809 +CONFIG_P54_PCI=m
3810 +CONFIG_P54_SPI=m
3811 +# CONFIG_P54_SPI_DEFAULT_EEPROM is not set
3812 +CONFIG_P54_LEDS=y
3813 +# CONFIG_PRISM54 is not set
3814 +CONFIG_WLAN_VENDOR_MARVELL=y
3815 +CONFIG_LIBERTAS=m
3816 +CONFIG_LIBERTAS_USB=m
3817 +CONFIG_LIBERTAS_CS=m
3818 +CONFIG_LIBERTAS_SDIO=m
3819 +CONFIG_LIBERTAS_SPI=m
3820 +# CONFIG_LIBERTAS_DEBUG is not set
3821 +CONFIG_LIBERTAS_MESH=y
3822 +CONFIG_LIBERTAS_THINFIRM=m
3823 +# CONFIG_LIBERTAS_THINFIRM_DEBUG is not set
3824 +CONFIG_LIBERTAS_THINFIRM_USB=m
3825 +CONFIG_MWIFIEX=m
3826 +CONFIG_MWIFIEX_SDIO=m
3827 +CONFIG_MWIFIEX_PCIE=m
3828 +CONFIG_MWIFIEX_USB=m
3829 +CONFIG_MWL8K=m
3830 +CONFIG_WLAN_VENDOR_MEDIATEK=y
3831 +CONFIG_MT7601U=m
3832 +CONFIG_MT76_CORE=m
3833 +CONFIG_MT76_LEDS=y
3834 +CONFIG_MT76_USB=m
3835 +CONFIG_MT76_SDIO=m
3836 +CONFIG_MT76x02_LIB=m
3837 +CONFIG_MT76x02_USB=m
3838 +CONFIG_MT76_CONNAC_LIB=m
3839 +CONFIG_MT76x0_COMMON=m
3840 +CONFIG_MT76x0U=m
3841 +CONFIG_MT76x0E=m
3842 +CONFIG_MT76x2_COMMON=m
3843 +CONFIG_MT76x2E=m
3844 +CONFIG_MT76x2U=m
3845 +CONFIG_MT7603E=m
3846 +CONFIG_MT7615_COMMON=m
3847 +CONFIG_MT7615E=m
3848 +CONFIG_MT7663_USB_SDIO_COMMON=m
3849 +CONFIG_MT7663U=m
3850 +CONFIG_MT7663S=m
3851 +CONFIG_MT7915E=m
3852 +CONFIG_MT7921E=m
3853 +CONFIG_WLAN_VENDOR_MICROCHIP=y
3854 +CONFIG_WILC1000=m
3855 +CONFIG_WILC1000_SDIO=m
3856 +CONFIG_WILC1000_SPI=m
3857 +CONFIG_WILC1000_HW_OOB_INTR=y
3858 +CONFIG_WLAN_VENDOR_RALINK=y
3859 +CONFIG_RT2X00=m
3860 +CONFIG_RT2400PCI=m
3861 +CONFIG_RT2500PCI=m
3862 +CONFIG_RT61PCI=m
3863 +CONFIG_RT2800PCI=m
3864 +CONFIG_RT2800PCI_RT33XX=y
3865 +CONFIG_RT2800PCI_RT35XX=y
3866 +CONFIG_RT2800PCI_RT53XX=y
3867 +CONFIG_RT2800PCI_RT3290=y
3868 +CONFIG_RT2500USB=m
3869 +CONFIG_RT73USB=m
3870 +CONFIG_RT2800USB=m
3871 +CONFIG_RT2800USB_RT33XX=y
3872 +CONFIG_RT2800USB_RT35XX=y
3873 +CONFIG_RT2800USB_RT3573=y
3874 +CONFIG_RT2800USB_RT53XX=y
3875 +CONFIG_RT2800USB_RT55XX=y
3876 +CONFIG_RT2800USB_UNKNOWN=y
3877 +CONFIG_RT2800_LIB=m
3878 +CONFIG_RT2800_LIB_MMIO=m
3879 +CONFIG_RT2X00_LIB_MMIO=m
3880 +CONFIG_RT2X00_LIB_PCI=m
3881 +CONFIG_RT2X00_LIB_USB=m
3882 +CONFIG_RT2X00_LIB=m
3883 +CONFIG_RT2X00_LIB_FIRMWARE=y
3884 +CONFIG_RT2X00_LIB_CRYPTO=y
3885 +CONFIG_RT2X00_LIB_LEDS=y
3886 +# CONFIG_RT2X00_LIB_DEBUGFS is not set
3887 +# CONFIG_RT2X00_DEBUG is not set
3888 +CONFIG_WLAN_VENDOR_REALTEK=y
3889 +CONFIG_RTL8180=m
3890 +CONFIG_RTL8187=m
3891 +CONFIG_RTL8187_LEDS=y
3892 +CONFIG_RTL_CARDS=m
3893 +CONFIG_RTL8192CE=m
3894 +CONFIG_RTL8192SE=m
3895 +CONFIG_RTL8192DE=m
3896 +CONFIG_RTL8723AE=m
3897 +CONFIG_RTL8723BE=m
3898 +CONFIG_RTL8188EE=m
3899 +CONFIG_RTL8192EE=m
3900 +CONFIG_RTL8821AE=m
3901 +CONFIG_RTL8192CU=m
3902 +CONFIG_RTLWIFI=m
3903 +CONFIG_RTLWIFI_PCI=m
3904 +CONFIG_RTLWIFI_USB=m
3905 +# CONFIG_RTLWIFI_DEBUG is not set
3906 +CONFIG_RTL8192C_COMMON=m
3907 +CONFIG_RTL8723_COMMON=m
3908 +CONFIG_RTLBTCOEXIST=m
3909 +CONFIG_RTL8XXXU=m
3910 +CONFIG_RTL8XXXU_UNTESTED=y
3911 +CONFIG_RTW88=m
3912 +CONFIG_RTW88_CORE=m
3913 +CONFIG_RTW88_PCI=m
3914 +CONFIG_RTW88_8822B=m
3915 +CONFIG_RTW88_8822C=m
3916 +CONFIG_RTW88_8723D=m
3917 +CONFIG_RTW88_8821C=m
3918 +CONFIG_RTW88_8822BE=m
3919 +CONFIG_RTW88_8822CE=m
3920 +CONFIG_RTW88_8723DE=m
3921 +CONFIG_RTW88_8821CE=m
3922 +CONFIG_RTW88_DEBUG=y
3923 +CONFIG_RTW88_DEBUGFS=y
3924 +CONFIG_WLAN_VENDOR_RSI=y
3925 +CONFIG_RSI_91X=m
3926 +# CONFIG_RSI_DEBUGFS is not set
3927 +CONFIG_RSI_SDIO=m
3928 +CONFIG_RSI_USB=m
3929 +CONFIG_RSI_COEX=y
3930 +CONFIG_WLAN_VENDOR_ST=y
3931 +CONFIG_CW1200=m
3932 +CONFIG_CW1200_WLAN_SDIO=m
3933 +CONFIG_CW1200_WLAN_SPI=m
3934 +CONFIG_WLAN_VENDOR_TI=y
3935 +CONFIG_WL1251=m
3936 +CONFIG_WL1251_SPI=m
3937 +CONFIG_WL1251_SDIO=m
3938 +CONFIG_WL12XX=m
3939 +CONFIG_WL18XX=m
3940 +CONFIG_WLCORE=m
3941 +CONFIG_WLCORE_SDIO=m
3942 +CONFIG_WILINK_PLATFORM_DATA=y
3943 +CONFIG_WLAN_VENDOR_ZYDAS=y
3944 +CONFIG_USB_ZD1201=m
3945 +CONFIG_ZD1211RW=m
3946 +# CONFIG_ZD1211RW_DEBUG is not set
3947 +CONFIG_WLAN_VENDOR_QUANTENNA=y
3948 +CONFIG_QTNFMAC=m
3949 +CONFIG_QTNFMAC_PCIE=m
3950 +CONFIG_PCMCIA_RAYCS=m
3951 +CONFIG_PCMCIA_WL3501=m
3952 +CONFIG_MAC80211_HWSIM=m
3953 +CONFIG_USB_NET_RNDIS_WLAN=m
3954 +CONFIG_VIRT_WIFI=m
3955 +CONFIG_WAN=y
3956 +CONFIG_LANMEDIA=m
3957 +CONFIG_HDLC=m
3958 +CONFIG_HDLC_RAW=m
3959 +CONFIG_HDLC_RAW_ETH=m
3960 +CONFIG_HDLC_CISCO=m
3961 +CONFIG_HDLC_FR=m
3962 +CONFIG_HDLC_PPP=m
3963 +CONFIG_HDLC_X25=m
3964 +CONFIG_PCI200SYN=m
3965 +CONFIG_WANXL=m
3966 +CONFIG_PC300TOO=m
3967 +CONFIG_FARSYNC=m
3968 +CONFIG_LAPBETHER=m
3969 +CONFIG_SBNI=m
3970 +# CONFIG_SBNI_MULTILINE is not set
3971 +CONFIG_IEEE802154_DRIVERS=m
3972 +CONFIG_IEEE802154_FAKELB=m
3973 +CONFIG_IEEE802154_AT86RF230=m
3974 +CONFIG_IEEE802154_AT86RF230_DEBUGFS=y
3975 +CONFIG_IEEE802154_MRF24J40=m
3976 +CONFIG_IEEE802154_CC2520=m
3977 +CONFIG_IEEE802154_ATUSB=m
3978 +CONFIG_IEEE802154_ADF7242=m
3979 +CONFIG_IEEE802154_CA8210=m
3980 +CONFIG_IEEE802154_CA8210_DEBUGFS=y
3981 +CONFIG_IEEE802154_MCR20A=m
3982 +CONFIG_IEEE802154_HWSIM=m
3983 +CONFIG_XEN_NETDEV_FRONTEND=y
3984 +CONFIG_XEN_NETDEV_BACKEND=m
3985 +CONFIG_VMXNET3=m
3986 +CONFIG_FUJITSU_ES=m
3987 +CONFIG_USB4_NET=m
3988 +CONFIG_HYPERV_NET=m
3989 +CONFIG_NETDEVSIM=m
3990 +CONFIG_NET_FAILOVER=m
3991 +CONFIG_ISDN=y
3992 +CONFIG_ISDN_CAPI=y
3993 +CONFIG_CAPI_TRACE=y
3994 +CONFIG_ISDN_CAPI_MIDDLEWARE=y
3995 +CONFIG_MISDN=m
3996 +CONFIG_MISDN_DSP=m
3997 +CONFIG_MISDN_L1OIP=m
4000 +# mISDN hardware drivers
4002 +CONFIG_MISDN_HFCPCI=m
4003 +CONFIG_MISDN_HFCMULTI=m
4004 +CONFIG_MISDN_HFCUSB=m
4005 +CONFIG_MISDN_AVMFRITZ=m
4006 +CONFIG_MISDN_SPEEDFAX=m
4007 +CONFIG_MISDN_INFINEON=m
4008 +CONFIG_MISDN_W6692=m
4009 +CONFIG_MISDN_NETJET=m
4010 +CONFIG_MISDN_HDLC=m
4011 +CONFIG_MISDN_IPAC=m
4012 +CONFIG_MISDN_ISAR=m
4013 +CONFIG_NVM=y
4014 +CONFIG_NVM_PBLK=m
4015 +# CONFIG_NVM_PBLK_DEBUG is not set
4018 +# Input device support
4020 +CONFIG_INPUT=y
4021 +CONFIG_INPUT_LEDS=m
4022 +CONFIG_INPUT_FF_MEMLESS=m
4023 +CONFIG_INPUT_SPARSEKMAP=m
4024 +CONFIG_INPUT_MATRIXKMAP=m
4027 +# Userland interfaces
4029 +CONFIG_INPUT_MOUSEDEV=y
4030 +CONFIG_INPUT_MOUSEDEV_PSAUX=y
4031 +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
4032 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
4033 +CONFIG_INPUT_JOYDEV=m
4034 +CONFIG_INPUT_EVDEV=y
4035 +# CONFIG_INPUT_EVBUG is not set
4038 +# Input Device Drivers
4040 +CONFIG_INPUT_KEYBOARD=y
4041 +CONFIG_KEYBOARD_ADC=m
4042 +CONFIG_KEYBOARD_ADP5520=m
4043 +CONFIG_KEYBOARD_ADP5588=m
4044 +CONFIG_KEYBOARD_ADP5589=m
4045 +CONFIG_KEYBOARD_APPLESPI=m
4046 +CONFIG_KEYBOARD_ATKBD=y
4047 +CONFIG_KEYBOARD_QT1050=m
4048 +CONFIG_KEYBOARD_QT1070=m
4049 +CONFIG_KEYBOARD_QT2160=m
4050 +CONFIG_KEYBOARD_DLINK_DIR685=m
4051 +CONFIG_KEYBOARD_LKKBD=m
4052 +CONFIG_KEYBOARD_GPIO=m
4053 +CONFIG_KEYBOARD_GPIO_POLLED=m
4054 +CONFIG_KEYBOARD_TCA6416=m
4055 +CONFIG_KEYBOARD_TCA8418=m
4056 +CONFIG_KEYBOARD_MATRIX=m
4057 +CONFIG_KEYBOARD_LM8323=m
4058 +CONFIG_KEYBOARD_LM8333=m
4059 +CONFIG_KEYBOARD_MAX7359=m
4060 +CONFIG_KEYBOARD_MCS=m
4061 +CONFIG_KEYBOARD_MPR121=m
4062 +CONFIG_KEYBOARD_NEWTON=m
4063 +CONFIG_KEYBOARD_OPENCORES=m
4064 +CONFIG_KEYBOARD_SAMSUNG=m
4065 +CONFIG_KEYBOARD_STOWAWAY=m
4066 +CONFIG_KEYBOARD_SUNKBD=m
4067 +CONFIG_KEYBOARD_IQS62X=m
4068 +CONFIG_KEYBOARD_TM2_TOUCHKEY=m
4069 +CONFIG_KEYBOARD_TWL4030=m
4070 +CONFIG_KEYBOARD_XTKBD=m
4071 +CONFIG_KEYBOARD_CROS_EC=m
4072 +CONFIG_KEYBOARD_MTK_PMIC=m
4073 +CONFIG_INPUT_MOUSE=y
4074 +CONFIG_MOUSE_PS2=m
4075 +CONFIG_MOUSE_PS2_ALPS=y
4076 +CONFIG_MOUSE_PS2_BYD=y
4077 +CONFIG_MOUSE_PS2_LOGIPS2PP=y
4078 +CONFIG_MOUSE_PS2_SYNAPTICS=y
4079 +CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y
4080 +CONFIG_MOUSE_PS2_CYPRESS=y
4081 +CONFIG_MOUSE_PS2_LIFEBOOK=y
4082 +CONFIG_MOUSE_PS2_TRACKPOINT=y
4083 +CONFIG_MOUSE_PS2_ELANTECH=y
4084 +CONFIG_MOUSE_PS2_ELANTECH_SMBUS=y
4085 +CONFIG_MOUSE_PS2_SENTELIC=y
4086 +CONFIG_MOUSE_PS2_TOUCHKIT=y
4087 +CONFIG_MOUSE_PS2_FOCALTECH=y
4088 +CONFIG_MOUSE_PS2_VMMOUSE=y
4089 +CONFIG_MOUSE_PS2_SMBUS=y
4090 +CONFIG_MOUSE_SERIAL=m
4091 +CONFIG_MOUSE_APPLETOUCH=m
4092 +CONFIG_MOUSE_BCM5974=m
4093 +CONFIG_MOUSE_CYAPA=m
4094 +CONFIG_MOUSE_ELAN_I2C=m
4095 +CONFIG_MOUSE_ELAN_I2C_I2C=y
4096 +CONFIG_MOUSE_ELAN_I2C_SMBUS=y
4097 +CONFIG_MOUSE_VSXXXAA=m
4098 +CONFIG_MOUSE_GPIO=m
4099 +CONFIG_MOUSE_SYNAPTICS_I2C=m
4100 +CONFIG_MOUSE_SYNAPTICS_USB=m
4101 +CONFIG_INPUT_JOYSTICK=y
4102 +CONFIG_JOYSTICK_ANALOG=m
4103 +CONFIG_JOYSTICK_A3D=m
4104 +CONFIG_JOYSTICK_ADC=m
4105 +CONFIG_JOYSTICK_ADI=m
4106 +CONFIG_JOYSTICK_COBRA=m
4107 +CONFIG_JOYSTICK_GF2K=m
4108 +CONFIG_JOYSTICK_GRIP=m
4109 +CONFIG_JOYSTICK_GRIP_MP=m
4110 +CONFIG_JOYSTICK_GUILLEMOT=m
4111 +CONFIG_JOYSTICK_INTERACT=m
4112 +CONFIG_JOYSTICK_SIDEWINDER=m
4113 +CONFIG_JOYSTICK_TMDC=m
4114 +CONFIG_JOYSTICK_IFORCE=m
4115 +CONFIG_JOYSTICK_IFORCE_USB=m
4116 +CONFIG_JOYSTICK_IFORCE_232=m
4117 +CONFIG_JOYSTICK_WARRIOR=m
4118 +CONFIG_JOYSTICK_MAGELLAN=m
4119 +CONFIG_JOYSTICK_SPACEORB=m
4120 +CONFIG_JOYSTICK_SPACEBALL=m
4121 +CONFIG_JOYSTICK_STINGER=m
4122 +CONFIG_JOYSTICK_TWIDJOY=m
4123 +CONFIG_JOYSTICK_ZHENHUA=m
4124 +CONFIG_JOYSTICK_DB9=m
4125 +CONFIG_JOYSTICK_GAMECON=m
4126 +CONFIG_JOYSTICK_TURBOGRAFX=m
4127 +CONFIG_JOYSTICK_AS5011=m
4128 +CONFIG_JOYSTICK_JOYDUMP=m
4129 +CONFIG_JOYSTICK_XPAD=m
4130 +CONFIG_JOYSTICK_XPAD_FF=y
4131 +CONFIG_JOYSTICK_XPAD_LEDS=y
4132 +CONFIG_JOYSTICK_WALKERA0701=m
4133 +CONFIG_JOYSTICK_PSXPAD_SPI=m
4134 +CONFIG_JOYSTICK_PSXPAD_SPI_FF=y
4135 +CONFIG_JOYSTICK_PXRC=m
4136 +CONFIG_JOYSTICK_FSIA6B=m
4137 +CONFIG_INPUT_TABLET=y
4138 +CONFIG_TABLET_USB_ACECAD=m
4139 +CONFIG_TABLET_USB_AIPTEK=m
4140 +CONFIG_TABLET_USB_HANWANG=m
4141 +CONFIG_TABLET_USB_KBTAB=m
4142 +CONFIG_TABLET_USB_PEGASUS=m
4143 +CONFIG_TABLET_SERIAL_WACOM4=m
4144 +CONFIG_INPUT_TOUCHSCREEN=y
4145 +CONFIG_TOUCHSCREEN_PROPERTIES=y
4146 +CONFIG_TOUCHSCREEN_88PM860X=m
4147 +CONFIG_TOUCHSCREEN_ADS7846=m
4148 +CONFIG_TOUCHSCREEN_AD7877=m
4149 +CONFIG_TOUCHSCREEN_AD7879=m
4150 +CONFIG_TOUCHSCREEN_AD7879_I2C=m
4151 +CONFIG_TOUCHSCREEN_AD7879_SPI=m
4152 +CONFIG_TOUCHSCREEN_ADC=m
4153 +CONFIG_TOUCHSCREEN_ATMEL_MXT=m
4154 +CONFIG_TOUCHSCREEN_ATMEL_MXT_T37=y
4155 +CONFIG_TOUCHSCREEN_AUO_PIXCIR=m
4156 +CONFIG_TOUCHSCREEN_BU21013=m
4157 +CONFIG_TOUCHSCREEN_BU21029=m
4158 +CONFIG_TOUCHSCREEN_CHIPONE_ICN8505=m
4159 +CONFIG_TOUCHSCREEN_CY8CTMA140=m
4160 +CONFIG_TOUCHSCREEN_CY8CTMG110=m
4161 +CONFIG_TOUCHSCREEN_CYTTSP_CORE=m
4162 +CONFIG_TOUCHSCREEN_CYTTSP_I2C=m
4163 +CONFIG_TOUCHSCREEN_CYTTSP_SPI=m
4164 +CONFIG_TOUCHSCREEN_CYTTSP4_CORE=m
4165 +CONFIG_TOUCHSCREEN_CYTTSP4_I2C=m
4166 +CONFIG_TOUCHSCREEN_CYTTSP4_SPI=m
4167 +CONFIG_TOUCHSCREEN_DA9034=m
4168 +CONFIG_TOUCHSCREEN_DA9052=m
4169 +CONFIG_TOUCHSCREEN_DYNAPRO=m
4170 +CONFIG_TOUCHSCREEN_HAMPSHIRE=m
4171 +CONFIG_TOUCHSCREEN_EETI=m
4172 +CONFIG_TOUCHSCREEN_EGALAX_SERIAL=m
4173 +CONFIG_TOUCHSCREEN_EXC3000=m
4174 +CONFIG_TOUCHSCREEN_FUJITSU=m
4175 +CONFIG_TOUCHSCREEN_GOODIX=m
4176 +CONFIG_TOUCHSCREEN_HIDEEP=m
4177 +CONFIG_TOUCHSCREEN_ILI210X=m
4178 +CONFIG_TOUCHSCREEN_S6SY761=m
4179 +CONFIG_TOUCHSCREEN_GUNZE=m
4180 +CONFIG_TOUCHSCREEN_EKTF2127=m
4181 +CONFIG_TOUCHSCREEN_ELAN=y
4182 +CONFIG_TOUCHSCREEN_ELO=m
4183 +CONFIG_TOUCHSCREEN_WACOM_W8001=m
4184 +CONFIG_TOUCHSCREEN_WACOM_I2C=m
4185 +CONFIG_TOUCHSCREEN_MAX11801=m
4186 +CONFIG_TOUCHSCREEN_MCS5000=m
4187 +CONFIG_TOUCHSCREEN_MMS114=m
4188 +CONFIG_TOUCHSCREEN_MELFAS_MIP4=m
4189 +CONFIG_TOUCHSCREEN_MTOUCH=m
4190 +CONFIG_TOUCHSCREEN_INEXIO=m
4191 +CONFIG_TOUCHSCREEN_MK712=m
4192 +CONFIG_TOUCHSCREEN_PENMOUNT=m
4193 +CONFIG_TOUCHSCREEN_EDT_FT5X06=m
4194 +CONFIG_TOUCHSCREEN_TOUCHRIGHT=m
4195 +CONFIG_TOUCHSCREEN_TOUCHWIN=m
4196 +CONFIG_TOUCHSCREEN_TI_AM335X_TSC=m
4197 +CONFIG_TOUCHSCREEN_UCB1400=m
4198 +CONFIG_TOUCHSCREEN_PIXCIR=m
4199 +CONFIG_TOUCHSCREEN_WDT87XX_I2C=m
4200 +CONFIG_TOUCHSCREEN_WM831X=m
4201 +CONFIG_TOUCHSCREEN_WM97XX=m
4202 +CONFIG_TOUCHSCREEN_WM9705=y
4203 +CONFIG_TOUCHSCREEN_WM9712=y
4204 +CONFIG_TOUCHSCREEN_WM9713=y
4205 +CONFIG_TOUCHSCREEN_USB_COMPOSITE=m
4206 +CONFIG_TOUCHSCREEN_MC13783=m
4207 +CONFIG_TOUCHSCREEN_USB_EGALAX=y
4208 +CONFIG_TOUCHSCREEN_USB_PANJIT=y
4209 +CONFIG_TOUCHSCREEN_USB_3M=y
4210 +CONFIG_TOUCHSCREEN_USB_ITM=y
4211 +CONFIG_TOUCHSCREEN_USB_ETURBO=y
4212 +CONFIG_TOUCHSCREEN_USB_GUNZE=y
4213 +CONFIG_TOUCHSCREEN_USB_DMC_TSC10=y
4214 +CONFIG_TOUCHSCREEN_USB_IRTOUCH=y
4215 +CONFIG_TOUCHSCREEN_USB_IDEALTEK=y
4216 +CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH=y
4217 +CONFIG_TOUCHSCREEN_USB_GOTOP=y
4218 +CONFIG_TOUCHSCREEN_USB_JASTEC=y
4219 +CONFIG_TOUCHSCREEN_USB_ELO=y
4220 +CONFIG_TOUCHSCREEN_USB_E2I=y
4221 +CONFIG_TOUCHSCREEN_USB_ZYTRONIC=y
4222 +CONFIG_TOUCHSCREEN_USB_ETT_TC45USB=y
4223 +CONFIG_TOUCHSCREEN_USB_NEXIO=y
4224 +CONFIG_TOUCHSCREEN_USB_EASYTOUCH=y
4225 +CONFIG_TOUCHSCREEN_TOUCHIT213=m
4226 +CONFIG_TOUCHSCREEN_TSC_SERIO=m
4227 +CONFIG_TOUCHSCREEN_TSC200X_CORE=m
4228 +CONFIG_TOUCHSCREEN_TSC2004=m
4229 +CONFIG_TOUCHSCREEN_TSC2005=m
4230 +CONFIG_TOUCHSCREEN_TSC2007=m
4231 +CONFIG_TOUCHSCREEN_TSC2007_IIO=y
4232 +CONFIG_TOUCHSCREEN_PCAP=m
4233 +CONFIG_TOUCHSCREEN_RM_TS=m
4234 +CONFIG_TOUCHSCREEN_SILEAD=m
4235 +CONFIG_TOUCHSCREEN_SIS_I2C=m
4236 +CONFIG_TOUCHSCREEN_ST1232=m
4237 +CONFIG_TOUCHSCREEN_STMFTS=m
4238 +CONFIG_TOUCHSCREEN_SUR40=m
4239 +CONFIG_TOUCHSCREEN_SURFACE3_SPI=m
4240 +CONFIG_TOUCHSCREEN_SX8654=m
4241 +CONFIG_TOUCHSCREEN_TPS6507X=m
4242 +CONFIG_TOUCHSCREEN_ZET6223=m
4243 +CONFIG_TOUCHSCREEN_ZFORCE=m
4244 +CONFIG_TOUCHSCREEN_ROHM_BU21023=m
4245 +CONFIG_TOUCHSCREEN_IQS5XX=m
4246 +CONFIG_TOUCHSCREEN_ZINITIX=m
4247 +CONFIG_INPUT_MISC=y
4248 +CONFIG_INPUT_88PM860X_ONKEY=m
4249 +CONFIG_INPUT_88PM80X_ONKEY=m
4250 +CONFIG_INPUT_AD714X=m
4251 +CONFIG_INPUT_AD714X_I2C=m
4252 +CONFIG_INPUT_AD714X_SPI=m
4253 +CONFIG_INPUT_ARIZONA_HAPTICS=m
4254 +CONFIG_INPUT_BMA150=m
4255 +CONFIG_INPUT_E3X0_BUTTON=m
4256 +CONFIG_INPUT_PCSPKR=m
4257 +CONFIG_INPUT_MAX77693_HAPTIC=m
4258 +CONFIG_INPUT_MAX8925_ONKEY=m
4259 +CONFIG_INPUT_MAX8997_HAPTIC=m
4260 +CONFIG_INPUT_MC13783_PWRBUTTON=m
4261 +CONFIG_INPUT_MMA8450=m
4262 +CONFIG_INPUT_APANEL=m
4263 +CONFIG_INPUT_GPIO_BEEPER=m
4264 +CONFIG_INPUT_GPIO_DECODER=m
4265 +CONFIG_INPUT_GPIO_VIBRA=m
4266 +CONFIG_INPUT_ATLAS_BTNS=m
4267 +CONFIG_INPUT_ATI_REMOTE2=m
4268 +CONFIG_INPUT_KEYSPAN_REMOTE=m
4269 +CONFIG_INPUT_KXTJ9=m
4270 +CONFIG_INPUT_POWERMATE=m
4271 +CONFIG_INPUT_YEALINK=m
4272 +CONFIG_INPUT_CM109=m
4273 +CONFIG_INPUT_REGULATOR_HAPTIC=m
4274 +CONFIG_INPUT_RETU_PWRBUTTON=m
4275 +CONFIG_INPUT_AXP20X_PEK=m
4276 +CONFIG_INPUT_TWL4030_PWRBUTTON=m
4277 +CONFIG_INPUT_TWL4030_VIBRA=m
4278 +CONFIG_INPUT_TWL6040_VIBRA=m
4279 +CONFIG_INPUT_UINPUT=y
4280 +CONFIG_INPUT_PALMAS_PWRBUTTON=m
4281 +CONFIG_INPUT_PCF50633_PMU=m
4282 +CONFIG_INPUT_PCF8574=m
4283 +CONFIG_INPUT_PWM_BEEPER=m
4284 +CONFIG_INPUT_PWM_VIBRA=m
4285 +CONFIG_INPUT_GPIO_ROTARY_ENCODER=m
4286 +CONFIG_INPUT_DA7280_HAPTICS=m
4287 +CONFIG_INPUT_DA9052_ONKEY=m
4288 +CONFIG_INPUT_DA9055_ONKEY=m
4289 +CONFIG_INPUT_DA9063_ONKEY=m
4290 +CONFIG_INPUT_WM831X_ON=m
4291 +CONFIG_INPUT_PCAP=m
4292 +CONFIG_INPUT_ADXL34X=m
4293 +CONFIG_INPUT_ADXL34X_I2C=m
4294 +CONFIG_INPUT_ADXL34X_SPI=m
4295 +CONFIG_INPUT_IMS_PCU=m
4296 +CONFIG_INPUT_IQS269A=m
4297 +CONFIG_INPUT_CMA3000=m
4298 +CONFIG_INPUT_CMA3000_I2C=m
4299 +CONFIG_INPUT_XEN_KBDDEV_FRONTEND=m
4300 +CONFIG_INPUT_IDEAPAD_SLIDEBAR=m
4301 +CONFIG_INPUT_SOC_BUTTON_ARRAY=m
4302 +CONFIG_INPUT_DRV260X_HAPTICS=m
4303 +CONFIG_INPUT_DRV2665_HAPTICS=m
4304 +CONFIG_INPUT_DRV2667_HAPTICS=m
4305 +CONFIG_INPUT_RAVE_SP_PWRBUTTON=m
4306 +CONFIG_RMI4_CORE=m
4307 +CONFIG_RMI4_I2C=m
4308 +CONFIG_RMI4_SPI=m
4309 +CONFIG_RMI4_SMB=m
4310 +CONFIG_RMI4_F03=y
4311 +CONFIG_RMI4_F03_SERIO=m
4312 +CONFIG_RMI4_2D_SENSOR=y
4313 +CONFIG_RMI4_F11=y
4314 +CONFIG_RMI4_F12=y
4315 +CONFIG_RMI4_F30=y
4316 +CONFIG_RMI4_F34=y
4317 +CONFIG_RMI4_F3A=y
4318 +CONFIG_RMI4_F54=y
4319 +CONFIG_RMI4_F55=y
4322 +# Hardware I/O ports
4324 +CONFIG_SERIO=y
4325 +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y
4326 +CONFIG_SERIO_I8042=y
4327 +CONFIG_SERIO_SERPORT=m
4328 +CONFIG_SERIO_CT82C710=m
4329 +CONFIG_SERIO_PARKBD=m
4330 +CONFIG_SERIO_PCIPS2=m
4331 +CONFIG_SERIO_LIBPS2=y
4332 +CONFIG_SERIO_RAW=m
4333 +CONFIG_SERIO_ALTERA_PS2=m
4334 +CONFIG_SERIO_PS2MULT=m
4335 +CONFIG_SERIO_ARC_PS2=m
4336 +CONFIG_HYPERV_KEYBOARD=m
4337 +CONFIG_SERIO_GPIO_PS2=m
4338 +CONFIG_USERIO=m
4339 +CONFIG_GAMEPORT=m
4340 +CONFIG_GAMEPORT_NS558=m
4341 +CONFIG_GAMEPORT_L4=m
4342 +CONFIG_GAMEPORT_EMU10K1=m
4343 +CONFIG_GAMEPORT_FM801=m
4344 +# end of Hardware I/O ports
4345 +# end of Input device support
4348 +# Character devices
4350 +CONFIG_TTY=y
4351 +CONFIG_VT=y
4352 +CONFIG_CONSOLE_TRANSLATIONS=y
4353 +CONFIG_VT_CONSOLE=y
4354 +CONFIG_VT_CONSOLE_SLEEP=y
4355 +CONFIG_HW_CONSOLE=y
4356 +CONFIG_VT_HW_CONSOLE_BINDING=y
4357 +CONFIG_UNIX98_PTYS=y
4358 +CONFIG_LEGACY_PTYS=y
4359 +CONFIG_LEGACY_PTY_COUNT=0
4360 +CONFIG_LDISC_AUTOLOAD=y
4363 +# Serial drivers
4365 +CONFIG_SERIAL_EARLYCON=y
4366 +CONFIG_SERIAL_8250=y
4367 +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
4368 +CONFIG_SERIAL_8250_PNP=y
4369 +CONFIG_SERIAL_8250_16550A_VARIANTS=y
4370 +CONFIG_SERIAL_8250_FINTEK=y
4371 +CONFIG_SERIAL_8250_CONSOLE=y
4372 +CONFIG_SERIAL_8250_DMA=y
4373 +CONFIG_SERIAL_8250_PCI=y
4374 +CONFIG_SERIAL_8250_EXAR=m
4375 +CONFIG_SERIAL_8250_CS=m
4376 +CONFIG_SERIAL_8250_MEN_MCB=m
4377 +CONFIG_SERIAL_8250_NR_UARTS=48
4378 +CONFIG_SERIAL_8250_RUNTIME_UARTS=32
4379 +CONFIG_SERIAL_8250_EXTENDED=y
4380 +CONFIG_SERIAL_8250_MANY_PORTS=y
4381 +CONFIG_SERIAL_8250_SHARE_IRQ=y
4382 +# CONFIG_SERIAL_8250_DETECT_IRQ is not set
4383 +CONFIG_SERIAL_8250_RSA=y
4384 +CONFIG_SERIAL_8250_DWLIB=y
4385 +CONFIG_SERIAL_8250_DW=m
4386 +CONFIG_SERIAL_8250_RT288X=y
4387 +CONFIG_SERIAL_8250_LPSS=m
4388 +CONFIG_SERIAL_8250_MID=m
4391 +# Non-8250 serial port support
4393 +CONFIG_SERIAL_KGDB_NMI=y
4394 +CONFIG_SERIAL_MAX3100=m
4395 +CONFIG_SERIAL_MAX310X=y
4396 +CONFIG_SERIAL_UARTLITE=m
4397 +CONFIG_SERIAL_UARTLITE_NR_UARTS=1
4398 +CONFIG_SERIAL_CORE=y
4399 +CONFIG_SERIAL_CORE_CONSOLE=y
4400 +CONFIG_CONSOLE_POLL=y
4401 +CONFIG_SERIAL_JSM=m
4402 +CONFIG_SERIAL_LANTIQ=m
4403 +CONFIG_SERIAL_SCCNXP=y
4404 +CONFIG_SERIAL_SCCNXP_CONSOLE=y
4405 +CONFIG_SERIAL_SC16IS7XX_CORE=m
4406 +CONFIG_SERIAL_SC16IS7XX=m
4407 +CONFIG_SERIAL_SC16IS7XX_I2C=y
4408 +CONFIG_SERIAL_SC16IS7XX_SPI=y
4409 +CONFIG_SERIAL_BCM63XX=m
4410 +CONFIG_SERIAL_ALTERA_JTAGUART=m
4411 +CONFIG_SERIAL_ALTERA_UART=m
4412 +CONFIG_SERIAL_ALTERA_UART_MAXPORTS=4
4413 +CONFIG_SERIAL_ALTERA_UART_BAUDRATE=115200
4414 +CONFIG_SERIAL_ARC=m
4415 +CONFIG_SERIAL_ARC_NR_PORTS=1
4416 +CONFIG_SERIAL_RP2=m
4417 +CONFIG_SERIAL_RP2_NR_UARTS=32
4418 +CONFIG_SERIAL_FSL_LPUART=m
4419 +CONFIG_SERIAL_FSL_LINFLEXUART=m
4420 +CONFIG_SERIAL_MEN_Z135=m
4421 +CONFIG_SERIAL_SPRD=m
4422 +# end of Serial drivers
4424 +CONFIG_SERIAL_MCTRL_GPIO=y
4425 +CONFIG_SERIAL_NONSTANDARD=y
4426 +CONFIG_ROCKETPORT=m
4427 +CONFIG_CYCLADES=m
4428 +# CONFIG_CYZ_INTR is not set
4429 +CONFIG_MOXA_INTELLIO=m
4430 +CONFIG_MOXA_SMARTIO=m
4431 +CONFIG_SYNCLINK_GT=m
4432 +CONFIG_ISI=m
4433 +CONFIG_N_HDLC=m
4434 +CONFIG_N_GSM=m
4435 +CONFIG_NOZOMI=m
4436 +CONFIG_NULL_TTY=m
4437 +CONFIG_TRACE_ROUTER=m
4438 +CONFIG_TRACE_SINK=m
4439 +CONFIG_HVC_DRIVER=y
4440 +CONFIG_HVC_IRQ=y
4441 +CONFIG_HVC_XEN=y
4442 +CONFIG_HVC_XEN_FRONTEND=y
4443 +CONFIG_SERIAL_DEV_BUS=y
4444 +CONFIG_SERIAL_DEV_CTRL_TTYPORT=y
4445 +CONFIG_TTY_PRINTK=y
4446 +CONFIG_TTY_PRINTK_LEVEL=6
4447 +CONFIG_PRINTER=m
4448 +# CONFIG_LP_CONSOLE is not set
4449 +CONFIG_PPDEV=m
4450 +CONFIG_VIRTIO_CONSOLE=y
4451 +CONFIG_IPMI_HANDLER=m
4452 +CONFIG_IPMI_DMI_DECODE=y
4453 +CONFIG_IPMI_PLAT_DATA=y
4454 +# CONFIG_IPMI_PANIC_EVENT is not set
4455 +CONFIG_IPMI_DEVICE_INTERFACE=m
4456 +CONFIG_IPMI_SI=m
4457 +CONFIG_IPMI_SSIF=m
4458 +CONFIG_IPMI_WATCHDOG=m
4459 +CONFIG_IPMI_POWEROFF=m
4460 +CONFIG_HW_RANDOM=y
4461 +CONFIG_HW_RANDOM_TIMERIOMEM=m
4462 +CONFIG_HW_RANDOM_INTEL=m
4463 +CONFIG_HW_RANDOM_AMD=m
4464 +CONFIG_HW_RANDOM_BA431=m
4465 +CONFIG_HW_RANDOM_VIA=m
4466 +CONFIG_HW_RANDOM_VIRTIO=m
4467 +CONFIG_HW_RANDOM_XIPHERA=m
4468 +CONFIG_APPLICOM=m
4471 +# PCMCIA character devices
4473 +CONFIG_SYNCLINK_CS=m
4474 +CONFIG_CARDMAN_4000=m
4475 +CONFIG_CARDMAN_4040=m
4476 +CONFIG_SCR24X=m
4477 +CONFIG_IPWIRELESS=m
4478 +# end of PCMCIA character devices
4480 +CONFIG_MWAVE=m
4481 +CONFIG_DEVMEM=y
4482 +# CONFIG_DEVKMEM is not set
4483 +CONFIG_NVRAM=m
4484 +CONFIG_RAW_DRIVER=m
4485 +CONFIG_MAX_RAW_DEVS=256
4486 +CONFIG_DEVPORT=y
4487 +CONFIG_HPET=y
4488 +CONFIG_HPET_MMAP=y
4489 +CONFIG_HPET_MMAP_DEFAULT=y
4490 +CONFIG_HANGCHECK_TIMER=m
4491 +CONFIG_UV_MMTIMER=m
4492 +CONFIG_TCG_TPM=y
4493 +CONFIG_HW_RANDOM_TPM=y
4494 +CONFIG_TCG_TIS_CORE=y
4495 +CONFIG_TCG_TIS=y
4496 +CONFIG_TCG_TIS_SPI=m
4497 +CONFIG_TCG_TIS_SPI_CR50=y
4498 +CONFIG_TCG_TIS_I2C_CR50=m
4499 +CONFIG_TCG_TIS_I2C_ATMEL=m
4500 +CONFIG_TCG_TIS_I2C_INFINEON=m
4501 +CONFIG_TCG_TIS_I2C_NUVOTON=m
4502 +CONFIG_TCG_NSC=m
4503 +CONFIG_TCG_ATMEL=m
4504 +CONFIG_TCG_INFINEON=m
4505 +CONFIG_TCG_XEN=m
4506 +CONFIG_TCG_CRB=y
4507 +CONFIG_TCG_VTPM_PROXY=m
4508 +CONFIG_TCG_TIS_ST33ZP24=m
4509 +CONFIG_TCG_TIS_ST33ZP24_I2C=m
4510 +CONFIG_TCG_TIS_ST33ZP24_SPI=m
4511 +CONFIG_TELCLOCK=m
4512 +CONFIG_XILLYBUS=m
4513 +CONFIG_XILLYBUS_PCIE=m
4514 +# end of Character devices
4516 +CONFIG_RANDOM_TRUST_CPU=y
4517 +CONFIG_RANDOM_TRUST_BOOTLOADER=y
4520 +# I2C support
4522 +CONFIG_I2C=y
4523 +CONFIG_ACPI_I2C_OPREGION=y
4524 +CONFIG_I2C_BOARDINFO=y
4525 +CONFIG_I2C_COMPAT=y
4526 +CONFIG_I2C_CHARDEV=y
4527 +CONFIG_I2C_MUX=m
4530 +# Multiplexer I2C Chip support
4532 +CONFIG_I2C_MUX_GPIO=m
4533 +CONFIG_I2C_MUX_LTC4306=m
4534 +CONFIG_I2C_MUX_PCA9541=m
4535 +CONFIG_I2C_MUX_PCA954x=m
4536 +CONFIG_I2C_MUX_REG=m
4537 +CONFIG_I2C_MUX_MLXCPLD=m
4538 +# end of Multiplexer I2C Chip support
4540 +CONFIG_I2C_HELPER_AUTO=y
4541 +CONFIG_I2C_SMBUS=m
4542 +CONFIG_I2C_ALGOBIT=m
4543 +CONFIG_I2C_ALGOPCA=m
4546 +# I2C Hardware Bus support
4550 +# PC SMBus host controller drivers
4552 +CONFIG_I2C_ALI1535=m
4553 +CONFIG_I2C_ALI1563=m
4554 +CONFIG_I2C_ALI15X3=m
4555 +CONFIG_I2C_AMD756=m
4556 +CONFIG_I2C_AMD756_S4882=m
4557 +CONFIG_I2C_AMD8111=m
4558 +CONFIG_I2C_AMD_MP2=m
4559 +CONFIG_I2C_I801=m
4560 +CONFIG_I2C_ISCH=m
4561 +CONFIG_I2C_ISMT=m
4562 +CONFIG_I2C_PIIX4=m
4563 +CONFIG_I2C_CHT_WC=m
4564 +CONFIG_I2C_NFORCE2=m
4565 +CONFIG_I2C_NFORCE2_S4985=m
4566 +CONFIG_I2C_NVIDIA_GPU=m
4567 +CONFIG_I2C_SIS5595=m
4568 +CONFIG_I2C_SIS630=m
4569 +CONFIG_I2C_SIS96X=m
4570 +CONFIG_I2C_VIA=m
4571 +CONFIG_I2C_VIAPRO=m
4574 +# ACPI drivers
4576 +CONFIG_I2C_SCMI=m
4579 +# I2C system bus drivers (mostly embedded / system-on-chip)
4581 +CONFIG_I2C_CBUS_GPIO=m
4582 +CONFIG_I2C_DESIGNWARE_CORE=y
4583 +# CONFIG_I2C_DESIGNWARE_SLAVE is not set
4584 +CONFIG_I2C_DESIGNWARE_PLATFORM=y
4585 +CONFIG_I2C_DESIGNWARE_BAYTRAIL=y
4586 +CONFIG_I2C_DESIGNWARE_PCI=m
4587 +# CONFIG_I2C_EMEV2 is not set
4588 +CONFIG_I2C_GPIO=m
4589 +# CONFIG_I2C_GPIO_FAULT_INJECTOR is not set
4590 +CONFIG_I2C_KEMPLD=m
4591 +CONFIG_I2C_OCORES=m
4592 +CONFIG_I2C_PCA_PLATFORM=m
4593 +CONFIG_I2C_SIMTEC=m
4594 +CONFIG_I2C_XILINX=m
4597 +# External I2C/SMBus adapter drivers
4599 +CONFIG_I2C_DIOLAN_U2C=m
4600 +CONFIG_I2C_DLN2=m
4601 +CONFIG_I2C_PARPORT=m
4602 +CONFIG_I2C_ROBOTFUZZ_OSIF=m
4603 +CONFIG_I2C_TAOS_EVM=m
4604 +CONFIG_I2C_TINY_USB=m
4605 +CONFIG_I2C_VIPERBOARD=m
4608 +# Other I2C/SMBus bus drivers
4610 +CONFIG_I2C_MLXCPLD=m
4611 +CONFIG_I2C_CROS_EC_TUNNEL=m
4612 +# end of I2C Hardware Bus support
4614 +CONFIG_I2C_STUB=m
4615 +# CONFIG_I2C_SLAVE is not set
4616 +# CONFIG_I2C_DEBUG_CORE is not set
4617 +# CONFIG_I2C_DEBUG_ALGO is not set
4618 +# CONFIG_I2C_DEBUG_BUS is not set
4619 +# end of I2C support
4621 +CONFIG_I3C=m
4622 +CONFIG_CDNS_I3C_MASTER=m
4623 +CONFIG_DW_I3C_MASTER=m
4624 +CONFIG_SVC_I3C_MASTER=m
4625 +CONFIG_MIPI_I3C_HCI=m
4626 +CONFIG_SPI=y
4627 +# CONFIG_SPI_DEBUG is not set
4628 +CONFIG_SPI_MASTER=y
4629 +CONFIG_SPI_MEM=y
4632 +# SPI Master Controller Drivers
4634 +CONFIG_SPI_ALTERA=m
4635 +CONFIG_SPI_AXI_SPI_ENGINE=m
4636 +CONFIG_SPI_BITBANG=m
4637 +CONFIG_SPI_BUTTERFLY=m
4638 +CONFIG_SPI_CADENCE=m
4639 +CONFIG_SPI_DESIGNWARE=m
4640 +CONFIG_SPI_DW_DMA=y
4641 +CONFIG_SPI_DW_PCI=m
4642 +CONFIG_SPI_DW_MMIO=m
4643 +CONFIG_SPI_DLN2=m
4644 +CONFIG_SPI_NXP_FLEXSPI=m
4645 +CONFIG_SPI_GPIO=m
4646 +CONFIG_SPI_LM70_LLP=m
4647 +CONFIG_SPI_LANTIQ_SSC=m
4648 +CONFIG_SPI_OC_TINY=m
4649 +CONFIG_SPI_PXA2XX=m
4650 +CONFIG_SPI_PXA2XX_PCI=m
4651 +# CONFIG_SPI_ROCKCHIP is not set
4652 +CONFIG_SPI_SC18IS602=m
4653 +CONFIG_SPI_SIFIVE=m
4654 +CONFIG_SPI_MXIC=m
4655 +CONFIG_SPI_XCOMM=m
4656 +# CONFIG_SPI_XILINX is not set
4657 +CONFIG_SPI_ZYNQMP_GQSPI=m
4658 +CONFIG_SPI_AMD=m
4661 +# SPI Multiplexer support
4663 +CONFIG_SPI_MUX=m
4666 +# SPI Protocol Masters
4668 +CONFIG_SPI_SPIDEV=m
4669 +CONFIG_SPI_LOOPBACK_TEST=m
4670 +CONFIG_SPI_TLE62X0=m
4671 +CONFIG_SPI_SLAVE=y
4672 +CONFIG_SPI_SLAVE_TIME=m
4673 +CONFIG_SPI_SLAVE_SYSTEM_CONTROL=m
4674 +CONFIG_SPI_DYNAMIC=y
4675 +CONFIG_SPMI=m
4676 +CONFIG_HSI=m
4677 +CONFIG_HSI_BOARDINFO=y
4680 +# HSI controllers
4684 +# HSI clients
4686 +CONFIG_HSI_CHAR=m
4687 +CONFIG_PPS=y
4688 +# CONFIG_PPS_DEBUG is not set
4691 +# PPS clients support
4693 +# CONFIG_PPS_CLIENT_KTIMER is not set
4694 +CONFIG_PPS_CLIENT_LDISC=m
4695 +CONFIG_PPS_CLIENT_PARPORT=m
4696 +CONFIG_PPS_CLIENT_GPIO=m
4699 +# PPS generators support
4703 +# PTP clock support
4705 +CONFIG_PTP_1588_CLOCK=y
4706 +CONFIG_DP83640_PHY=m
4707 +CONFIG_PTP_1588_CLOCK_INES=m
4708 +CONFIG_PTP_1588_CLOCK_KVM=m
4709 +CONFIG_PTP_1588_CLOCK_IDT82P33=m
4710 +CONFIG_PTP_1588_CLOCK_IDTCM=m
4711 +CONFIG_PTP_1588_CLOCK_VMW=m
4712 +CONFIG_PTP_1588_CLOCK_OCP=m
4713 +# end of PTP clock support
4715 +CONFIG_PINCTRL=y
4716 +CONFIG_PINMUX=y
4717 +CONFIG_PINCONF=y
4718 +CONFIG_GENERIC_PINCONF=y
4719 +# CONFIG_DEBUG_PINCTRL is not set
4720 +CONFIG_PINCTRL_AMD=y
4721 +CONFIG_PINCTRL_DA9062=m
4722 +CONFIG_PINCTRL_MCP23S08_I2C=m
4723 +CONFIG_PINCTRL_MCP23S08_SPI=m
4724 +CONFIG_PINCTRL_MCP23S08=m
4725 +CONFIG_PINCTRL_SX150X=y
4726 +CONFIG_PINCTRL_BAYTRAIL=y
4727 +CONFIG_PINCTRL_CHERRYVIEW=y
4728 +CONFIG_PINCTRL_LYNXPOINT=m
4729 +CONFIG_PINCTRL_INTEL=y
4730 +CONFIG_PINCTRL_ALDERLAKE=m
4731 +CONFIG_PINCTRL_BROXTON=m
4732 +CONFIG_PINCTRL_CANNONLAKE=m
4733 +CONFIG_PINCTRL_CEDARFORK=m
4734 +CONFIG_PINCTRL_DENVERTON=m
4735 +CONFIG_PINCTRL_ELKHARTLAKE=m
4736 +CONFIG_PINCTRL_EMMITSBURG=m
4737 +CONFIG_PINCTRL_GEMINILAKE=m
4738 +CONFIG_PINCTRL_ICELAKE=m
4739 +CONFIG_PINCTRL_JASPERLAKE=m
4740 +CONFIG_PINCTRL_LAKEFIELD=m
4741 +CONFIG_PINCTRL_LEWISBURG=m
4742 +CONFIG_PINCTRL_SUNRISEPOINT=m
4743 +CONFIG_PINCTRL_TIGERLAKE=m
4746 +# Renesas pinctrl drivers
4748 +# end of Renesas pinctrl drivers
4750 +CONFIG_PINCTRL_MADERA=m
4751 +CONFIG_PINCTRL_CS47L15=y
4752 +CONFIG_PINCTRL_CS47L35=y
4753 +CONFIG_PINCTRL_CS47L85=y
4754 +CONFIG_PINCTRL_CS47L90=y
4755 +CONFIG_PINCTRL_CS47L92=y
4756 +CONFIG_GPIOLIB=y
4757 +CONFIG_GPIOLIB_FASTPATH_LIMIT=512
4758 +CONFIG_GPIO_ACPI=y
4759 +CONFIG_GPIOLIB_IRQCHIP=y
4760 +# CONFIG_DEBUG_GPIO is not set
4761 +CONFIG_GPIO_SYSFS=y
4762 +CONFIG_GPIO_CDEV=y
4763 +# CONFIG_GPIO_CDEV_V1 is not set
4764 +CONFIG_GPIO_GENERIC=m
4765 +CONFIG_GPIO_MAX730X=m
4768 +# Memory mapped GPIO drivers
4770 +CONFIG_GPIO_AMDPT=m
4771 +CONFIG_GPIO_DWAPB=m
4772 +CONFIG_GPIO_EXAR=m
4773 +CONFIG_GPIO_GENERIC_PLATFORM=m
4774 +CONFIG_GPIO_ICH=m
4775 +CONFIG_GPIO_MB86S7X=m
4776 +CONFIG_GPIO_MENZ127=m
4777 +CONFIG_GPIO_SIOX=m
4778 +CONFIG_GPIO_VX855=m
4779 +CONFIG_GPIO_AMD_FCH=m
4780 +# end of Memory mapped GPIO drivers
4783 +# Port-mapped I/O GPIO drivers
4785 +CONFIG_GPIO_104_DIO_48E=m
4786 +CONFIG_GPIO_104_IDIO_16=m
4787 +CONFIG_GPIO_104_IDI_48=m
4788 +CONFIG_GPIO_F7188X=m
4789 +CONFIG_GPIO_GPIO_MM=m
4790 +CONFIG_GPIO_IT87=m
4791 +CONFIG_GPIO_SCH=m
4792 +CONFIG_GPIO_SCH311X=m
4793 +CONFIG_GPIO_WINBOND=m
4794 +CONFIG_GPIO_WS16C48=m
4795 +# end of Port-mapped I/O GPIO drivers
4798 +# I2C GPIO expanders
4800 +CONFIG_GPIO_ADP5588=m
4801 +CONFIG_GPIO_MAX7300=m
4802 +CONFIG_GPIO_MAX732X=m
4803 +CONFIG_GPIO_PCA953X=m
4804 +CONFIG_GPIO_PCA953X_IRQ=y
4805 +CONFIG_GPIO_PCA9570=m
4806 +CONFIG_GPIO_PCF857X=m
4807 +CONFIG_GPIO_TPIC2810=m
4808 +# end of I2C GPIO expanders
4811 +# MFD GPIO expanders
4813 +CONFIG_GPIO_ADP5520=m
4814 +CONFIG_GPIO_ARIZONA=m
4815 +CONFIG_GPIO_BD9571MWV=m
4816 +CONFIG_GPIO_CRYSTAL_COVE=y
4817 +CONFIG_GPIO_DA9052=m
4818 +CONFIG_GPIO_DA9055=m
4819 +CONFIG_GPIO_DLN2=m
4820 +CONFIG_GPIO_JANZ_TTL=m
4821 +CONFIG_GPIO_KEMPLD=m
4822 +CONFIG_GPIO_LP3943=m
4823 +CONFIG_GPIO_LP873X=m
4824 +CONFIG_GPIO_MADERA=m
4825 +CONFIG_GPIO_PALMAS=y
4826 +CONFIG_GPIO_RC5T583=y
4827 +CONFIG_GPIO_TPS65086=m
4828 +CONFIG_GPIO_TPS6586X=y
4829 +CONFIG_GPIO_TPS65910=y
4830 +CONFIG_GPIO_TPS65912=m
4831 +CONFIG_GPIO_TPS68470=y
4832 +CONFIG_GPIO_TQMX86=m
4833 +CONFIG_GPIO_TWL4030=m
4834 +CONFIG_GPIO_TWL6040=m
4835 +CONFIG_GPIO_UCB1400=m
4836 +CONFIG_GPIO_WHISKEY_COVE=m
4837 +CONFIG_GPIO_WM831X=m
4838 +CONFIG_GPIO_WM8350=m
4839 +CONFIG_GPIO_WM8994=m
4840 +# end of MFD GPIO expanders
4843 +# PCI GPIO expanders
4845 +CONFIG_GPIO_AMD8111=m
4846 +CONFIG_GPIO_ML_IOH=m
4847 +CONFIG_GPIO_PCI_IDIO_16=m
4848 +CONFIG_GPIO_PCIE_IDIO_24=m
4849 +CONFIG_GPIO_RDC321X=m
4850 +# end of PCI GPIO expanders
4853 +# SPI GPIO expanders
4855 +CONFIG_GPIO_MAX3191X=m
4856 +CONFIG_GPIO_MAX7301=m
4857 +CONFIG_GPIO_MC33880=m
4858 +CONFIG_GPIO_PISOSR=m
4859 +CONFIG_GPIO_XRA1403=m
4860 +# end of SPI GPIO expanders
4863 +# USB GPIO expanders
4865 +CONFIG_GPIO_VIPERBOARD=m
4866 +# end of USB GPIO expanders
4869 +# Virtual GPIO drivers
4871 +CONFIG_GPIO_AGGREGATOR=m
4872 +# CONFIG_GPIO_MOCKUP is not set
4873 +# end of Virtual GPIO drivers
4875 +CONFIG_W1=m
4876 +CONFIG_W1_CON=y
4879 +# 1-wire Bus Masters
4881 +CONFIG_W1_MASTER_MATROX=m
4882 +CONFIG_W1_MASTER_DS2490=m
4883 +CONFIG_W1_MASTER_DS2482=m
4884 +CONFIG_W1_MASTER_DS1WM=m
4885 +CONFIG_W1_MASTER_GPIO=m
4886 +CONFIG_W1_MASTER_SGI=m
4887 +# end of 1-wire Bus Masters
4890 +# 1-wire Slaves
4892 +CONFIG_W1_SLAVE_THERM=m
4893 +CONFIG_W1_SLAVE_SMEM=m
4894 +CONFIG_W1_SLAVE_DS2405=m
4895 +CONFIG_W1_SLAVE_DS2408=m
4896 +CONFIG_W1_SLAVE_DS2408_READBACK=y
4897 +CONFIG_W1_SLAVE_DS2413=m
4898 +CONFIG_W1_SLAVE_DS2406=m
4899 +CONFIG_W1_SLAVE_DS2423=m
4900 +CONFIG_W1_SLAVE_DS2805=m
4901 +CONFIG_W1_SLAVE_DS2430=m
4902 +CONFIG_W1_SLAVE_DS2431=m
4903 +CONFIG_W1_SLAVE_DS2433=m
4904 +# CONFIG_W1_SLAVE_DS2433_CRC is not set
4905 +CONFIG_W1_SLAVE_DS2438=m
4906 +CONFIG_W1_SLAVE_DS250X=m
4907 +CONFIG_W1_SLAVE_DS2780=m
4908 +CONFIG_W1_SLAVE_DS2781=m
4909 +CONFIG_W1_SLAVE_DS28E04=m
4910 +CONFIG_W1_SLAVE_DS28E17=m
4911 +# end of 1-wire Slaves
4913 +CONFIG_POWER_RESET=y
4914 +CONFIG_POWER_RESET_MT6323=y
4915 +CONFIG_POWER_RESET_RESTART=y
4916 +CONFIG_POWER_SUPPLY=y
4917 +# CONFIG_POWER_SUPPLY_DEBUG is not set
4918 +CONFIG_POWER_SUPPLY_HWMON=y
4919 +CONFIG_PDA_POWER=m
4920 +CONFIG_GENERIC_ADC_BATTERY=m
4921 +CONFIG_MAX8925_POWER=m
4922 +CONFIG_WM831X_BACKUP=m
4923 +CONFIG_WM831X_POWER=m
4924 +CONFIG_WM8350_POWER=m
4925 +CONFIG_TEST_POWER=m
4926 +CONFIG_BATTERY_88PM860X=m
4927 +CONFIG_CHARGER_ADP5061=m
4928 +CONFIG_BATTERY_CW2015=m
4929 +CONFIG_BATTERY_DS2760=m
4930 +CONFIG_BATTERY_DS2780=m
4931 +CONFIG_BATTERY_DS2781=m
4932 +CONFIG_BATTERY_DS2782=m
4933 +CONFIG_BATTERY_SBS=m
4934 +CONFIG_CHARGER_SBS=m
4935 +CONFIG_MANAGER_SBS=m
4936 +CONFIG_BATTERY_BQ27XXX=m
4937 +CONFIG_BATTERY_BQ27XXX_I2C=m
4938 +CONFIG_BATTERY_BQ27XXX_HDQ=m
4939 +# CONFIG_BATTERY_BQ27XXX_DT_UPDATES_NVM is not set
4940 +CONFIG_BATTERY_DA9030=m
4941 +CONFIG_BATTERY_DA9052=m
4942 +CONFIG_CHARGER_DA9150=m
4943 +CONFIG_BATTERY_DA9150=m
4944 +CONFIG_CHARGER_AXP20X=m
4945 +CONFIG_BATTERY_AXP20X=m
4946 +CONFIG_AXP20X_POWER=m
4947 +CONFIG_AXP288_CHARGER=m
4948 +CONFIG_AXP288_FUEL_GAUGE=m
4949 +CONFIG_BATTERY_MAX17040=m
4950 +CONFIG_BATTERY_MAX17042=m
4951 +CONFIG_BATTERY_MAX1721X=m
4952 +CONFIG_BATTERY_TWL4030_MADC=m
4953 +CONFIG_CHARGER_88PM860X=m
4954 +CONFIG_CHARGER_PCF50633=m
4955 +CONFIG_BATTERY_RX51=m
4956 +CONFIG_CHARGER_ISP1704=m
4957 +CONFIG_CHARGER_MAX8903=m
4958 +CONFIG_CHARGER_TWL4030=m
4959 +CONFIG_CHARGER_LP8727=m
4960 +CONFIG_CHARGER_LP8788=m
4961 +CONFIG_CHARGER_GPIO=m
4962 +CONFIG_CHARGER_MANAGER=y
4963 +CONFIG_CHARGER_LT3651=m
4964 +CONFIG_CHARGER_LTC4162L=m
4965 +CONFIG_CHARGER_MAX14577=m
4966 +CONFIG_CHARGER_MAX77693=m
4967 +CONFIG_CHARGER_MAX8997=m
4968 +CONFIG_CHARGER_MAX8998=m
4969 +CONFIG_CHARGER_MP2629=m
4970 +CONFIG_CHARGER_BQ2415X=m
4971 +CONFIG_CHARGER_BQ24190=m
4972 +CONFIG_CHARGER_BQ24257=m
4973 +CONFIG_CHARGER_BQ24735=m
4974 +CONFIG_CHARGER_BQ2515X=m
4975 +CONFIG_CHARGER_BQ25890=m
4976 +CONFIG_CHARGER_BQ25980=m
4977 +CONFIG_CHARGER_BQ256XX=m
4978 +CONFIG_CHARGER_SMB347=m
4979 +CONFIG_CHARGER_TPS65090=m
4980 +CONFIG_BATTERY_GAUGE_LTC2941=m
4981 +CONFIG_BATTERY_RT5033=m
4982 +CONFIG_CHARGER_RT9455=m
4983 +CONFIG_CHARGER_CROS_USBPD=m
4984 +CONFIG_CHARGER_BD99954=m
4985 +CONFIG_CHARGER_WILCO=m
4986 +CONFIG_HWMON=y
4987 +CONFIG_HWMON_VID=m
4988 +# CONFIG_HWMON_DEBUG_CHIP is not set
4991 +# Native drivers
4993 +CONFIG_SENSORS_ABITUGURU=m
4994 +CONFIG_SENSORS_ABITUGURU3=m
4995 +CONFIG_SENSORS_AD7314=m
4996 +CONFIG_SENSORS_AD7414=m
4997 +CONFIG_SENSORS_AD7418=m
4998 +CONFIG_SENSORS_ADM1021=m
4999 +CONFIG_SENSORS_ADM1025=m
5000 +CONFIG_SENSORS_ADM1026=m
5001 +CONFIG_SENSORS_ADM1029=m
5002 +CONFIG_SENSORS_ADM1031=m
5003 +CONFIG_SENSORS_ADM1177=m
5004 +CONFIG_SENSORS_ADM9240=m
5005 +CONFIG_SENSORS_ADT7X10=m
5006 +CONFIG_SENSORS_ADT7310=m
5007 +CONFIG_SENSORS_ADT7410=m
5008 +CONFIG_SENSORS_ADT7411=m
5009 +CONFIG_SENSORS_ADT7462=m
5010 +CONFIG_SENSORS_ADT7470=m
5011 +CONFIG_SENSORS_ADT7475=m
5012 +CONFIG_SENSORS_AHT10=m
5013 +CONFIG_SENSORS_AS370=m
5014 +CONFIG_SENSORS_ASC7621=m
5015 +CONFIG_SENSORS_AXI_FAN_CONTROL=m
5016 +CONFIG_SENSORS_K8TEMP=m
5017 +CONFIG_SENSORS_K10TEMP=m
5018 +CONFIG_SENSORS_FAM15H_POWER=m
5019 +CONFIG_SENSORS_AMD_ENERGY=m
5020 +CONFIG_SENSORS_APPLESMC=m
5021 +CONFIG_SENSORS_ASB100=m
5022 +CONFIG_SENSORS_ASPEED=m
5023 +CONFIG_SENSORS_ATXP1=m
5024 +CONFIG_SENSORS_CORSAIR_CPRO=m
5025 +CONFIG_SENSORS_CORSAIR_PSU=m
5026 +CONFIG_SENSORS_DRIVETEMP=m
5027 +CONFIG_SENSORS_DS620=m
5028 +CONFIG_SENSORS_DS1621=m
5029 +CONFIG_SENSORS_DELL_SMM=m
5030 +CONFIG_SENSORS_DA9052_ADC=m
5031 +CONFIG_SENSORS_DA9055=m
5032 +CONFIG_SENSORS_I5K_AMB=m
5033 +CONFIG_SENSORS_F71805F=m
5034 +CONFIG_SENSORS_F71882FG=m
5035 +CONFIG_SENSORS_F75375S=m
5036 +CONFIG_SENSORS_MC13783_ADC=m
5037 +CONFIG_SENSORS_FSCHMD=m
5038 +CONFIG_SENSORS_FTSTEUTATES=m
5039 +CONFIG_SENSORS_GL518SM=m
5040 +CONFIG_SENSORS_GL520SM=m
5041 +CONFIG_SENSORS_G760A=m
5042 +CONFIG_SENSORS_G762=m
5043 +CONFIG_SENSORS_HIH6130=m
5044 +CONFIG_SENSORS_IBMAEM=m
5045 +CONFIG_SENSORS_IBMPEX=m
5046 +CONFIG_SENSORS_IIO_HWMON=m
5047 +CONFIG_SENSORS_I5500=m
5048 +CONFIG_SENSORS_CORETEMP=m
5049 +CONFIG_SENSORS_IT87=m
5050 +CONFIG_SENSORS_JC42=m
5051 +CONFIG_SENSORS_POWR1220=m
5052 +CONFIG_SENSORS_LINEAGE=m
5053 +CONFIG_SENSORS_LTC2945=m
5054 +CONFIG_SENSORS_LTC2947=m
5055 +CONFIG_SENSORS_LTC2947_I2C=m
5056 +CONFIG_SENSORS_LTC2947_SPI=m
5057 +CONFIG_SENSORS_LTC2990=m
5058 +CONFIG_SENSORS_LTC2992=m
5059 +CONFIG_SENSORS_LTC4151=m
5060 +CONFIG_SENSORS_LTC4215=m
5061 +CONFIG_SENSORS_LTC4222=m
5062 +CONFIG_SENSORS_LTC4245=m
5063 +CONFIG_SENSORS_LTC4260=m
5064 +CONFIG_SENSORS_LTC4261=m
5065 +CONFIG_SENSORS_MAX1111=m
5066 +CONFIG_SENSORS_MAX127=m
5067 +CONFIG_SENSORS_MAX16065=m
5068 +CONFIG_SENSORS_MAX1619=m
5069 +CONFIG_SENSORS_MAX1668=m
5070 +CONFIG_SENSORS_MAX197=m
5071 +CONFIG_SENSORS_MAX31722=m
5072 +CONFIG_SENSORS_MAX31730=m
5073 +CONFIG_SENSORS_MAX6621=m
5074 +CONFIG_SENSORS_MAX6639=m
5075 +CONFIG_SENSORS_MAX6642=m
5076 +CONFIG_SENSORS_MAX6650=m
5077 +CONFIG_SENSORS_MAX6697=m
5078 +CONFIG_SENSORS_MAX31790=m
5079 +CONFIG_SENSORS_MCP3021=m
5080 +CONFIG_SENSORS_MLXREG_FAN=m
5081 +CONFIG_SENSORS_TC654=m
5082 +CONFIG_SENSORS_TPS23861=m
5083 +CONFIG_SENSORS_MENF21BMC_HWMON=m
5084 +CONFIG_SENSORS_MR75203=m
5085 +CONFIG_SENSORS_ADCXX=m
5086 +CONFIG_SENSORS_LM63=m
5087 +CONFIG_SENSORS_LM70=m
5088 +CONFIG_SENSORS_LM73=m
5089 +CONFIG_SENSORS_LM75=m
5090 +CONFIG_SENSORS_LM77=m
5091 +CONFIG_SENSORS_LM78=m
5092 +CONFIG_SENSORS_LM80=m
5093 +CONFIG_SENSORS_LM83=m
5094 +CONFIG_SENSORS_LM85=m
5095 +CONFIG_SENSORS_LM87=m
5096 +CONFIG_SENSORS_LM90=m
5097 +CONFIG_SENSORS_LM92=m
5098 +CONFIG_SENSORS_LM93=m
5099 +CONFIG_SENSORS_LM95234=m
5100 +CONFIG_SENSORS_LM95241=m
5101 +CONFIG_SENSORS_LM95245=m
5102 +CONFIG_SENSORS_PC87360=m
5103 +CONFIG_SENSORS_PC87427=m
5104 +CONFIG_SENSORS_NTC_THERMISTOR=m
5105 +CONFIG_SENSORS_NCT6683=m
5106 +CONFIG_SENSORS_NCT6775=m
5107 +CONFIG_SENSORS_NCT7802=m
5108 +CONFIG_SENSORS_NCT7904=m
5109 +CONFIG_SENSORS_NPCM7XX=m
5110 +CONFIG_SENSORS_PCF8591=m
5111 +CONFIG_PMBUS=m
5112 +CONFIG_SENSORS_PMBUS=m
5113 +CONFIG_SENSORS_ADM1266=m
5114 +CONFIG_SENSORS_ADM1275=m
5115 +CONFIG_SENSORS_BEL_PFE=m
5116 +CONFIG_SENSORS_IBM_CFFPS=m
5117 +CONFIG_SENSORS_INSPUR_IPSPS=m
5118 +CONFIG_SENSORS_IR35221=m
5119 +CONFIG_SENSORS_IR38064=m
5120 +CONFIG_SENSORS_IRPS5401=m
5121 +CONFIG_SENSORS_ISL68137=m
5122 +CONFIG_SENSORS_LM25066=m
5123 +CONFIG_SENSORS_LTC2978=m
5124 +CONFIG_SENSORS_LTC2978_REGULATOR=y
5125 +CONFIG_SENSORS_LTC3815=m
5126 +CONFIG_SENSORS_MAX16064=m
5127 +CONFIG_SENSORS_MAX16601=m
5128 +CONFIG_SENSORS_MAX20730=m
5129 +CONFIG_SENSORS_MAX20751=m
5130 +CONFIG_SENSORS_MAX31785=m
5131 +CONFIG_SENSORS_MAX34440=m
5132 +CONFIG_SENSORS_MAX8688=m
5133 +CONFIG_SENSORS_MP2975=m
5134 +CONFIG_SENSORS_PM6764TR=m
5135 +CONFIG_SENSORS_PXE1610=m
5136 +CONFIG_SENSORS_Q54SJ108A2=m
5137 +CONFIG_SENSORS_TPS40422=m
5138 +CONFIG_SENSORS_TPS53679=m
5139 +CONFIG_SENSORS_UCD9000=m
5140 +CONFIG_SENSORS_UCD9200=m
5141 +CONFIG_SENSORS_XDPE122=m
5142 +CONFIG_SENSORS_ZL6100=m
5143 +CONFIG_SENSORS_SBTSI=m
5144 +CONFIG_SENSORS_SHT15=m
5145 +CONFIG_SENSORS_SHT21=m
5146 +CONFIG_SENSORS_SHT3x=m
5147 +CONFIG_SENSORS_SHTC1=m
5148 +CONFIG_SENSORS_SIS5595=m
5149 +CONFIG_SENSORS_DME1737=m
5150 +CONFIG_SENSORS_EMC1403=m
5151 +CONFIG_SENSORS_EMC2103=m
5152 +CONFIG_SENSORS_EMC6W201=m
5153 +CONFIG_SENSORS_SMSC47M1=m
5154 +CONFIG_SENSORS_SMSC47M192=m
5155 +CONFIG_SENSORS_SMSC47B397=m
5156 +CONFIG_SENSORS_SCH56XX_COMMON=m
5157 +CONFIG_SENSORS_SCH5627=m
5158 +CONFIG_SENSORS_SCH5636=m
5159 +CONFIG_SENSORS_STTS751=m
5160 +CONFIG_SENSORS_SMM665=m
5161 +CONFIG_SENSORS_ADC128D818=m
5162 +CONFIG_SENSORS_ADS7828=m
5163 +CONFIG_SENSORS_ADS7871=m
5164 +CONFIG_SENSORS_AMC6821=m
5165 +CONFIG_SENSORS_INA209=m
5166 +CONFIG_SENSORS_INA2XX=m
5167 +CONFIG_SENSORS_INA3221=m
5168 +CONFIG_SENSORS_TC74=m
5169 +CONFIG_SENSORS_THMC50=m
5170 +CONFIG_SENSORS_TMP102=m
5171 +CONFIG_SENSORS_TMP103=m
5172 +CONFIG_SENSORS_TMP108=m
5173 +CONFIG_SENSORS_TMP401=m
5174 +CONFIG_SENSORS_TMP421=m
5175 +CONFIG_SENSORS_TMP513=m
5176 +CONFIG_SENSORS_VIA_CPUTEMP=m
5177 +CONFIG_SENSORS_VIA686A=m
5178 +CONFIG_SENSORS_VT1211=m
5179 +CONFIG_SENSORS_VT8231=m
5180 +CONFIG_SENSORS_W83773G=m
5181 +CONFIG_SENSORS_W83781D=m
5182 +CONFIG_SENSORS_W83791D=m
5183 +CONFIG_SENSORS_W83792D=m
5184 +CONFIG_SENSORS_W83793=m
5185 +CONFIG_SENSORS_W83795=m
5186 +# CONFIG_SENSORS_W83795_FANCTRL is not set
5187 +CONFIG_SENSORS_W83L785TS=m
5188 +CONFIG_SENSORS_W83L786NG=m
5189 +CONFIG_SENSORS_W83627HF=m
5190 +CONFIG_SENSORS_W83627EHF=m
5191 +CONFIG_SENSORS_WM831X=m
5192 +CONFIG_SENSORS_WM8350=m
5193 +CONFIG_SENSORS_XGENE=m
5194 +CONFIG_SENSORS_INTEL_M10_BMC_HWMON=m
5197 +# ACPI drivers
5199 +CONFIG_SENSORS_ACPI_POWER=m
5200 +CONFIG_SENSORS_ATK0110=m
5201 +CONFIG_THERMAL=y
5202 +CONFIG_THERMAL_NETLINK=y
5203 +CONFIG_THERMAL_STATISTICS=y
5204 +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0
5205 +CONFIG_THERMAL_HWMON=y
5206 +CONFIG_THERMAL_WRITABLE_TRIPS=y
5207 +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
5208 +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
5209 +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
5210 +# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set
5211 +CONFIG_THERMAL_GOV_FAIR_SHARE=y
5212 +CONFIG_THERMAL_GOV_STEP_WISE=y
5213 +CONFIG_THERMAL_GOV_BANG_BANG=y
5214 +CONFIG_THERMAL_GOV_USER_SPACE=y
5215 +CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
5216 +CONFIG_DEVFREQ_THERMAL=y
5217 +CONFIG_THERMAL_EMULATION=y
5220 +# Intel thermal drivers
5222 +CONFIG_INTEL_POWERCLAMP=m
5223 +CONFIG_X86_THERMAL_VECTOR=y
5224 +CONFIG_X86_PKG_TEMP_THERMAL=m
5225 +CONFIG_INTEL_SOC_DTS_IOSF_CORE=m
5226 +CONFIG_INTEL_SOC_DTS_THERMAL=m
5229 +# ACPI INT340X thermal drivers
5231 +CONFIG_INT340X_THERMAL=m
5232 +CONFIG_ACPI_THERMAL_REL=m
5233 +CONFIG_INT3406_THERMAL=m
5234 +CONFIG_PROC_THERMAL_MMIO_RAPL=m
5235 +# end of ACPI INT340X thermal drivers
5237 +CONFIG_INTEL_BXT_PMIC_THERMAL=m
5238 +CONFIG_INTEL_PCH_THERMAL=m
5239 +# end of Intel thermal drivers
5241 +CONFIG_GENERIC_ADC_THERMAL=m
5242 +CONFIG_WATCHDOG=y
5243 +CONFIG_WATCHDOG_CORE=y
5244 +# CONFIG_WATCHDOG_NOWAYOUT is not set
5245 +CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y
5246 +CONFIG_WATCHDOG_OPEN_TIMEOUT=0
5247 +CONFIG_WATCHDOG_SYSFS=y
5250 +# Watchdog Pretimeout Governors
5252 +CONFIG_WATCHDOG_PRETIMEOUT_GOV=y
5253 +CONFIG_WATCHDOG_PRETIMEOUT_GOV_SEL=m
5254 +CONFIG_WATCHDOG_PRETIMEOUT_GOV_NOOP=y
5255 +CONFIG_WATCHDOG_PRETIMEOUT_GOV_PANIC=m
5256 +CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_NOOP=y
5257 +# CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC is not set
5260 +# Watchdog Device Drivers
5262 +CONFIG_SOFT_WATCHDOG=m
5263 +CONFIG_SOFT_WATCHDOG_PRETIMEOUT=y
5264 +CONFIG_DA9052_WATCHDOG=m
5265 +CONFIG_DA9055_WATCHDOG=m
5266 +CONFIG_DA9063_WATCHDOG=m
5267 +CONFIG_DA9062_WATCHDOG=m
5268 +CONFIG_MENF21BMC_WATCHDOG=m
5269 +CONFIG_MENZ069_WATCHDOG=m
5270 +CONFIG_WDAT_WDT=m
5271 +CONFIG_WM831X_WATCHDOG=m
5272 +CONFIG_WM8350_WATCHDOG=m
5273 +CONFIG_XILINX_WATCHDOG=m
5274 +CONFIG_ZIIRAVE_WATCHDOG=m
5275 +CONFIG_RAVE_SP_WATCHDOG=m
5276 +CONFIG_MLX_WDT=m
5277 +CONFIG_CADENCE_WATCHDOG=m
5278 +CONFIG_DW_WATCHDOG=m
5279 +CONFIG_TWL4030_WATCHDOG=m
5280 +CONFIG_MAX63XX_WATCHDOG=m
5281 +CONFIG_RETU_WATCHDOG=m
5282 +CONFIG_ACQUIRE_WDT=m
5283 +CONFIG_ADVANTECH_WDT=m
5284 +CONFIG_ALIM1535_WDT=m
5285 +CONFIG_ALIM7101_WDT=m
5286 +CONFIG_EBC_C384_WDT=m
5287 +CONFIG_F71808E_WDT=m
5288 +CONFIG_SP5100_TCO=m
5289 +CONFIG_SBC_FITPC2_WATCHDOG=m
5290 +CONFIG_EUROTECH_WDT=m
5291 +CONFIG_IB700_WDT=m
5292 +CONFIG_IBMASR=m
5293 +CONFIG_WAFER_WDT=m
5294 +CONFIG_I6300ESB_WDT=m
5295 +CONFIG_IE6XX_WDT=m
5296 +CONFIG_ITCO_WDT=m
5297 +CONFIG_ITCO_VENDOR_SUPPORT=y
5298 +CONFIG_IT8712F_WDT=m
5299 +CONFIG_IT87_WDT=m
5300 +CONFIG_HP_WATCHDOG=m
5301 +CONFIG_HPWDT_NMI_DECODING=y
5302 +CONFIG_KEMPLD_WDT=m
5303 +CONFIG_SC1200_WDT=m
5304 +CONFIG_PC87413_WDT=m
5305 +CONFIG_NV_TCO=m
5306 +CONFIG_60XX_WDT=m
5307 +CONFIG_CPU5_WDT=m
5308 +CONFIG_SMSC_SCH311X_WDT=m
5309 +CONFIG_SMSC37B787_WDT=m
5310 +CONFIG_TQMX86_WDT=m
5311 +CONFIG_VIA_WDT=m
5312 +CONFIG_W83627HF_WDT=m
5313 +CONFIG_W83877F_WDT=m
5314 +CONFIG_W83977F_WDT=m
5315 +CONFIG_MACHZ_WDT=m
5316 +CONFIG_SBC_EPX_C3_WATCHDOG=m
5317 +CONFIG_INTEL_MEI_WDT=m
5318 +CONFIG_NI903X_WDT=m
5319 +CONFIG_NIC7018_WDT=m
5320 +CONFIG_MEN_A21_WDT=m
5321 +CONFIG_XEN_WDT=m
5324 +# PCI-based Watchdog Cards
5326 +CONFIG_PCIPCWATCHDOG=m
5327 +CONFIG_WDTPCI=m
5330 +# USB-based Watchdog Cards
5332 +CONFIG_USBPCWATCHDOG=m
5333 +CONFIG_SSB_POSSIBLE=y
5334 +CONFIG_SSB=m
5335 +CONFIG_SSB_SPROM=y
5336 +CONFIG_SSB_BLOCKIO=y
5337 +CONFIG_SSB_PCIHOST_POSSIBLE=y
5338 +CONFIG_SSB_PCIHOST=y
5339 +CONFIG_SSB_B43_PCI_BRIDGE=y
5340 +CONFIG_SSB_PCMCIAHOST_POSSIBLE=y
5341 +# CONFIG_SSB_PCMCIAHOST is not set
5342 +CONFIG_SSB_SDIOHOST_POSSIBLE=y
5343 +CONFIG_SSB_SDIOHOST=y
5344 +CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
5345 +CONFIG_SSB_DRIVER_PCICORE=y
5346 +CONFIG_SSB_DRIVER_GPIO=y
5347 +CONFIG_BCMA_POSSIBLE=y
5348 +CONFIG_BCMA=m
5349 +CONFIG_BCMA_BLOCKIO=y
5350 +CONFIG_BCMA_HOST_PCI_POSSIBLE=y
5351 +CONFIG_BCMA_HOST_PCI=y
5352 +CONFIG_BCMA_HOST_SOC=y
5353 +CONFIG_BCMA_DRIVER_PCI=y
5354 +CONFIG_BCMA_SFLASH=y
5355 +CONFIG_BCMA_DRIVER_GMAC_CMN=y
5356 +CONFIG_BCMA_DRIVER_GPIO=y
5357 +# CONFIG_BCMA_DEBUG is not set
5360 +# Multifunction device drivers
5362 +CONFIG_MFD_CORE=y
5363 +CONFIG_MFD_AS3711=y
5364 +CONFIG_PMIC_ADP5520=y
5365 +CONFIG_MFD_AAT2870_CORE=y
5366 +CONFIG_MFD_BCM590XX=m
5367 +CONFIG_MFD_BD9571MWV=m
5368 +CONFIG_MFD_AXP20X=m
5369 +CONFIG_MFD_AXP20X_I2C=m
5370 +CONFIG_MFD_CROS_EC_DEV=m
5371 +CONFIG_MFD_MADERA=m
5372 +CONFIG_MFD_MADERA_I2C=m
5373 +CONFIG_MFD_MADERA_SPI=m
5374 +CONFIG_MFD_CS47L15=y
5375 +CONFIG_MFD_CS47L35=y
5376 +CONFIG_MFD_CS47L85=y
5377 +CONFIG_MFD_CS47L90=y
5378 +CONFIG_MFD_CS47L92=y
5379 +CONFIG_PMIC_DA903X=y
5380 +CONFIG_PMIC_DA9052=y
5381 +CONFIG_MFD_DA9052_SPI=y
5382 +CONFIG_MFD_DA9052_I2C=y
5383 +CONFIG_MFD_DA9055=y
5384 +CONFIG_MFD_DA9062=m
5385 +CONFIG_MFD_DA9063=y
5386 +CONFIG_MFD_DA9150=m
5387 +CONFIG_MFD_DLN2=m
5388 +CONFIG_MFD_MC13XXX=m
5389 +CONFIG_MFD_MC13XXX_SPI=m
5390 +CONFIG_MFD_MC13XXX_I2C=m
5391 +CONFIG_MFD_MP2629=m
5392 +CONFIG_HTC_PASIC3=m
5393 +CONFIG_HTC_I2CPLD=y
5394 +CONFIG_MFD_INTEL_QUARK_I2C_GPIO=m
5395 +CONFIG_LPC_ICH=m
5396 +CONFIG_LPC_SCH=m
5397 +CONFIG_INTEL_SOC_PMIC=y
5398 +CONFIG_INTEL_SOC_PMIC_BXTWC=m
5399 +CONFIG_INTEL_SOC_PMIC_CHTWC=y
5400 +CONFIG_INTEL_SOC_PMIC_CHTDC_TI=m
5401 +CONFIG_INTEL_SOC_PMIC_MRFLD=m
5402 +CONFIG_MFD_INTEL_LPSS=m
5403 +CONFIG_MFD_INTEL_LPSS_ACPI=m
5404 +CONFIG_MFD_INTEL_LPSS_PCI=m
5405 +CONFIG_MFD_INTEL_PMC_BXT=m
5406 +CONFIG_MFD_INTEL_PMT=m
5407 +CONFIG_MFD_IQS62X=m
5408 +CONFIG_MFD_JANZ_CMODIO=m
5409 +CONFIG_MFD_KEMPLD=m
5410 +CONFIG_MFD_88PM800=m
5411 +CONFIG_MFD_88PM805=m
5412 +CONFIG_MFD_88PM860X=y
5413 +CONFIG_MFD_MAX14577=y
5414 +CONFIG_MFD_MAX77693=y
5415 +CONFIG_MFD_MAX77843=y
5416 +CONFIG_MFD_MAX8907=m
5417 +CONFIG_MFD_MAX8925=y
5418 +CONFIG_MFD_MAX8997=y
5419 +CONFIG_MFD_MAX8998=y
5420 +CONFIG_MFD_MT6360=m
5421 +CONFIG_MFD_MT6397=m
5422 +CONFIG_MFD_MENF21BMC=m
5423 +CONFIG_EZX_PCAP=y
5424 +CONFIG_MFD_VIPERBOARD=m
5425 +CONFIG_MFD_RETU=m
5426 +CONFIG_MFD_PCF50633=m
5427 +CONFIG_PCF50633_ADC=m
5428 +CONFIG_PCF50633_GPIO=m
5429 +CONFIG_UCB1400_CORE=m
5430 +CONFIG_MFD_RDC321X=m
5431 +CONFIG_MFD_RT5033=m
5432 +CONFIG_MFD_RC5T583=y
5433 +CONFIG_MFD_SEC_CORE=y
5434 +CONFIG_MFD_SI476X_CORE=m
5435 +CONFIG_MFD_SM501=m
5436 +CONFIG_MFD_SM501_GPIO=y
5437 +CONFIG_MFD_SKY81452=m
5438 +CONFIG_ABX500_CORE=y
5439 +CONFIG_AB3100_CORE=y
5440 +CONFIG_AB3100_OTP=m
5441 +CONFIG_MFD_SYSCON=y
5442 +CONFIG_MFD_TI_AM335X_TSCADC=m
5443 +CONFIG_MFD_LP3943=m
5444 +CONFIG_MFD_LP8788=y
5445 +CONFIG_MFD_TI_LMU=m
5446 +CONFIG_MFD_PALMAS=y
5447 +CONFIG_TPS6105X=m
5448 +CONFIG_TPS65010=m
5449 +CONFIG_TPS6507X=m
5450 +CONFIG_MFD_TPS65086=m
5451 +CONFIG_MFD_TPS65090=y
5452 +CONFIG_MFD_TPS68470=y
5453 +CONFIG_MFD_TI_LP873X=m
5454 +CONFIG_MFD_TPS6586X=y
5455 +CONFIG_MFD_TPS65910=y
5456 +CONFIG_MFD_TPS65912=y
5457 +CONFIG_MFD_TPS65912_I2C=y
5458 +CONFIG_MFD_TPS65912_SPI=y
5459 +CONFIG_MFD_TPS80031=y
5460 +CONFIG_TWL4030_CORE=y
5461 +CONFIG_MFD_TWL4030_AUDIO=y
5462 +CONFIG_TWL6040_CORE=y
5463 +CONFIG_MFD_WL1273_CORE=m
5464 +CONFIG_MFD_LM3533=m
5465 +CONFIG_MFD_TQMX86=m
5466 +CONFIG_MFD_VX855=m
5467 +CONFIG_MFD_ARIZONA=y
5468 +CONFIG_MFD_ARIZONA_I2C=m
5469 +CONFIG_MFD_ARIZONA_SPI=m
5470 +CONFIG_MFD_CS47L24=y
5471 +CONFIG_MFD_WM5102=y
5472 +CONFIG_MFD_WM5110=y
5473 +CONFIG_MFD_WM8997=y
5474 +CONFIG_MFD_WM8998=y
5475 +CONFIG_MFD_WM8400=y
5476 +CONFIG_MFD_WM831X=y
5477 +CONFIG_MFD_WM831X_I2C=y
5478 +CONFIG_MFD_WM831X_SPI=y
5479 +CONFIG_MFD_WM8350=y
5480 +CONFIG_MFD_WM8350_I2C=y
5481 +CONFIG_MFD_WM8994=m
5482 +CONFIG_MFD_WCD934X=m
5483 +CONFIG_RAVE_SP_CORE=m
5484 +CONFIG_MFD_INTEL_M10_BMC=m
5485 +# end of Multifunction device drivers
5487 +CONFIG_REGULATOR=y
5488 +# CONFIG_REGULATOR_DEBUG is not set
5489 +CONFIG_REGULATOR_FIXED_VOLTAGE=m
5490 +CONFIG_REGULATOR_VIRTUAL_CONSUMER=m
5491 +CONFIG_REGULATOR_USERSPACE_CONSUMER=m
5492 +CONFIG_REGULATOR_88PG86X=m
5493 +CONFIG_REGULATOR_88PM800=m
5494 +CONFIG_REGULATOR_88PM8607=m
5495 +CONFIG_REGULATOR_ACT8865=m
5496 +CONFIG_REGULATOR_AD5398=m
5497 +CONFIG_REGULATOR_AAT2870=m
5498 +CONFIG_REGULATOR_ARIZONA_LDO1=m
5499 +CONFIG_REGULATOR_ARIZONA_MICSUPP=m
5500 +CONFIG_REGULATOR_AS3711=m
5501 +CONFIG_REGULATOR_AXP20X=m
5502 +CONFIG_REGULATOR_BCM590XX=m
5503 +CONFIG_REGULATOR_BD9571MWV=m
5504 +CONFIG_REGULATOR_DA903X=m
5505 +CONFIG_REGULATOR_DA9052=m
5506 +CONFIG_REGULATOR_DA9055=m
5507 +CONFIG_REGULATOR_DA9062=m
5508 +CONFIG_REGULATOR_DA9210=m
5509 +CONFIG_REGULATOR_DA9211=m
5510 +CONFIG_REGULATOR_FAN53555=m
5511 +CONFIG_REGULATOR_GPIO=m
5512 +CONFIG_REGULATOR_ISL9305=m
5513 +CONFIG_REGULATOR_ISL6271A=m
5514 +CONFIG_REGULATOR_LM363X=m
5515 +CONFIG_REGULATOR_LP3971=m
5516 +CONFIG_REGULATOR_LP3972=m
5517 +CONFIG_REGULATOR_LP872X=m
5518 +CONFIG_REGULATOR_LP8755=m
5519 +CONFIG_REGULATOR_LP8788=m
5520 +CONFIG_REGULATOR_LTC3589=m
5521 +CONFIG_REGULATOR_LTC3676=m
5522 +CONFIG_REGULATOR_MAX14577=m
5523 +CONFIG_REGULATOR_MAX1586=m
5524 +CONFIG_REGULATOR_MAX8649=m
5525 +CONFIG_REGULATOR_MAX8660=m
5526 +CONFIG_REGULATOR_MAX8907=m
5527 +CONFIG_REGULATOR_MAX8925=m
5528 +CONFIG_REGULATOR_MAX8952=m
5529 +CONFIG_REGULATOR_MAX8997=m
5530 +CONFIG_REGULATOR_MAX8998=m
5531 +CONFIG_REGULATOR_MAX77693=m
5532 +CONFIG_REGULATOR_MAX77826=m
5533 +CONFIG_REGULATOR_MC13XXX_CORE=m
5534 +CONFIG_REGULATOR_MC13783=m
5535 +CONFIG_REGULATOR_MC13892=m
5536 +CONFIG_REGULATOR_MP8859=m
5537 +CONFIG_REGULATOR_MT6311=m
5538 +CONFIG_REGULATOR_MT6315=m
5539 +CONFIG_REGULATOR_MT6323=m
5540 +CONFIG_REGULATOR_MT6358=m
5541 +CONFIG_REGULATOR_MT6360=m
5542 +CONFIG_REGULATOR_MT6397=m
5543 +CONFIG_REGULATOR_PALMAS=m
5544 +CONFIG_REGULATOR_PCA9450=m
5545 +CONFIG_REGULATOR_PCAP=m
5546 +CONFIG_REGULATOR_PCF50633=m
5547 +CONFIG_REGULATOR_PV88060=m
5548 +CONFIG_REGULATOR_PV88080=m
5549 +CONFIG_REGULATOR_PV88090=m
5550 +CONFIG_REGULATOR_PWM=m
5551 +CONFIG_REGULATOR_QCOM_SPMI=m
5552 +CONFIG_REGULATOR_QCOM_USB_VBUS=m
5553 +CONFIG_REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY=m
5554 +CONFIG_REGULATOR_RC5T583=m
5555 +CONFIG_REGULATOR_RT4801=m
5556 +CONFIG_REGULATOR_RT5033=m
5557 +CONFIG_REGULATOR_RTMV20=m
5558 +CONFIG_REGULATOR_S2MPA01=m
5559 +CONFIG_REGULATOR_S2MPS11=m
5560 +CONFIG_REGULATOR_S5M8767=m
5561 +CONFIG_REGULATOR_SKY81452=m
5562 +CONFIG_REGULATOR_SLG51000=m
5563 +CONFIG_REGULATOR_TPS51632=m
5564 +CONFIG_REGULATOR_TPS6105X=m
5565 +CONFIG_REGULATOR_TPS62360=m
5566 +CONFIG_REGULATOR_TPS65023=m
5567 +CONFIG_REGULATOR_TPS6507X=m
5568 +CONFIG_REGULATOR_TPS65086=m
5569 +CONFIG_REGULATOR_TPS65090=m
5570 +CONFIG_REGULATOR_TPS65132=m
5571 +CONFIG_REGULATOR_TPS6524X=m
5572 +CONFIG_REGULATOR_TPS6586X=m
5573 +CONFIG_REGULATOR_TPS65910=m
5574 +CONFIG_REGULATOR_TPS65912=m
5575 +CONFIG_REGULATOR_TPS80031=m
5576 +CONFIG_REGULATOR_TWL4030=m
5577 +CONFIG_REGULATOR_WM831X=m
5578 +CONFIG_REGULATOR_WM8350=m
5579 +CONFIG_REGULATOR_WM8400=m
5580 +CONFIG_REGULATOR_WM8994=m
5581 +CONFIG_REGULATOR_QCOM_LABIBB=m
5582 +CONFIG_RC_CORE=m
5583 +CONFIG_RC_MAP=m
5584 +CONFIG_LIRC=y
5585 +CONFIG_RC_DECODERS=y
5586 +CONFIG_IR_NEC_DECODER=m
5587 +CONFIG_IR_RC5_DECODER=m
5588 +CONFIG_IR_RC6_DECODER=m
5589 +CONFIG_IR_JVC_DECODER=m
5590 +CONFIG_IR_SONY_DECODER=m
5591 +CONFIG_IR_SANYO_DECODER=m
5592 +CONFIG_IR_SHARP_DECODER=m
5593 +CONFIG_IR_MCE_KBD_DECODER=m
5594 +CONFIG_IR_XMP_DECODER=m
5595 +CONFIG_IR_IMON_DECODER=m
5596 +CONFIG_IR_RCMM_DECODER=m
5597 +CONFIG_RC_DEVICES=y
5598 +CONFIG_RC_ATI_REMOTE=m
5599 +CONFIG_IR_ENE=m
5600 +CONFIG_IR_IMON=m
5601 +CONFIG_IR_IMON_RAW=m
5602 +CONFIG_IR_MCEUSB=m
5603 +CONFIG_IR_ITE_CIR=m
5604 +CONFIG_IR_FINTEK=m
5605 +CONFIG_IR_NUVOTON=m
5606 +CONFIG_IR_REDRAT3=m
5607 +CONFIG_IR_STREAMZAP=m
5608 +CONFIG_IR_WINBOND_CIR=m
5609 +CONFIG_IR_IGORPLUGUSB=m
5610 +CONFIG_IR_IGUANA=m
5611 +CONFIG_IR_TTUSBIR=m
5612 +CONFIG_RC_LOOPBACK=m
5613 +CONFIG_IR_SERIAL=m
5614 +CONFIG_IR_SERIAL_TRANSMITTER=y
5615 +CONFIG_IR_SIR=m
5616 +CONFIG_RC_XBOX_DVD=m
5617 +CONFIG_IR_TOY=m
5618 +CONFIG_CEC_CORE=m
5619 +CONFIG_CEC_NOTIFIER=y
5620 +CONFIG_CEC_PIN=y
5621 +CONFIG_MEDIA_CEC_RC=y
5622 +# CONFIG_CEC_PIN_ERROR_INJ is not set
5623 +CONFIG_MEDIA_CEC_SUPPORT=y
5624 +CONFIG_CEC_CH7322=m
5625 +CONFIG_CEC_CROS_EC=m
5626 +CONFIG_CEC_GPIO=m
5627 +CONFIG_CEC_SECO=m
5628 +CONFIG_CEC_SECO_RC=y
5629 +CONFIG_USB_PULSE8_CEC=m
5630 +CONFIG_USB_RAINSHADOW_CEC=m
5631 +CONFIG_MEDIA_SUPPORT=m
5632 +CONFIG_MEDIA_SUPPORT_FILTER=y
5633 +CONFIG_MEDIA_SUBDRV_AUTOSELECT=y
5636 +# Media device types
5638 +CONFIG_MEDIA_CAMERA_SUPPORT=y
5639 +CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
5640 +CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
5641 +CONFIG_MEDIA_RADIO_SUPPORT=y
5642 +CONFIG_MEDIA_SDR_SUPPORT=y
5643 +CONFIG_MEDIA_PLATFORM_SUPPORT=y
5644 +CONFIG_MEDIA_TEST_SUPPORT=y
5645 +# end of Media device types
5647 +CONFIG_VIDEO_DEV=m
5648 +CONFIG_MEDIA_CONTROLLER=y
5649 +CONFIG_DVB_CORE=m
5652 +# Video4Linux options
5654 +CONFIG_VIDEO_V4L2=m
5655 +CONFIG_VIDEO_V4L2_I2C=y
5656 +CONFIG_VIDEO_V4L2_SUBDEV_API=y
5657 +# CONFIG_VIDEO_ADV_DEBUG is not set
5658 +# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
5659 +CONFIG_VIDEO_TUNER=m
5660 +CONFIG_V4L2_MEM2MEM_DEV=m
5661 +CONFIG_V4L2_FLASH_LED_CLASS=m
5662 +CONFIG_V4L2_FWNODE=m
5663 +CONFIG_VIDEOBUF_GEN=m
5664 +CONFIG_VIDEOBUF_DMA_SG=m
5665 +CONFIG_VIDEOBUF_VMALLOC=m
5666 +# end of Video4Linux options
5669 +# Media controller options
5671 +CONFIG_MEDIA_CONTROLLER_DVB=y
5672 +CONFIG_MEDIA_CONTROLLER_REQUEST_API=y
5675 +# Please notice that the enabled Media controller Request API is EXPERIMENTAL
5677 +# end of Media controller options
5680 +# Digital TV options
5682 +# CONFIG_DVB_MMAP is not set
5683 +CONFIG_DVB_NET=y
5684 +CONFIG_DVB_MAX_ADAPTERS=8
5685 +CONFIG_DVB_DYNAMIC_MINORS=y
5686 +# CONFIG_DVB_DEMUX_SECTION_LOSS_LOG is not set
5687 +# CONFIG_DVB_ULE_DEBUG is not set
5688 +# end of Digital TV options
5691 +# Media drivers
5695 +# Drivers filtered as selected at 'Filter media drivers'
5697 +CONFIG_TTPCI_EEPROM=m
5698 +CONFIG_MEDIA_USB_SUPPORT=y
5701 +# Webcam devices
5703 +CONFIG_USB_VIDEO_CLASS=m
5704 +CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
5705 +CONFIG_USB_GSPCA=m
5706 +CONFIG_USB_M5602=m
5707 +CONFIG_USB_STV06XX=m
5708 +CONFIG_USB_GL860=m
5709 +CONFIG_USB_GSPCA_BENQ=m
5710 +CONFIG_USB_GSPCA_CONEX=m
5711 +CONFIG_USB_GSPCA_CPIA1=m
5712 +CONFIG_USB_GSPCA_DTCS033=m
5713 +CONFIG_USB_GSPCA_ETOMS=m
5714 +CONFIG_USB_GSPCA_FINEPIX=m
5715 +CONFIG_USB_GSPCA_JEILINJ=m
5716 +CONFIG_USB_GSPCA_JL2005BCD=m
5717 +CONFIG_USB_GSPCA_KINECT=m
5718 +CONFIG_USB_GSPCA_KONICA=m
5719 +CONFIG_USB_GSPCA_MARS=m
5720 +CONFIG_USB_GSPCA_MR97310A=m
5721 +CONFIG_USB_GSPCA_NW80X=m
5722 +CONFIG_USB_GSPCA_OV519=m
5723 +CONFIG_USB_GSPCA_OV534=m
5724 +CONFIG_USB_GSPCA_OV534_9=m
5725 +CONFIG_USB_GSPCA_PAC207=m
5726 +CONFIG_USB_GSPCA_PAC7302=m
5727 +CONFIG_USB_GSPCA_PAC7311=m
5728 +CONFIG_USB_GSPCA_SE401=m
5729 +CONFIG_USB_GSPCA_SN9C2028=m
5730 +CONFIG_USB_GSPCA_SN9C20X=m
5731 +CONFIG_USB_GSPCA_SONIXB=m
5732 +CONFIG_USB_GSPCA_SONIXJ=m
5733 +CONFIG_USB_GSPCA_SPCA500=m
5734 +CONFIG_USB_GSPCA_SPCA501=m
5735 +CONFIG_USB_GSPCA_SPCA505=m
5736 +CONFIG_USB_GSPCA_SPCA506=m
5737 +CONFIG_USB_GSPCA_SPCA508=m
5738 +CONFIG_USB_GSPCA_SPCA561=m
5739 +CONFIG_USB_GSPCA_SPCA1528=m
5740 +CONFIG_USB_GSPCA_SQ905=m
5741 +CONFIG_USB_GSPCA_SQ905C=m
5742 +CONFIG_USB_GSPCA_SQ930X=m
5743 +CONFIG_USB_GSPCA_STK014=m
5744 +CONFIG_USB_GSPCA_STK1135=m
5745 +CONFIG_USB_GSPCA_STV0680=m
5746 +CONFIG_USB_GSPCA_SUNPLUS=m
5747 +CONFIG_USB_GSPCA_T613=m
5748 +CONFIG_USB_GSPCA_TOPRO=m
5749 +CONFIG_USB_GSPCA_TOUPTEK=m
5750 +CONFIG_USB_GSPCA_TV8532=m
5751 +CONFIG_USB_GSPCA_VC032X=m
5752 +CONFIG_USB_GSPCA_VICAM=m
5753 +CONFIG_USB_GSPCA_XIRLINK_CIT=m
5754 +CONFIG_USB_GSPCA_ZC3XX=m
5755 +CONFIG_USB_PWC=m
5756 +# CONFIG_USB_PWC_DEBUG is not set
5757 +CONFIG_USB_PWC_INPUT_EVDEV=y
5758 +CONFIG_VIDEO_CPIA2=m
5759 +CONFIG_USB_ZR364XX=m
5760 +CONFIG_USB_STKWEBCAM=m
5761 +CONFIG_USB_S2255=m
5762 +CONFIG_VIDEO_USBTV=m
5765 +# Analog TV USB devices
5767 +CONFIG_VIDEO_PVRUSB2=m
5768 +CONFIG_VIDEO_PVRUSB2_SYSFS=y
5769 +CONFIG_VIDEO_PVRUSB2_DVB=y
5770 +# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set
5771 +CONFIG_VIDEO_HDPVR=m
5772 +CONFIG_VIDEO_STK1160_COMMON=m
5773 +CONFIG_VIDEO_STK1160=m
5774 +CONFIG_VIDEO_GO7007=m
5775 +CONFIG_VIDEO_GO7007_USB=m
5776 +CONFIG_VIDEO_GO7007_LOADER=m
5777 +CONFIG_VIDEO_GO7007_USB_S2250_BOARD=m
5780 +# Analog/digital TV USB devices
5782 +CONFIG_VIDEO_AU0828=m
5783 +CONFIG_VIDEO_AU0828_V4L2=y
5784 +CONFIG_VIDEO_AU0828_RC=y
5785 +CONFIG_VIDEO_CX231XX=m
5786 +CONFIG_VIDEO_CX231XX_RC=y
5787 +CONFIG_VIDEO_CX231XX_ALSA=m
5788 +CONFIG_VIDEO_CX231XX_DVB=m
5789 +CONFIG_VIDEO_TM6000=m
5790 +CONFIG_VIDEO_TM6000_ALSA=m
5791 +CONFIG_VIDEO_TM6000_DVB=m
5794 +# Digital TV USB devices
5796 +CONFIG_DVB_USB=m
5797 +# CONFIG_DVB_USB_DEBUG is not set
5798 +CONFIG_DVB_USB_DIB3000MC=m
5799 +CONFIG_DVB_USB_A800=m
5800 +CONFIG_DVB_USB_DIBUSB_MB=m
5801 +# CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set
5802 +CONFIG_DVB_USB_DIBUSB_MC=m
5803 +CONFIG_DVB_USB_DIB0700=m
5804 +CONFIG_DVB_USB_UMT_010=m
5805 +CONFIG_DVB_USB_CXUSB=m
5806 +CONFIG_DVB_USB_CXUSB_ANALOG=y
5807 +CONFIG_DVB_USB_M920X=m
5808 +CONFIG_DVB_USB_DIGITV=m
5809 +CONFIG_DVB_USB_VP7045=m
5810 +CONFIG_DVB_USB_VP702X=m
5811 +CONFIG_DVB_USB_GP8PSK=m
5812 +CONFIG_DVB_USB_NOVA_T_USB2=m
5813 +CONFIG_DVB_USB_TTUSB2=m
5814 +CONFIG_DVB_USB_DTT200U=m
5815 +CONFIG_DVB_USB_OPERA1=m
5816 +CONFIG_DVB_USB_AF9005=m
5817 +CONFIG_DVB_USB_AF9005_REMOTE=m
5818 +CONFIG_DVB_USB_PCTV452E=m
5819 +CONFIG_DVB_USB_DW2102=m
5820 +CONFIG_DVB_USB_CINERGY_T2=m
5821 +CONFIG_DVB_USB_DTV5100=m
5822 +CONFIG_DVB_USB_AZ6027=m
5823 +CONFIG_DVB_USB_TECHNISAT_USB2=m
5824 +CONFIG_DVB_USB_V2=m
5825 +CONFIG_DVB_USB_AF9015=m
5826 +CONFIG_DVB_USB_AF9035=m
5827 +CONFIG_DVB_USB_ANYSEE=m
5828 +CONFIG_DVB_USB_AU6610=m
5829 +CONFIG_DVB_USB_AZ6007=m
5830 +CONFIG_DVB_USB_CE6230=m
5831 +CONFIG_DVB_USB_EC168=m
5832 +CONFIG_DVB_USB_GL861=m
5833 +CONFIG_DVB_USB_LME2510=m
5834 +CONFIG_DVB_USB_MXL111SF=m
5835 +CONFIG_DVB_USB_RTL28XXU=m
5836 +CONFIG_DVB_USB_DVBSKY=m
5837 +CONFIG_DVB_USB_ZD1301=m
5838 +CONFIG_DVB_TTUSB_BUDGET=m
5839 +CONFIG_DVB_TTUSB_DEC=m
5840 +CONFIG_SMS_USB_DRV=m
5841 +CONFIG_DVB_B2C2_FLEXCOP_USB=m
5842 +# CONFIG_DVB_B2C2_FLEXCOP_USB_DEBUG is not set
5843 +CONFIG_DVB_AS102=m
5846 +# Webcam, TV (analog/digital) USB devices
5848 +CONFIG_VIDEO_EM28XX=m
5849 +CONFIG_VIDEO_EM28XX_V4L2=m
5850 +CONFIG_VIDEO_EM28XX_ALSA=m
5851 +CONFIG_VIDEO_EM28XX_DVB=m
5852 +CONFIG_VIDEO_EM28XX_RC=m
5855 +# Software defined radio USB devices
5857 +CONFIG_USB_AIRSPY=m
5858 +CONFIG_USB_HACKRF=m
5859 +CONFIG_USB_MSI2500=m
5860 +CONFIG_MEDIA_PCI_SUPPORT=y
5863 +# Media capture support
5865 +CONFIG_VIDEO_MEYE=m
5866 +CONFIG_VIDEO_SOLO6X10=m
5867 +CONFIG_VIDEO_TW5864=m
5868 +CONFIG_VIDEO_TW68=m
5869 +CONFIG_VIDEO_TW686X=m
5872 +# Media capture/analog TV support
5874 +CONFIG_VIDEO_IVTV=m
5875 +# CONFIG_VIDEO_IVTV_DEPRECATED_IOCTLS is not set
5876 +CONFIG_VIDEO_IVTV_ALSA=m
5877 +CONFIG_VIDEO_FB_IVTV=m
5878 +CONFIG_VIDEO_FB_IVTV_FORCE_PAT=y
5879 +CONFIG_VIDEO_HEXIUM_GEMINI=m
5880 +CONFIG_VIDEO_HEXIUM_ORION=m
5881 +CONFIG_VIDEO_MXB=m
5882 +CONFIG_VIDEO_DT3155=m
5885 +# Media capture/analog/hybrid TV support
5887 +CONFIG_VIDEO_CX18=m
5888 +CONFIG_VIDEO_CX18_ALSA=m
5889 +CONFIG_VIDEO_CX23885=m
5890 +CONFIG_MEDIA_ALTERA_CI=m
5891 +CONFIG_VIDEO_CX25821=m
5892 +CONFIG_VIDEO_CX25821_ALSA=m
5893 +CONFIG_VIDEO_CX88=m
5894 +CONFIG_VIDEO_CX88_ALSA=m
5895 +CONFIG_VIDEO_CX88_BLACKBIRD=m
5896 +CONFIG_VIDEO_CX88_DVB=m
5897 +CONFIG_VIDEO_CX88_ENABLE_VP3054=y
5898 +CONFIG_VIDEO_CX88_VP3054=m
5899 +CONFIG_VIDEO_CX88_MPEG=m
5900 +CONFIG_VIDEO_BT848=m
5901 +CONFIG_DVB_BT8XX=m
5902 +CONFIG_VIDEO_SAA7134=m
5903 +CONFIG_VIDEO_SAA7134_ALSA=m
5904 +CONFIG_VIDEO_SAA7134_RC=y
5905 +CONFIG_VIDEO_SAA7134_DVB=m
5906 +CONFIG_VIDEO_SAA7134_GO7007=m
5907 +CONFIG_VIDEO_SAA7164=m
5908 +CONFIG_VIDEO_COBALT=m
5911 +# Media digital TV PCI Adapters
5913 +CONFIG_DVB_AV7110_IR=y
5914 +CONFIG_DVB_AV7110=m
5915 +CONFIG_DVB_AV7110_OSD=y
5916 +CONFIG_DVB_BUDGET_CORE=m
5917 +CONFIG_DVB_BUDGET=m
5918 +CONFIG_DVB_BUDGET_CI=m
5919 +CONFIG_DVB_BUDGET_AV=m
5920 +CONFIG_DVB_BUDGET_PATCH=m
5921 +CONFIG_DVB_B2C2_FLEXCOP_PCI=m
5922 +# CONFIG_DVB_B2C2_FLEXCOP_PCI_DEBUG is not set
5923 +CONFIG_DVB_PLUTO2=m
5924 +CONFIG_DVB_DM1105=m
5925 +CONFIG_DVB_PT1=m
5926 +CONFIG_DVB_PT3=m
5927 +CONFIG_MANTIS_CORE=m
5928 +CONFIG_DVB_MANTIS=m
5929 +CONFIG_DVB_HOPPER=m
5930 +CONFIG_DVB_NGENE=m
5931 +CONFIG_DVB_DDBRIDGE=m
5932 +# CONFIG_DVB_DDBRIDGE_MSIENABLE is not set
5933 +CONFIG_DVB_SMIPCIE=m
5934 +CONFIG_DVB_NETUP_UNIDVB=m
5935 +CONFIG_VIDEO_IPU3_CIO2=m
5936 +CONFIG_CIO2_BRIDGE=y
5937 +# CONFIG_VIDEO_PCI_SKELETON is not set
5938 +CONFIG_RADIO_ADAPTERS=y
5939 +CONFIG_RADIO_TEA575X=m
5940 +CONFIG_RADIO_SI470X=m
5941 +CONFIG_USB_SI470X=m
5942 +CONFIG_I2C_SI470X=m
5943 +CONFIG_RADIO_SI4713=m
5944 +CONFIG_USB_SI4713=m
5945 +CONFIG_PLATFORM_SI4713=m
5946 +CONFIG_I2C_SI4713=m
5947 +CONFIG_RADIO_SI476X=m
5948 +CONFIG_USB_MR800=m
5949 +CONFIG_USB_DSBR=m
5950 +CONFIG_RADIO_MAXIRADIO=m
5951 +CONFIG_RADIO_SHARK=m
5952 +CONFIG_RADIO_SHARK2=m
5953 +CONFIG_USB_KEENE=m
5954 +CONFIG_USB_RAREMONO=m
5955 +CONFIG_USB_MA901=m
5956 +CONFIG_RADIO_TEA5764=m
5957 +CONFIG_RADIO_SAA7706H=m
5958 +CONFIG_RADIO_TEF6862=m
5959 +CONFIG_RADIO_WL1273=m
5960 +CONFIG_RADIO_WL128X=m
5961 +CONFIG_MEDIA_COMMON_OPTIONS=y
5964 +# common driver options
5966 +CONFIG_VIDEO_CX2341X=m
5967 +CONFIG_VIDEO_TVEEPROM=m
5968 +CONFIG_CYPRESS_FIRMWARE=m
5969 +CONFIG_VIDEOBUF2_CORE=m
5970 +CONFIG_VIDEOBUF2_V4L2=m
5971 +CONFIG_VIDEOBUF2_MEMOPS=m
5972 +CONFIG_VIDEOBUF2_DMA_CONTIG=m
5973 +CONFIG_VIDEOBUF2_VMALLOC=m
5974 +CONFIG_VIDEOBUF2_DMA_SG=m
5975 +CONFIG_VIDEOBUF2_DVB=m
5976 +CONFIG_DVB_B2C2_FLEXCOP=m
5977 +CONFIG_VIDEO_SAA7146=m
5978 +CONFIG_VIDEO_SAA7146_VV=m
5979 +CONFIG_SMS_SIANO_MDTV=m
5980 +CONFIG_SMS_SIANO_RC=y
5981 +CONFIG_SMS_SIANO_DEBUGFS=y
5982 +CONFIG_VIDEO_V4L2_TPG=m
5983 +CONFIG_V4L_PLATFORM_DRIVERS=y
5984 +CONFIG_VIDEO_CAFE_CCIC=m
5985 +CONFIG_VIDEO_VIA_CAMERA=m
5986 +CONFIG_VIDEO_CADENCE=y
5987 +CONFIG_VIDEO_CADENCE_CSI2RX=m
5988 +CONFIG_VIDEO_CADENCE_CSI2TX=m
5989 +CONFIG_VIDEO_ASPEED=m
5990 +CONFIG_V4L_MEM2MEM_DRIVERS=y
5991 +CONFIG_VIDEO_MEM2MEM_DEINTERLACE=m
5992 +CONFIG_DVB_PLATFORM_DRIVERS=y
5993 +CONFIG_SDR_PLATFORM_DRIVERS=y
5996 +# MMC/SDIO DVB adapters
5998 +CONFIG_SMS_SDIO_DRV=m
5999 +CONFIG_V4L_TEST_DRIVERS=y
6000 +CONFIG_VIDEO_VIMC=m
6001 +CONFIG_VIDEO_VIVID=m
6002 +CONFIG_VIDEO_VIVID_CEC=y
6003 +CONFIG_VIDEO_VIVID_MAX_DEVS=64
6004 +CONFIG_VIDEO_VIM2M=m
6005 +CONFIG_VIDEO_VICODEC=m
6006 +# CONFIG_DVB_TEST_DRIVERS is not set
6009 +# FireWire (IEEE 1394) Adapters
6011 +CONFIG_DVB_FIREDTV=m
6012 +CONFIG_DVB_FIREDTV_INPUT=y
6013 +# end of Media drivers
6016 +# Media ancillary drivers
6018 +CONFIG_MEDIA_ATTACH=y
6021 +# IR I2C driver auto-selected by 'Autoselect ancillary drivers'
6023 +CONFIG_VIDEO_IR_I2C=m
6026 +# Audio decoders, processors and mixers
6028 +CONFIG_VIDEO_TVAUDIO=m
6029 +CONFIG_VIDEO_TDA7432=m
6030 +CONFIG_VIDEO_TDA9840=m
6031 +CONFIG_VIDEO_TDA1997X=m
6032 +CONFIG_VIDEO_TEA6415C=m
6033 +CONFIG_VIDEO_TEA6420=m
6034 +CONFIG_VIDEO_MSP3400=m
6035 +CONFIG_VIDEO_CS3308=m
6036 +CONFIG_VIDEO_CS5345=m
6037 +CONFIG_VIDEO_CS53L32A=m
6038 +CONFIG_VIDEO_TLV320AIC23B=m
6039 +CONFIG_VIDEO_UDA1342=m
6040 +CONFIG_VIDEO_WM8775=m
6041 +CONFIG_VIDEO_WM8739=m
6042 +CONFIG_VIDEO_VP27SMPX=m
6043 +CONFIG_VIDEO_SONY_BTF_MPX=m
6044 +# end of Audio decoders, processors and mixers
6047 +# RDS decoders
6049 +CONFIG_VIDEO_SAA6588=m
6050 +# end of RDS decoders
6053 +# Video decoders
6055 +CONFIG_VIDEO_ADV7180=m
6056 +CONFIG_VIDEO_ADV7183=m
6057 +CONFIG_VIDEO_ADV7604=m
6058 +CONFIG_VIDEO_ADV7604_CEC=y
6059 +CONFIG_VIDEO_ADV7842=m
6060 +CONFIG_VIDEO_ADV7842_CEC=y
6061 +CONFIG_VIDEO_BT819=m
6062 +CONFIG_VIDEO_BT856=m
6063 +CONFIG_VIDEO_BT866=m
6064 +CONFIG_VIDEO_KS0127=m
6065 +CONFIG_VIDEO_ML86V7667=m
6066 +CONFIG_VIDEO_SAA7110=m
6067 +CONFIG_VIDEO_SAA711X=m
6068 +CONFIG_VIDEO_TC358743=m
6069 +CONFIG_VIDEO_TC358743_CEC=y
6070 +CONFIG_VIDEO_TVP514X=m
6071 +CONFIG_VIDEO_TVP5150=m
6072 +CONFIG_VIDEO_TVP7002=m
6073 +CONFIG_VIDEO_TW2804=m
6074 +CONFIG_VIDEO_TW9903=m
6075 +CONFIG_VIDEO_TW9906=m
6076 +CONFIG_VIDEO_TW9910=m
6077 +CONFIG_VIDEO_VPX3220=m
6080 +# Video and audio decoders
6082 +CONFIG_VIDEO_SAA717X=m
6083 +CONFIG_VIDEO_CX25840=m
6084 +# end of Video decoders
6087 +# Video encoders
6089 +CONFIG_VIDEO_SAA7127=m
6090 +CONFIG_VIDEO_SAA7185=m
6091 +CONFIG_VIDEO_ADV7170=m
6092 +CONFIG_VIDEO_ADV7175=m
6093 +CONFIG_VIDEO_ADV7343=m
6094 +CONFIG_VIDEO_ADV7393=m
6095 +CONFIG_VIDEO_ADV7511=m
6096 +CONFIG_VIDEO_ADV7511_CEC=y
6097 +CONFIG_VIDEO_AD9389B=m
6098 +CONFIG_VIDEO_AK881X=m
6099 +CONFIG_VIDEO_THS8200=m
6100 +# end of Video encoders
6103 +# Video improvement chips
6105 +CONFIG_VIDEO_UPD64031A=m
6106 +CONFIG_VIDEO_UPD64083=m
6107 +# end of Video improvement chips
6110 +# Audio/Video compression chips
6112 +CONFIG_VIDEO_SAA6752HS=m
6113 +# end of Audio/Video compression chips
6116 +# SDR tuner chips
6118 +CONFIG_SDR_MAX2175=m
6119 +# end of SDR tuner chips
6122 +# Miscellaneous helper chips
6124 +CONFIG_VIDEO_THS7303=m
6125 +CONFIG_VIDEO_M52790=m
6126 +CONFIG_VIDEO_I2C=m
6127 +CONFIG_VIDEO_ST_MIPID02=m
6128 +# end of Miscellaneous helper chips
6131 +# Camera sensor devices
6133 +CONFIG_VIDEO_APTINA_PLL=m
6134 +CONFIG_VIDEO_CCS_PLL=m
6135 +CONFIG_VIDEO_HI556=m
6136 +CONFIG_VIDEO_IMX214=m
6137 +CONFIG_VIDEO_IMX219=m
6138 +CONFIG_VIDEO_IMX258=m
6139 +CONFIG_VIDEO_IMX274=m
6140 +CONFIG_VIDEO_IMX290=m
6141 +CONFIG_VIDEO_IMX319=m
6142 +CONFIG_VIDEO_IMX355=m
6143 +CONFIG_VIDEO_OV02A10=m
6144 +CONFIG_VIDEO_OV2640=m
6145 +CONFIG_VIDEO_OV2659=m
6146 +CONFIG_VIDEO_OV2680=m
6147 +CONFIG_VIDEO_OV2685=m
6148 +CONFIG_VIDEO_OV2740=m
6149 +CONFIG_VIDEO_OV5647=m
6150 +CONFIG_VIDEO_OV5648=m
6151 +CONFIG_VIDEO_OV6650=m
6152 +CONFIG_VIDEO_OV5670=m
6153 +CONFIG_VIDEO_OV5675=m
6154 +CONFIG_VIDEO_OV5695=m
6155 +CONFIG_VIDEO_OV7251=m
6156 +CONFIG_VIDEO_OV772X=m
6157 +CONFIG_VIDEO_OV7640=m
6158 +CONFIG_VIDEO_OV7670=m
6159 +CONFIG_VIDEO_OV7740=m
6160 +CONFIG_VIDEO_OV8856=m
6161 +CONFIG_VIDEO_OV8865=m
6162 +CONFIG_VIDEO_OV9640=m
6163 +CONFIG_VIDEO_OV9650=m
6164 +CONFIG_VIDEO_OV9734=m
6165 +CONFIG_VIDEO_OV13858=m
6166 +CONFIG_VIDEO_VS6624=m
6167 +CONFIG_VIDEO_MT9M001=m
6168 +CONFIG_VIDEO_MT9M032=m
6169 +CONFIG_VIDEO_MT9M111=m
6170 +CONFIG_VIDEO_MT9P031=m
6171 +CONFIG_VIDEO_MT9T001=m
6172 +CONFIG_VIDEO_MT9T112=m
6173 +CONFIG_VIDEO_MT9V011=m
6174 +CONFIG_VIDEO_MT9V032=m
6175 +CONFIG_VIDEO_MT9V111=m
6176 +CONFIG_VIDEO_SR030PC30=m
6177 +CONFIG_VIDEO_NOON010PC30=m
6178 +CONFIG_VIDEO_M5MOLS=m
6179 +CONFIG_VIDEO_MAX9271_LIB=m
6180 +CONFIG_VIDEO_RDACM20=m
6181 +CONFIG_VIDEO_RDACM21=m
6182 +CONFIG_VIDEO_RJ54N1=m
6183 +CONFIG_VIDEO_S5K6AA=m
6184 +CONFIG_VIDEO_S5K6A3=m
6185 +CONFIG_VIDEO_S5K4ECGX=m
6186 +CONFIG_VIDEO_S5K5BAF=m
6187 +CONFIG_VIDEO_CCS=m
6188 +CONFIG_VIDEO_ET8EK8=m
6189 +CONFIG_VIDEO_S5C73M3=m
6190 +# end of Camera sensor devices
6193 +# Lens drivers
6195 +CONFIG_VIDEO_AD5820=m
6196 +CONFIG_VIDEO_AK7375=m
6197 +CONFIG_VIDEO_DW9714=m
6198 +CONFIG_VIDEO_DW9768=m
6199 +CONFIG_VIDEO_DW9807_VCM=m
6200 +# end of Lens drivers
6203 +# Flash devices
6205 +CONFIG_VIDEO_ADP1653=m
6206 +CONFIG_VIDEO_LM3560=m
6207 +CONFIG_VIDEO_LM3646=m
6208 +# end of Flash devices
6211 +# SPI helper chips
6213 +CONFIG_VIDEO_GS1662=m
6214 +# end of SPI helper chips
6217 +# Media SPI Adapters
6219 +CONFIG_CXD2880_SPI_DRV=m
6220 +# end of Media SPI Adapters
6222 +CONFIG_MEDIA_TUNER=m
6225 +# Customize TV tuners
6227 +CONFIG_MEDIA_TUNER_SIMPLE=m
6228 +CONFIG_MEDIA_TUNER_TDA18250=m
6229 +CONFIG_MEDIA_TUNER_TDA8290=m
6230 +CONFIG_MEDIA_TUNER_TDA827X=m
6231 +CONFIG_MEDIA_TUNER_TDA18271=m
6232 +CONFIG_MEDIA_TUNER_TDA9887=m
6233 +CONFIG_MEDIA_TUNER_TEA5761=m
6234 +CONFIG_MEDIA_TUNER_TEA5767=m
6235 +CONFIG_MEDIA_TUNER_MSI001=m
6236 +CONFIG_MEDIA_TUNER_MT20XX=m
6237 +CONFIG_MEDIA_TUNER_MT2060=m
6238 +CONFIG_MEDIA_TUNER_MT2063=m
6239 +CONFIG_MEDIA_TUNER_MT2266=m
6240 +CONFIG_MEDIA_TUNER_MT2131=m
6241 +CONFIG_MEDIA_TUNER_QT1010=m
6242 +CONFIG_MEDIA_TUNER_XC2028=m
6243 +CONFIG_MEDIA_TUNER_XC5000=m
6244 +CONFIG_MEDIA_TUNER_XC4000=m
6245 +CONFIG_MEDIA_TUNER_MXL5005S=m
6246 +CONFIG_MEDIA_TUNER_MXL5007T=m
6247 +CONFIG_MEDIA_TUNER_MC44S803=m
6248 +CONFIG_MEDIA_TUNER_MAX2165=m
6249 +CONFIG_MEDIA_TUNER_TDA18218=m
6250 +CONFIG_MEDIA_TUNER_FC0011=m
6251 +CONFIG_MEDIA_TUNER_FC0012=m
6252 +CONFIG_MEDIA_TUNER_FC0013=m
6253 +CONFIG_MEDIA_TUNER_TDA18212=m
6254 +CONFIG_MEDIA_TUNER_E4000=m
6255 +CONFIG_MEDIA_TUNER_FC2580=m
6256 +CONFIG_MEDIA_TUNER_M88RS6000T=m
6257 +CONFIG_MEDIA_TUNER_TUA9001=m
6258 +CONFIG_MEDIA_TUNER_SI2157=m
6259 +CONFIG_MEDIA_TUNER_IT913X=m
6260 +CONFIG_MEDIA_TUNER_R820T=m
6261 +CONFIG_MEDIA_TUNER_MXL301RF=m
6262 +CONFIG_MEDIA_TUNER_QM1D1C0042=m
6263 +CONFIG_MEDIA_TUNER_QM1D1B0004=m
6264 +# end of Customize TV tuners
6267 +# Customise DVB Frontends
6271 +# Multistandard (satellite) frontends
6273 +CONFIG_DVB_STB0899=m
6274 +CONFIG_DVB_STB6100=m
6275 +CONFIG_DVB_STV090x=m
6276 +CONFIG_DVB_STV0910=m
6277 +CONFIG_DVB_STV6110x=m
6278 +CONFIG_DVB_STV6111=m
6279 +CONFIG_DVB_MXL5XX=m
6280 +CONFIG_DVB_M88DS3103=m
6283 +# Multistandard (cable + terrestrial) frontends
6285 +CONFIG_DVB_DRXK=m
6286 +CONFIG_DVB_TDA18271C2DD=m
6287 +CONFIG_DVB_SI2165=m
6288 +CONFIG_DVB_MN88472=m
6289 +CONFIG_DVB_MN88473=m
6292 +# DVB-S (satellite) frontends
6294 +CONFIG_DVB_CX24110=m
6295 +CONFIG_DVB_CX24123=m
6296 +CONFIG_DVB_MT312=m
6297 +CONFIG_DVB_ZL10036=m
6298 +CONFIG_DVB_ZL10039=m
6299 +CONFIG_DVB_S5H1420=m
6300 +CONFIG_DVB_STV0288=m
6301 +CONFIG_DVB_STB6000=m
6302 +CONFIG_DVB_STV0299=m
6303 +CONFIG_DVB_STV6110=m
6304 +CONFIG_DVB_STV0900=m
6305 +CONFIG_DVB_TDA8083=m
6306 +CONFIG_DVB_TDA10086=m
6307 +CONFIG_DVB_TDA8261=m
6308 +CONFIG_DVB_VES1X93=m
6309 +CONFIG_DVB_TUNER_ITD1000=m
6310 +CONFIG_DVB_TUNER_CX24113=m
6311 +CONFIG_DVB_TDA826X=m
6312 +CONFIG_DVB_TUA6100=m
6313 +CONFIG_DVB_CX24116=m
6314 +CONFIG_DVB_CX24117=m
6315 +CONFIG_DVB_CX24120=m
6316 +CONFIG_DVB_SI21XX=m
6317 +CONFIG_DVB_TS2020=m
6318 +CONFIG_DVB_DS3000=m
6319 +CONFIG_DVB_MB86A16=m
6320 +CONFIG_DVB_TDA10071=m
6323 +# DVB-T (terrestrial) frontends
6325 +CONFIG_DVB_SP8870=m
6326 +CONFIG_DVB_SP887X=m
6327 +CONFIG_DVB_CX22700=m
6328 +CONFIG_DVB_CX22702=m
6329 +CONFIG_DVB_S5H1432=m
6330 +CONFIG_DVB_DRXD=m
6331 +CONFIG_DVB_L64781=m
6332 +CONFIG_DVB_TDA1004X=m
6333 +CONFIG_DVB_NXT6000=m
6334 +CONFIG_DVB_MT352=m
6335 +CONFIG_DVB_ZL10353=m
6336 +CONFIG_DVB_DIB3000MB=m
6337 +CONFIG_DVB_DIB3000MC=m
6338 +CONFIG_DVB_DIB7000M=m
6339 +CONFIG_DVB_DIB7000P=m
6340 +CONFIG_DVB_DIB9000=m
6341 +CONFIG_DVB_TDA10048=m
6342 +CONFIG_DVB_AF9013=m
6343 +CONFIG_DVB_EC100=m
6344 +CONFIG_DVB_STV0367=m
6345 +CONFIG_DVB_CXD2820R=m
6346 +CONFIG_DVB_CXD2841ER=m
6347 +CONFIG_DVB_RTL2830=m
6348 +CONFIG_DVB_RTL2832=m
6349 +CONFIG_DVB_RTL2832_SDR=m
6350 +CONFIG_DVB_SI2168=m
6351 +CONFIG_DVB_AS102_FE=m
6352 +CONFIG_DVB_ZD1301_DEMOD=m
6353 +CONFIG_DVB_GP8PSK_FE=m
6354 +CONFIG_DVB_CXD2880=m
6357 +# DVB-C (cable) frontends
6359 +CONFIG_DVB_VES1820=m
6360 +CONFIG_DVB_TDA10021=m
6361 +CONFIG_DVB_TDA10023=m
6362 +CONFIG_DVB_STV0297=m
6365 +# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
6367 +CONFIG_DVB_NXT200X=m
6368 +CONFIG_DVB_OR51211=m
6369 +CONFIG_DVB_OR51132=m
6370 +CONFIG_DVB_BCM3510=m
6371 +CONFIG_DVB_LGDT330X=m
6372 +CONFIG_DVB_LGDT3305=m
6373 +CONFIG_DVB_LGDT3306A=m
6374 +CONFIG_DVB_LG2160=m
6375 +CONFIG_DVB_S5H1409=m
6376 +CONFIG_DVB_AU8522=m
6377 +CONFIG_DVB_AU8522_DTV=m
6378 +CONFIG_DVB_AU8522_V4L=m
6379 +CONFIG_DVB_S5H1411=m
6380 +CONFIG_DVB_MXL692=m
6383 +# ISDB-T (terrestrial) frontends
6385 +CONFIG_DVB_S921=m
6386 +CONFIG_DVB_DIB8000=m
6387 +CONFIG_DVB_MB86A20S=m
6390 +# ISDB-S (satellite) & ISDB-T (terrestrial) frontends
6392 +CONFIG_DVB_TC90522=m
6393 +CONFIG_DVB_MN88443X=m
6396 +# Digital terrestrial only tuners/PLL
6398 +CONFIG_DVB_PLL=m
6399 +CONFIG_DVB_TUNER_DIB0070=m
6400 +CONFIG_DVB_TUNER_DIB0090=m
6403 +# SEC control devices for DVB-S
6405 +CONFIG_DVB_DRX39XYJ=m
6406 +CONFIG_DVB_LNBH25=m
6407 +CONFIG_DVB_LNBH29=m
6408 +CONFIG_DVB_LNBP21=m
6409 +CONFIG_DVB_LNBP22=m
6410 +CONFIG_DVB_ISL6405=m
6411 +CONFIG_DVB_ISL6421=m
6412 +CONFIG_DVB_ISL6423=m
6413 +CONFIG_DVB_A8293=m
6414 +CONFIG_DVB_LGS8GL5=m
6415 +CONFIG_DVB_LGS8GXX=m
6416 +CONFIG_DVB_ATBM8830=m
6417 +CONFIG_DVB_TDA665x=m
6418 +CONFIG_DVB_IX2505V=m
6419 +CONFIG_DVB_M88RS2000=m
6420 +CONFIG_DVB_AF9033=m
6421 +CONFIG_DVB_HORUS3A=m
6422 +CONFIG_DVB_ASCOT2E=m
6423 +CONFIG_DVB_HELENE=m
6426 +# Common Interface (EN50221) controller drivers
6428 +CONFIG_DVB_CXD2099=m
6429 +CONFIG_DVB_SP2=m
6430 +# end of Customise DVB Frontends
6433 +# Tools to develop new frontends
6435 +CONFIG_DVB_DUMMY_FE=m
6436 +# end of Media ancillary drivers
6439 +# Graphics support
6441 +CONFIG_AGP=y
6442 +CONFIG_AGP_AMD64=y
6443 +CONFIG_AGP_INTEL=y
6444 +CONFIG_AGP_SIS=m
6445 +CONFIG_AGP_VIA=y
6446 +CONFIG_INTEL_GTT=y
6447 +CONFIG_VGA_ARB=y
6448 +CONFIG_VGA_ARB_MAX_GPUS=16
6449 +CONFIG_VGA_SWITCHEROO=y
6450 +CONFIG_DRM=m
6451 +CONFIG_DRM_MIPI_DBI=m
6452 +CONFIG_DRM_MIPI_DSI=y
6453 +CONFIG_DRM_DP_AUX_CHARDEV=y
6454 +# CONFIG_DRM_DEBUG_SELFTEST is not set
6455 +CONFIG_DRM_KMS_HELPER=m
6456 +CONFIG_DRM_KMS_FB_HELPER=y
6457 +# CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS is not set
6458 +CONFIG_DRM_FBDEV_EMULATION=y
6459 +CONFIG_DRM_FBDEV_OVERALLOC=100
6460 +# CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set
6461 +CONFIG_DRM_LOAD_EDID_FIRMWARE=y
6462 +CONFIG_DRM_DP_CEC=y
6463 +CONFIG_DRM_TTM=m
6464 +CONFIG_DRM_VRAM_HELPER=m
6465 +CONFIG_DRM_TTM_HELPER=m
6466 +CONFIG_DRM_GEM_CMA_HELPER=y
6467 +CONFIG_DRM_KMS_CMA_HELPER=y
6468 +CONFIG_DRM_GEM_SHMEM_HELPER=y
6469 +CONFIG_DRM_SCHED=m
6472 +# I2C encoder or helper chips
6474 +CONFIG_DRM_I2C_CH7006=m
6475 +CONFIG_DRM_I2C_SIL164=m
6476 +CONFIG_DRM_I2C_NXP_TDA998X=m
6477 +CONFIG_DRM_I2C_NXP_TDA9950=m
6478 +# end of I2C encoder or helper chips
6481 +# ARM devices
6483 +# end of ARM devices
6485 +CONFIG_DRM_RADEON=m
6486 +# CONFIG_DRM_RADEON_USERPTR is not set
6487 +CONFIG_DRM_AMDGPU=m
6488 +CONFIG_DRM_AMDGPU_SI=y
6489 +CONFIG_DRM_AMDGPU_CIK=y
6490 +CONFIG_DRM_AMDGPU_USERPTR=y
6491 +# CONFIG_DRM_AMDGPU_GART_DEBUGFS is not set
6494 +# ACP (Audio CoProcessor) Configuration
6496 +CONFIG_DRM_AMD_ACP=y
6497 +# end of ACP (Audio CoProcessor) Configuration
6500 +# Display Engine Configuration
6502 +CONFIG_DRM_AMD_DC=y
6503 +CONFIG_DRM_AMD_DC_DCN=y
6504 +CONFIG_DRM_AMD_DC_HDCP=y
6505 +CONFIG_DRM_AMD_DC_SI=y
6506 +# CONFIG_DEBUG_KERNEL_DC is not set
6507 +# end of Display Engine Configuration
6509 +CONFIG_HSA_AMD=y
6510 +CONFIG_DRM_NOUVEAU=m
6511 +# CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT is not set
6512 +CONFIG_NOUVEAU_DEBUG=5
6513 +CONFIG_NOUVEAU_DEBUG_DEFAULT=3
6514 +# CONFIG_NOUVEAU_DEBUG_MMU is not set
6515 +# CONFIG_NOUVEAU_DEBUG_PUSH is not set
6516 +CONFIG_DRM_NOUVEAU_BACKLIGHT=y
6517 +# CONFIG_DRM_NOUVEAU_SVM is not set
6518 +CONFIG_DRM_I915=m
6519 +CONFIG_DRM_I915_FORCE_PROBE=""
6520 +CONFIG_DRM_I915_CAPTURE_ERROR=y
6521 +CONFIG_DRM_I915_COMPRESS_ERROR=y
6522 +CONFIG_DRM_I915_USERPTR=y
6523 +CONFIG_DRM_I915_GVT=y
6524 +CONFIG_DRM_I915_GVT_KVMGT=m
6527 +# drm/i915 Debugging
6529 +# CONFIG_DRM_I915_WERROR is not set
6530 +# CONFIG_DRM_I915_DEBUG is not set
6531 +# CONFIG_DRM_I915_DEBUG_MMIO is not set
6532 +# CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS is not set
6533 +# CONFIG_DRM_I915_SW_FENCE_CHECK_DAG is not set
6534 +# CONFIG_DRM_I915_DEBUG_GUC is not set
6535 +# CONFIG_DRM_I915_SELFTEST is not set
6536 +# CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS is not set
6537 +# CONFIG_DRM_I915_DEBUG_VBLANK_EVADE is not set
6538 +# CONFIG_DRM_I915_DEBUG_RUNTIME_PM is not set
6539 +# end of drm/i915 Debugging
6542 +# drm/i915 Profile Guided Optimisation
6544 +CONFIG_DRM_I915_FENCE_TIMEOUT=10000
6545 +CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND=250
6546 +CONFIG_DRM_I915_HEARTBEAT_INTERVAL=2500
6547 +CONFIG_DRM_I915_PREEMPT_TIMEOUT=640
6548 +CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT=8000
6549 +CONFIG_DRM_I915_STOP_TIMEOUT=100
6550 +CONFIG_DRM_I915_TIMESLICE_DURATION=1
6551 +# end of drm/i915 Profile Guided Optimisation
6553 +CONFIG_DRM_VGEM=m
6554 +CONFIG_DRM_VKMS=m
6555 +CONFIG_DRM_VMWGFX=m
6556 +CONFIG_DRM_VMWGFX_FBCON=y
6557 +CONFIG_DRM_GMA500=m
6558 +CONFIG_DRM_GMA600=y
6559 +CONFIG_DRM_UDL=m
6560 +CONFIG_DRM_AST=m
6561 +CONFIG_DRM_MGAG200=m
6562 +CONFIG_DRM_QXL=m
6563 +CONFIG_DRM_BOCHS=m
6564 +CONFIG_DRM_VIRTIO_GPU=m
6565 +CONFIG_DRM_PANEL=y
6568 +# Display Panels
6570 +CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN=m
6571 +# end of Display Panels
6573 +CONFIG_DRM_BRIDGE=y
6574 +CONFIG_DRM_PANEL_BRIDGE=y
6577 +# Display Interface Bridges
6579 +CONFIG_DRM_ANALOGIX_ANX78XX=m
6580 +CONFIG_DRM_ANALOGIX_DP=m
6581 +# end of Display Interface Bridges
6583 +# CONFIG_DRM_ETNAVIV is not set
6584 +CONFIG_DRM_CIRRUS_QEMU=m
6585 +CONFIG_DRM_GM12U320=m
6586 +CONFIG_TINYDRM_HX8357D=m
6587 +CONFIG_TINYDRM_ILI9225=m
6588 +CONFIG_TINYDRM_ILI9341=m
6589 +CONFIG_TINYDRM_ILI9486=m
6590 +CONFIG_TINYDRM_MI0283QT=m
6591 +CONFIG_TINYDRM_REPAPER=m
6592 +CONFIG_TINYDRM_ST7586=m
6593 +CONFIG_TINYDRM_ST7735R=m
6594 +CONFIG_DRM_XEN=y
6595 +CONFIG_DRM_XEN_FRONTEND=m
6596 +CONFIG_DRM_VBOXVIDEO=m
6597 +# CONFIG_DRM_LEGACY is not set
6598 +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y
6601 +# Frame buffer Devices
6603 +CONFIG_FB_CMDLINE=y
6604 +CONFIG_FB_NOTIFY=y
6605 +CONFIG_FB=y
6606 +CONFIG_FIRMWARE_EDID=y
6607 +CONFIG_FB_DDC=m
6608 +CONFIG_FB_BOOT_VESA_SUPPORT=y
6609 +CONFIG_FB_CFB_FILLRECT=y
6610 +CONFIG_FB_CFB_COPYAREA=y
6611 +CONFIG_FB_CFB_IMAGEBLIT=y
6612 +CONFIG_FB_SYS_FILLRECT=m
6613 +CONFIG_FB_SYS_COPYAREA=m
6614 +CONFIG_FB_SYS_IMAGEBLIT=m
6615 +# CONFIG_FB_FOREIGN_ENDIAN is not set
6616 +CONFIG_FB_SYS_FOPS=m
6617 +CONFIG_FB_DEFERRED_IO=y
6618 +CONFIG_FB_HECUBA=m
6619 +CONFIG_FB_SVGALIB=m
6620 +CONFIG_FB_BACKLIGHT=m
6621 +CONFIG_FB_MODE_HELPERS=y
6622 +CONFIG_FB_TILEBLITTING=y
6625 +# Frame buffer hardware drivers
6627 +CONFIG_FB_CIRRUS=m
6628 +CONFIG_FB_PM2=m
6629 +CONFIG_FB_PM2_FIFO_DISCONNECT=y
6630 +CONFIG_FB_CYBER2000=m
6631 +CONFIG_FB_CYBER2000_DDC=y
6632 +CONFIG_FB_ARC=m
6633 +CONFIG_FB_ASILIANT=y
6634 +CONFIG_FB_IMSTT=y
6635 +CONFIG_FB_VGA16=m
6636 +CONFIG_FB_UVESA=m
6637 +CONFIG_FB_VESA=y
6638 +CONFIG_FB_EFI=y
6639 +CONFIG_FB_N411=m
6640 +CONFIG_FB_HGA=m
6641 +CONFIG_FB_OPENCORES=m
6642 +CONFIG_FB_S1D13XXX=m
6643 +CONFIG_FB_NVIDIA=m
6644 +CONFIG_FB_NVIDIA_I2C=y
6645 +# CONFIG_FB_NVIDIA_DEBUG is not set
6646 +CONFIG_FB_NVIDIA_BACKLIGHT=y
6647 +CONFIG_FB_RIVA=m
6648 +CONFIG_FB_RIVA_I2C=y
6649 +# CONFIG_FB_RIVA_DEBUG is not set
6650 +CONFIG_FB_RIVA_BACKLIGHT=y
6651 +CONFIG_FB_I740=m
6652 +CONFIG_FB_LE80578=m
6653 +CONFIG_FB_CARILLO_RANCH=m
6654 +CONFIG_FB_INTEL=m
6655 +# CONFIG_FB_INTEL_DEBUG is not set
6656 +CONFIG_FB_INTEL_I2C=y
6657 +CONFIG_FB_MATROX=m
6658 +CONFIG_FB_MATROX_MILLENIUM=y
6659 +CONFIG_FB_MATROX_MYSTIQUE=y
6660 +CONFIG_FB_MATROX_G=y
6661 +CONFIG_FB_MATROX_I2C=m
6662 +CONFIG_FB_MATROX_MAVEN=m
6663 +CONFIG_FB_RADEON=m
6664 +CONFIG_FB_RADEON_I2C=y
6665 +CONFIG_FB_RADEON_BACKLIGHT=y
6666 +# CONFIG_FB_RADEON_DEBUG is not set
6667 +CONFIG_FB_ATY128=m
6668 +CONFIG_FB_ATY128_BACKLIGHT=y
6669 +CONFIG_FB_ATY=m
6670 +CONFIG_FB_ATY_CT=y
6671 +# CONFIG_FB_ATY_GENERIC_LCD is not set
6672 +CONFIG_FB_ATY_GX=y
6673 +CONFIG_FB_ATY_BACKLIGHT=y
6674 +CONFIG_FB_S3=m
6675 +CONFIG_FB_S3_DDC=y
6676 +CONFIG_FB_SAVAGE=m
6677 +CONFIG_FB_SAVAGE_I2C=y
6678 +# CONFIG_FB_SAVAGE_ACCEL is not set
6679 +CONFIG_FB_SIS=m
6680 +CONFIG_FB_SIS_300=y
6681 +CONFIG_FB_SIS_315=y
6682 +CONFIG_FB_VIA=m
6683 +# CONFIG_FB_VIA_DIRECT_PROCFS is not set
6684 +CONFIG_FB_VIA_X_COMPATIBILITY=y
6685 +CONFIG_FB_NEOMAGIC=m
6686 +CONFIG_FB_KYRO=m
6687 +CONFIG_FB_3DFX=m
6688 +# CONFIG_FB_3DFX_ACCEL is not set
6689 +# CONFIG_FB_3DFX_I2C is not set
6690 +CONFIG_FB_VOODOO1=m
6691 +CONFIG_FB_VT8623=m
6692 +CONFIG_FB_TRIDENT=m
6693 +CONFIG_FB_ARK=m
6694 +CONFIG_FB_PM3=m
6695 +CONFIG_FB_CARMINE=m
6696 +CONFIG_FB_CARMINE_DRAM_EVAL=y
6697 +# CONFIG_CARMINE_DRAM_CUSTOM is not set
6698 +CONFIG_FB_SM501=m
6699 +CONFIG_FB_SMSCUFX=m
6700 +CONFIG_FB_UDL=m
6701 +# CONFIG_FB_IBM_GXT4500 is not set
6702 +# CONFIG_FB_VIRTUAL is not set
6703 +CONFIG_XEN_FBDEV_FRONTEND=m
6704 +CONFIG_FB_METRONOME=m
6705 +CONFIG_FB_MB862XX=m
6706 +CONFIG_FB_MB862XX_PCI_GDC=y
6707 +CONFIG_FB_MB862XX_I2C=y
6708 +CONFIG_FB_HYPERV=m
6709 +CONFIG_FB_SIMPLE=y
6710 +CONFIG_FB_SM712=m
6711 +# end of Frame buffer Devices
6714 +# Backlight & LCD device support
6716 +CONFIG_LCD_CLASS_DEVICE=m
6717 +CONFIG_LCD_L4F00242T03=m
6718 +CONFIG_LCD_LMS283GF05=m
6719 +CONFIG_LCD_LTV350QV=m
6720 +CONFIG_LCD_ILI922X=m
6721 +CONFIG_LCD_ILI9320=m
6722 +CONFIG_LCD_TDO24M=m
6723 +CONFIG_LCD_VGG2432A4=m
6724 +CONFIG_LCD_PLATFORM=m
6725 +CONFIG_LCD_AMS369FG06=m
6726 +CONFIG_LCD_LMS501KF03=m
6727 +CONFIG_LCD_HX8357=m
6728 +CONFIG_LCD_OTM3225A=m
6729 +CONFIG_BACKLIGHT_CLASS_DEVICE=y
6730 +CONFIG_BACKLIGHT_KTD253=m
6731 +CONFIG_BACKLIGHT_LM3533=m
6732 +CONFIG_BACKLIGHT_CARILLO_RANCH=m
6733 +CONFIG_BACKLIGHT_PWM=m
6734 +CONFIG_BACKLIGHT_DA903X=m
6735 +CONFIG_BACKLIGHT_DA9052=m
6736 +CONFIG_BACKLIGHT_MAX8925=m
6737 +CONFIG_BACKLIGHT_APPLE=m
6738 +CONFIG_BACKLIGHT_QCOM_WLED=m
6739 +CONFIG_BACKLIGHT_SAHARA=m
6740 +CONFIG_BACKLIGHT_WM831X=m
6741 +CONFIG_BACKLIGHT_ADP5520=m
6742 +CONFIG_BACKLIGHT_ADP8860=m
6743 +CONFIG_BACKLIGHT_ADP8870=m
6744 +CONFIG_BACKLIGHT_88PM860X=m
6745 +CONFIG_BACKLIGHT_PCF50633=m
6746 +CONFIG_BACKLIGHT_AAT2870=m
6747 +CONFIG_BACKLIGHT_LM3630A=m
6748 +CONFIG_BACKLIGHT_LM3639=m
6749 +CONFIG_BACKLIGHT_LP855X=m
6750 +CONFIG_BACKLIGHT_LP8788=m
6751 +CONFIG_BACKLIGHT_PANDORA=m
6752 +CONFIG_BACKLIGHT_SKY81452=m
6753 +CONFIG_BACKLIGHT_AS3711=m
6754 +CONFIG_BACKLIGHT_GPIO=m
6755 +CONFIG_BACKLIGHT_LV5207LP=m
6756 +CONFIG_BACKLIGHT_BD6107=m
6757 +CONFIG_BACKLIGHT_ARCXCNN=m
6758 +CONFIG_BACKLIGHT_RAVE_SP=m
6759 +# end of Backlight & LCD device support
6761 +CONFIG_VGASTATE=m
6762 +CONFIG_VIDEOMODE_HELPERS=y
6763 +CONFIG_HDMI=y
6766 +# Console display driver support
6768 +CONFIG_VGA_CONSOLE=y
6769 +CONFIG_DUMMY_CONSOLE=y
6770 +CONFIG_DUMMY_CONSOLE_COLUMNS=80
6771 +CONFIG_DUMMY_CONSOLE_ROWS=25
6772 +CONFIG_FRAMEBUFFER_CONSOLE=y
6773 +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
6774 +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
6775 +CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER=y
6776 +# end of Console display driver support
6778 +# CONFIG_LOGO is not set
6779 +# end of Graphics support
6781 +CONFIG_SOUND=m
6782 +CONFIG_SOUND_OSS_CORE=y
6783 +# CONFIG_SOUND_OSS_CORE_PRECLAIM is not set
6784 +CONFIG_SND=m
6785 +CONFIG_SND_TIMER=m
6786 +CONFIG_SND_PCM=m
6787 +CONFIG_SND_PCM_ELD=y
6788 +CONFIG_SND_PCM_IEC958=y
6789 +CONFIG_SND_DMAENGINE_PCM=m
6790 +CONFIG_SND_HWDEP=m
6791 +CONFIG_SND_SEQ_DEVICE=m
6792 +CONFIG_SND_RAWMIDI=m
6793 +CONFIG_SND_COMPRESS_OFFLOAD=m
6794 +CONFIG_SND_JACK=y
6795 +CONFIG_SND_JACK_INPUT_DEV=y
6796 +CONFIG_SND_OSSEMUL=y
6797 +CONFIG_SND_MIXER_OSS=m
6798 +# CONFIG_SND_PCM_OSS is not set
6799 +CONFIG_SND_PCM_TIMER=y
6800 +CONFIG_SND_HRTIMER=m
6801 +CONFIG_SND_DYNAMIC_MINORS=y
6802 +CONFIG_SND_MAX_CARDS=32
6803 +CONFIG_SND_SUPPORT_OLD_API=y
6804 +CONFIG_SND_PROC_FS=y
6805 +CONFIG_SND_VERBOSE_PROCFS=y
6806 +# CONFIG_SND_VERBOSE_PRINTK is not set
6807 +# CONFIG_SND_DEBUG is not set
6808 +CONFIG_SND_VMASTER=y
6809 +CONFIG_SND_DMA_SGBUF=y
6810 +CONFIG_SND_SEQUENCER=m
6811 +CONFIG_SND_SEQ_DUMMY=m
6812 +# CONFIG_SND_SEQUENCER_OSS is not set
6813 +CONFIG_SND_SEQ_HRTIMER_DEFAULT=y
6814 +CONFIG_SND_SEQ_MIDI_EVENT=m
6815 +CONFIG_SND_SEQ_MIDI=m
6816 +CONFIG_SND_SEQ_MIDI_EMUL=m
6817 +CONFIG_SND_SEQ_VIRMIDI=m
6818 +CONFIG_SND_MPU401_UART=m
6819 +CONFIG_SND_OPL3_LIB=m
6820 +CONFIG_SND_OPL3_LIB_SEQ=m
6821 +CONFIG_SND_VX_LIB=m
6822 +CONFIG_SND_AC97_CODEC=m
6823 +CONFIG_SND_DRIVERS=y
6824 +CONFIG_SND_PCSP=m
6825 +CONFIG_SND_DUMMY=m
6826 +CONFIG_SND_ALOOP=m
6827 +CONFIG_SND_VIRMIDI=m
6828 +CONFIG_SND_MTPAV=m
6829 +CONFIG_SND_MTS64=m
6830 +CONFIG_SND_SERIAL_U16550=m
6831 +CONFIG_SND_MPU401=m
6832 +CONFIG_SND_PORTMAN2X4=m
6833 +CONFIG_SND_AC97_POWER_SAVE=y
6834 +CONFIG_SND_AC97_POWER_SAVE_DEFAULT=0
6835 +CONFIG_SND_SB_COMMON=m
6836 +CONFIG_SND_PCI=y
6837 +CONFIG_SND_AD1889=m
6838 +CONFIG_SND_ALS300=m
6839 +CONFIG_SND_ALS4000=m
6840 +CONFIG_SND_ALI5451=m
6841 +CONFIG_SND_ASIHPI=m
6842 +CONFIG_SND_ATIIXP=m
6843 +CONFIG_SND_ATIIXP_MODEM=m
6844 +CONFIG_SND_AU8810=m
6845 +CONFIG_SND_AU8820=m
6846 +CONFIG_SND_AU8830=m
6847 +CONFIG_SND_AW2=m
6848 +CONFIG_SND_AZT3328=m
6849 +CONFIG_SND_BT87X=m
6850 +# CONFIG_SND_BT87X_OVERCLOCK is not set
6851 +CONFIG_SND_CA0106=m
6852 +CONFIG_SND_CMIPCI=m
6853 +CONFIG_SND_OXYGEN_LIB=m
6854 +CONFIG_SND_OXYGEN=m
6855 +CONFIG_SND_CS4281=m
6856 +CONFIG_SND_CS46XX=m
6857 +CONFIG_SND_CS46XX_NEW_DSP=y
6858 +CONFIG_SND_CTXFI=m
6859 +CONFIG_SND_DARLA20=m
6860 +CONFIG_SND_GINA20=m
6861 +CONFIG_SND_LAYLA20=m
6862 +CONFIG_SND_DARLA24=m
6863 +CONFIG_SND_GINA24=m
6864 +CONFIG_SND_LAYLA24=m
6865 +CONFIG_SND_MONA=m
6866 +CONFIG_SND_MIA=m
6867 +CONFIG_SND_ECHO3G=m
6868 +CONFIG_SND_INDIGO=m
6869 +CONFIG_SND_INDIGOIO=m
6870 +CONFIG_SND_INDIGODJ=m
6871 +CONFIG_SND_INDIGOIOX=m
6872 +CONFIG_SND_INDIGODJX=m
6873 +CONFIG_SND_EMU10K1=m
6874 +CONFIG_SND_EMU10K1_SEQ=m
6875 +CONFIG_SND_EMU10K1X=m
6876 +CONFIG_SND_ENS1370=m
6877 +CONFIG_SND_ENS1371=m
6878 +CONFIG_SND_ES1938=m
6879 +CONFIG_SND_ES1968=m
6880 +CONFIG_SND_ES1968_INPUT=y
6881 +CONFIG_SND_ES1968_RADIO=y
6882 +CONFIG_SND_FM801=m
6883 +CONFIG_SND_FM801_TEA575X_BOOL=y
6884 +CONFIG_SND_HDSP=m
6885 +CONFIG_SND_HDSPM=m
6886 +CONFIG_SND_ICE1712=m
6887 +CONFIG_SND_ICE1724=m
6888 +CONFIG_SND_INTEL8X0=m
6889 +CONFIG_SND_INTEL8X0M=m
6890 +CONFIG_SND_KORG1212=m
6891 +CONFIG_SND_LOLA=m
6892 +CONFIG_SND_LX6464ES=m
6893 +CONFIG_SND_MAESTRO3=m
6894 +CONFIG_SND_MAESTRO3_INPUT=y
6895 +CONFIG_SND_MIXART=m
6896 +CONFIG_SND_NM256=m
6897 +CONFIG_SND_PCXHR=m
6898 +CONFIG_SND_RIPTIDE=m
6899 +CONFIG_SND_RME32=m
6900 +CONFIG_SND_RME96=m
6901 +CONFIG_SND_RME9652=m
6902 +CONFIG_SND_SONICVIBES=m
6903 +CONFIG_SND_TRIDENT=m
6904 +CONFIG_SND_VIA82XX=m
6905 +CONFIG_SND_VIA82XX_MODEM=m
6906 +CONFIG_SND_VIRTUOSO=m
6907 +CONFIG_SND_VX222=m
6908 +CONFIG_SND_YMFPCI=m
6911 +# HD-Audio
6913 +CONFIG_SND_HDA=m
6914 +CONFIG_SND_HDA_GENERIC_LEDS=y
6915 +CONFIG_SND_HDA_INTEL=m
6916 +CONFIG_SND_HDA_HWDEP=y
6917 +CONFIG_SND_HDA_RECONFIG=y
6918 +CONFIG_SND_HDA_INPUT_BEEP=y
6919 +CONFIG_SND_HDA_INPUT_BEEP_MODE=0
6920 +CONFIG_SND_HDA_PATCH_LOADER=y
6921 +CONFIG_SND_HDA_CODEC_REALTEK=m
6922 +CONFIG_SND_HDA_CODEC_ANALOG=m
6923 +CONFIG_SND_HDA_CODEC_SIGMATEL=m
6924 +CONFIG_SND_HDA_CODEC_VIA=m
6925 +CONFIG_SND_HDA_CODEC_HDMI=m
6926 +CONFIG_SND_HDA_CODEC_CIRRUS=m
6927 +CONFIG_SND_HDA_CODEC_CONEXANT=m
6928 +CONFIG_SND_HDA_CODEC_CA0110=m
6929 +CONFIG_SND_HDA_CODEC_CA0132=m
6930 +CONFIG_SND_HDA_CODEC_CA0132_DSP=y
6931 +CONFIG_SND_HDA_CODEC_CMEDIA=m
6932 +CONFIG_SND_HDA_CODEC_SI3054=m
6933 +CONFIG_SND_HDA_GENERIC=m
6934 +CONFIG_SND_HDA_POWER_SAVE_DEFAULT=1
6935 +# CONFIG_SND_HDA_INTEL_HDMI_SILENT_STREAM is not set
6936 +# end of HD-Audio
6938 +CONFIG_SND_HDA_CORE=m
6939 +CONFIG_SND_HDA_DSP_LOADER=y
6940 +CONFIG_SND_HDA_COMPONENT=y
6941 +CONFIG_SND_HDA_I915=y
6942 +CONFIG_SND_HDA_EXT_CORE=m
6943 +CONFIG_SND_HDA_PREALLOC_SIZE=0
6944 +CONFIG_SND_INTEL_NHLT=y
6945 +CONFIG_SND_INTEL_DSP_CONFIG=m
6946 +CONFIG_SND_INTEL_SOUNDWIRE_ACPI=m
6947 +CONFIG_SND_INTEL_BYT_PREFER_SOF=y
6948 +CONFIG_SND_SPI=y
6949 +CONFIG_SND_USB=y
6950 +CONFIG_SND_USB_AUDIO=m
6951 +CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER=y
6952 +CONFIG_SND_USB_UA101=m
6953 +CONFIG_SND_USB_USX2Y=m
6954 +CONFIG_SND_USB_CAIAQ=m
6955 +CONFIG_SND_USB_CAIAQ_INPUT=y
6956 +CONFIG_SND_USB_US122L=m
6957 +CONFIG_SND_USB_6FIRE=m
6958 +CONFIG_SND_USB_HIFACE=m
6959 +CONFIG_SND_BCD2000=m
6960 +CONFIG_SND_USB_LINE6=m
6961 +CONFIG_SND_USB_POD=m
6962 +CONFIG_SND_USB_PODHD=m
6963 +CONFIG_SND_USB_TONEPORT=m
6964 +CONFIG_SND_USB_VARIAX=m
6965 +CONFIG_SND_FIREWIRE=y
6966 +CONFIG_SND_FIREWIRE_LIB=m
6967 +CONFIG_SND_DICE=m
6968 +CONFIG_SND_OXFW=m
6969 +CONFIG_SND_ISIGHT=m
6970 +CONFIG_SND_FIREWORKS=m
6971 +CONFIG_SND_BEBOB=m
6972 +CONFIG_SND_FIREWIRE_DIGI00X=m
6973 +CONFIG_SND_FIREWIRE_TASCAM=m
6974 +CONFIG_SND_FIREWIRE_MOTU=m
6975 +CONFIG_SND_FIREFACE=m
6976 +CONFIG_SND_PCMCIA=y
6977 +CONFIG_SND_VXPOCKET=m
6978 +CONFIG_SND_PDAUDIOCF=m
6979 +CONFIG_SND_SOC=m
6980 +CONFIG_SND_SOC_AC97_BUS=y
6981 +CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM=y
6982 +CONFIG_SND_SOC_COMPRESS=y
6983 +CONFIG_SND_SOC_TOPOLOGY=y
6984 +CONFIG_SND_SOC_ACPI=m
6985 +CONFIG_SND_SOC_ADI=m
6986 +CONFIG_SND_SOC_ADI_AXI_I2S=m
6987 +CONFIG_SND_SOC_ADI_AXI_SPDIF=m
6988 +CONFIG_SND_SOC_AMD_ACP=m
6989 +CONFIG_SND_SOC_AMD_CZ_DA7219MX98357_MACH=m
6990 +CONFIG_SND_SOC_AMD_CZ_RT5645_MACH=m
6991 +CONFIG_SND_SOC_AMD_ACP3x=m
6992 +CONFIG_SND_SOC_AMD_RV_RT5682_MACH=m
6993 +CONFIG_SND_SOC_AMD_RENOIR=m
6994 +CONFIG_SND_SOC_AMD_RENOIR_MACH=m
6995 +CONFIG_SND_ATMEL_SOC=m
6996 +CONFIG_SND_BCM63XX_I2S_WHISTLER=m
6997 +CONFIG_SND_DESIGNWARE_I2S=m
6998 +CONFIG_SND_DESIGNWARE_PCM=y
7001 +# SoC Audio for Freescale CPUs
7005 +# Common SoC Audio options for Freescale CPUs:
7007 +CONFIG_SND_SOC_FSL_ASRC=m
7008 +CONFIG_SND_SOC_FSL_SAI=m
7009 +CONFIG_SND_SOC_FSL_MQS=m
7010 +CONFIG_SND_SOC_FSL_AUDMIX=m
7011 +CONFIG_SND_SOC_FSL_SSI=m
7012 +CONFIG_SND_SOC_FSL_SPDIF=m
7013 +CONFIG_SND_SOC_FSL_ESAI=m
7014 +CONFIG_SND_SOC_FSL_MICFIL=m
7015 +CONFIG_SND_SOC_FSL_EASRC=m
7016 +CONFIG_SND_SOC_FSL_XCVR=m
7017 +CONFIG_SND_SOC_IMX_AUDMUX=m
7018 +# end of SoC Audio for Freescale CPUs
7020 +CONFIG_SND_I2S_HI6210_I2S=m
7021 +CONFIG_SND_SOC_IMG=y
7022 +CONFIG_SND_SOC_IMG_I2S_IN=m
7023 +CONFIG_SND_SOC_IMG_I2S_OUT=m
7024 +CONFIG_SND_SOC_IMG_PARALLEL_OUT=m
7025 +CONFIG_SND_SOC_IMG_SPDIF_IN=m
7026 +CONFIG_SND_SOC_IMG_SPDIF_OUT=m
7027 +CONFIG_SND_SOC_IMG_PISTACHIO_INTERNAL_DAC=m
7028 +CONFIG_SND_SOC_INTEL_SST_TOPLEVEL=y
7029 +CONFIG_SND_SOC_INTEL_SST=m
7030 +CONFIG_SND_SOC_INTEL_CATPT=m
7031 +CONFIG_SND_SST_ATOM_HIFI2_PLATFORM=m
7032 +CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_PCI=m
7033 +CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_ACPI=m
7034 +# CONFIG_SND_SOC_INTEL_SKYLAKE is not set
7035 +CONFIG_SND_SOC_INTEL_SKL=m
7036 +CONFIG_SND_SOC_INTEL_APL=m
7037 +CONFIG_SND_SOC_INTEL_KBL=m
7038 +CONFIG_SND_SOC_INTEL_GLK=m
7039 +# CONFIG_SND_SOC_INTEL_CNL is not set
7040 +# CONFIG_SND_SOC_INTEL_CFL is not set
7041 +# CONFIG_SND_SOC_INTEL_CML_H is not set
7042 +# CONFIG_SND_SOC_INTEL_CML_LP is not set
7043 +CONFIG_SND_SOC_INTEL_SKYLAKE_FAMILY=m
7044 +CONFIG_SND_SOC_INTEL_SKYLAKE_SSP_CLK=m
7045 +# CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC is not set
7046 +CONFIG_SND_SOC_INTEL_SKYLAKE_COMMON=m
7047 +CONFIG_SND_SOC_ACPI_INTEL_MATCH=m
7048 +CONFIG_SND_SOC_INTEL_MACH=y
7049 +# CONFIG_SND_SOC_INTEL_USER_FRIENDLY_LONG_NAMES is not set
7050 +CONFIG_SND_SOC_INTEL_HASWELL_MACH=m
7051 +CONFIG_SND_SOC_INTEL_BDW_RT5650_MACH=m
7052 +CONFIG_SND_SOC_INTEL_BDW_RT5677_MACH=m
7053 +CONFIG_SND_SOC_INTEL_BROADWELL_MACH=m
7054 +CONFIG_SND_SOC_INTEL_BYTCR_RT5640_MACH=m
7055 +CONFIG_SND_SOC_INTEL_BYTCR_RT5651_MACH=m
7056 +CONFIG_SND_SOC_INTEL_BYTCR_WM5102_MACH=m
7057 +CONFIG_SND_SOC_INTEL_CHT_BSW_RT5672_MACH=m
7058 +CONFIG_SND_SOC_INTEL_CHT_BSW_RT5645_MACH=m
7059 +CONFIG_SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH=m
7060 +CONFIG_SND_SOC_INTEL_CHT_BSW_NAU8824_MACH=m
7061 +CONFIG_SND_SOC_INTEL_BYT_CHT_CX2072X_MACH=m
7062 +CONFIG_SND_SOC_INTEL_BYT_CHT_DA7213_MACH=m
7063 +CONFIG_SND_SOC_INTEL_BYT_CHT_ES8316_MACH=m
7064 +# CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH is not set
7065 +CONFIG_SND_SOC_INTEL_SKL_RT286_MACH=m
7066 +CONFIG_SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH=m
7067 +CONFIG_SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH=m
7068 +CONFIG_SND_SOC_INTEL_DA7219_MAX98357A_GENERIC=m
7069 +CONFIG_SND_SOC_INTEL_BXT_DA7219_MAX98357A_COMMON=m
7070 +CONFIG_SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH=m
7071 +CONFIG_SND_SOC_INTEL_BXT_RT298_MACH=m
7072 +CONFIG_SND_SOC_INTEL_SOF_WM8804_MACH=m
7073 +CONFIG_SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH=m
7074 +CONFIG_SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH=m
7075 +CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98357A_MACH=m
7076 +CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98927_MACH=m
7077 +CONFIG_SND_SOC_INTEL_KBL_RT5660_MACH=m
7078 +CONFIG_SND_SOC_INTEL_GLK_DA7219_MAX98357A_MACH=m
7079 +CONFIG_SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH=m
7080 +CONFIG_SND_SOC_INTEL_SKL_HDA_DSP_GENERIC_MACH=m
7081 +CONFIG_SND_SOC_INTEL_SOF_RT5682_MACH=m
7082 +CONFIG_SND_SOC_INTEL_SOF_PCM512x_MACH=m
7083 +CONFIG_SND_SOC_INTEL_CML_LP_DA7219_MAX98357A_MACH=m
7084 +CONFIG_SND_SOC_INTEL_SOF_CML_RT1011_RT5682_MACH=m
7085 +CONFIG_SND_SOC_INTEL_SOF_DA7219_MAX98373_MACH=m
7086 +CONFIG_SND_SOC_INTEL_EHL_RT5660_MACH=m
7087 +CONFIG_SND_SOC_MTK_BTCVSD=m
7088 +CONFIG_SND_SOC_SOF_TOPLEVEL=y
7089 +CONFIG_SND_SOC_SOF_PCI_DEV=m
7090 +CONFIG_SND_SOC_SOF_PCI=m
7091 +CONFIG_SND_SOC_SOF_ACPI=m
7092 +CONFIG_SND_SOC_SOF_ACPI_DEV=m
7093 +# CONFIG_SND_SOC_SOF_DEBUG_PROBES is not set
7094 +# CONFIG_SND_SOC_SOF_DEVELOPER_SUPPORT is not set
7095 +CONFIG_SND_SOC_SOF=m
7096 +CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE=y
7097 +CONFIG_SND_SOC_SOF_INTEL_TOPLEVEL=y
7098 +CONFIG_SND_SOC_SOF_INTEL_HIFI_EP_IPC=m
7099 +CONFIG_SND_SOC_SOF_INTEL_ATOM_HIFI_EP=m
7100 +CONFIG_SND_SOC_SOF_INTEL_COMMON=m
7101 +CONFIG_SND_SOC_SOF_BAYTRAIL=m
7102 +CONFIG_SND_SOC_SOF_BROADWELL=m
7103 +CONFIG_SND_SOC_SOF_MERRIFIELD=m
7104 +CONFIG_SND_SOC_SOF_INTEL_APL=m
7105 +CONFIG_SND_SOC_SOF_APOLLOLAKE=m
7106 +CONFIG_SND_SOC_SOF_GEMINILAKE=m
7107 +CONFIG_SND_SOC_SOF_INTEL_CNL=m
7108 +CONFIG_SND_SOC_SOF_CANNONLAKE=m
7109 +CONFIG_SND_SOC_SOF_COFFEELAKE=m
7110 +CONFIG_SND_SOC_SOF_COMETLAKE=m
7111 +CONFIG_SND_SOC_SOF_INTEL_ICL=m
7112 +CONFIG_SND_SOC_SOF_ICELAKE=m
7113 +CONFIG_SND_SOC_SOF_JASPERLAKE=m
7114 +CONFIG_SND_SOC_SOF_INTEL_TGL=m
7115 +CONFIG_SND_SOC_SOF_TIGERLAKE=m
7116 +CONFIG_SND_SOC_SOF_ELKHARTLAKE=m
7117 +CONFIG_SND_SOC_SOF_ALDERLAKE=m
7118 +CONFIG_SND_SOC_SOF_HDA_COMMON=m
7119 +CONFIG_SND_SOC_SOF_HDA_LINK=y
7120 +CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC=y
7121 +# CONFIG_SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1 is not set
7122 +CONFIG_SND_SOC_SOF_HDA_LINK_BASELINE=m
7123 +CONFIG_SND_SOC_SOF_HDA=m
7124 +CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE=m
7125 +CONFIG_SND_SOC_SOF_INTEL_SOUNDWIRE=m
7126 +CONFIG_SND_SOC_SOF_XTENSA=m
7129 +# STMicroelectronics STM32 SOC audio support
7131 +# end of STMicroelectronics STM32 SOC audio support
7133 +CONFIG_SND_SOC_XILINX_I2S=m
7134 +CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER=m
7135 +CONFIG_SND_SOC_XILINX_SPDIF=m
7136 +CONFIG_SND_SOC_XTFPGA_I2S=m
7137 +CONFIG_SND_SOC_I2C_AND_SPI=m
7140 +# CODEC drivers
7142 +CONFIG_SND_SOC_ARIZONA=m
7143 +CONFIG_SND_SOC_WM_ADSP=m
7144 +CONFIG_SND_SOC_AC97_CODEC=m
7145 +CONFIG_SND_SOC_ADAU_UTILS=m
7146 +CONFIG_SND_SOC_ADAU1372=m
7147 +CONFIG_SND_SOC_ADAU1372_I2C=m
7148 +CONFIG_SND_SOC_ADAU1372_SPI=m
7149 +CONFIG_SND_SOC_ADAU1701=m
7150 +CONFIG_SND_SOC_ADAU17X1=m
7151 +CONFIG_SND_SOC_ADAU1761=m
7152 +CONFIG_SND_SOC_ADAU1761_I2C=m
7153 +CONFIG_SND_SOC_ADAU1761_SPI=m
7154 +CONFIG_SND_SOC_ADAU7002=m
7155 +CONFIG_SND_SOC_ADAU7118=m
7156 +CONFIG_SND_SOC_ADAU7118_HW=m
7157 +CONFIG_SND_SOC_ADAU7118_I2C=m
7158 +CONFIG_SND_SOC_AK4104=m
7159 +CONFIG_SND_SOC_AK4118=m
7160 +CONFIG_SND_SOC_AK4458=m
7161 +CONFIG_SND_SOC_AK4554=m
7162 +CONFIG_SND_SOC_AK4613=m
7163 +CONFIG_SND_SOC_AK4642=m
7164 +CONFIG_SND_SOC_AK5386=m
7165 +CONFIG_SND_SOC_AK5558=m
7166 +CONFIG_SND_SOC_ALC5623=m
7167 +CONFIG_SND_SOC_BD28623=m
7168 +CONFIG_SND_SOC_BT_SCO=m
7169 +CONFIG_SND_SOC_CROS_EC_CODEC=m
7170 +CONFIG_SND_SOC_CS35L32=m
7171 +CONFIG_SND_SOC_CS35L33=m
7172 +CONFIG_SND_SOC_CS35L34=m
7173 +CONFIG_SND_SOC_CS35L35=m
7174 +CONFIG_SND_SOC_CS35L36=m
7175 +CONFIG_SND_SOC_CS42L42=m
7176 +CONFIG_SND_SOC_CS42L51=m
7177 +CONFIG_SND_SOC_CS42L51_I2C=m
7178 +CONFIG_SND_SOC_CS42L52=m
7179 +CONFIG_SND_SOC_CS42L56=m
7180 +CONFIG_SND_SOC_CS42L73=m
7181 +CONFIG_SND_SOC_CS4234=m
7182 +CONFIG_SND_SOC_CS4265=m
7183 +CONFIG_SND_SOC_CS4270=m
7184 +CONFIG_SND_SOC_CS4271=m
7185 +CONFIG_SND_SOC_CS4271_I2C=m
7186 +CONFIG_SND_SOC_CS4271_SPI=m
7187 +CONFIG_SND_SOC_CS42XX8=m
7188 +CONFIG_SND_SOC_CS42XX8_I2C=m
7189 +CONFIG_SND_SOC_CS43130=m
7190 +CONFIG_SND_SOC_CS4341=m
7191 +CONFIG_SND_SOC_CS4349=m
7192 +CONFIG_SND_SOC_CS53L30=m
7193 +CONFIG_SND_SOC_CX2072X=m
7194 +CONFIG_SND_SOC_DA7213=m
7195 +CONFIG_SND_SOC_DA7219=m
7196 +CONFIG_SND_SOC_DMIC=m
7197 +CONFIG_SND_SOC_HDMI_CODEC=m
7198 +CONFIG_SND_SOC_ES7134=m
7199 +CONFIG_SND_SOC_ES7241=m
7200 +CONFIG_SND_SOC_ES8316=m
7201 +CONFIG_SND_SOC_ES8328=m
7202 +CONFIG_SND_SOC_ES8328_I2C=m
7203 +CONFIG_SND_SOC_ES8328_SPI=m
7204 +CONFIG_SND_SOC_GTM601=m
7205 +CONFIG_SND_SOC_HDAC_HDMI=m
7206 +CONFIG_SND_SOC_HDAC_HDA=m
7207 +CONFIG_SND_SOC_INNO_RK3036=m
7208 +CONFIG_SND_SOC_MAX98088=m
7209 +CONFIG_SND_SOC_MAX98090=m
7210 +CONFIG_SND_SOC_MAX98357A=m
7211 +CONFIG_SND_SOC_MAX98504=m
7212 +CONFIG_SND_SOC_MAX9867=m
7213 +CONFIG_SND_SOC_MAX98927=m
7214 +CONFIG_SND_SOC_MAX98373=m
7215 +CONFIG_SND_SOC_MAX98373_I2C=m
7216 +CONFIG_SND_SOC_MAX98373_SDW=m
7217 +CONFIG_SND_SOC_MAX98390=m
7218 +CONFIG_SND_SOC_MAX9860=m
7219 +CONFIG_SND_SOC_MSM8916_WCD_ANALOG=m
7220 +CONFIG_SND_SOC_MSM8916_WCD_DIGITAL=m
7221 +CONFIG_SND_SOC_PCM1681=m
7222 +CONFIG_SND_SOC_PCM1789=m
7223 +CONFIG_SND_SOC_PCM1789_I2C=m
7224 +CONFIG_SND_SOC_PCM179X=m
7225 +CONFIG_SND_SOC_PCM179X_I2C=m
7226 +CONFIG_SND_SOC_PCM179X_SPI=m
7227 +CONFIG_SND_SOC_PCM186X=m
7228 +CONFIG_SND_SOC_PCM186X_I2C=m
7229 +CONFIG_SND_SOC_PCM186X_SPI=m
7230 +CONFIG_SND_SOC_PCM3060=m
7231 +CONFIG_SND_SOC_PCM3060_I2C=m
7232 +CONFIG_SND_SOC_PCM3060_SPI=m
7233 +CONFIG_SND_SOC_PCM3168A=m
7234 +CONFIG_SND_SOC_PCM3168A_I2C=m
7235 +CONFIG_SND_SOC_PCM3168A_SPI=m
7236 +CONFIG_SND_SOC_PCM5102A=m
7237 +CONFIG_SND_SOC_PCM512x=m
7238 +CONFIG_SND_SOC_PCM512x_I2C=m
7239 +CONFIG_SND_SOC_PCM512x_SPI=m
7240 +CONFIG_SND_SOC_RK3328=m
7241 +CONFIG_SND_SOC_RL6231=m
7242 +CONFIG_SND_SOC_RL6347A=m
7243 +CONFIG_SND_SOC_RT286=m
7244 +CONFIG_SND_SOC_RT298=m
7245 +CONFIG_SND_SOC_RT1011=m
7246 +CONFIG_SND_SOC_RT1015=m
7247 +CONFIG_SND_SOC_RT1308_SDW=m
7248 +CONFIG_SND_SOC_RT5514=m
7249 +CONFIG_SND_SOC_RT5514_SPI=m
7250 +CONFIG_SND_SOC_RT5616=m
7251 +CONFIG_SND_SOC_RT5631=m
7252 +CONFIG_SND_SOC_RT5640=m
7253 +CONFIG_SND_SOC_RT5645=m
7254 +CONFIG_SND_SOC_RT5651=m
7255 +CONFIG_SND_SOC_RT5659=m
7256 +CONFIG_SND_SOC_RT5660=m
7257 +CONFIG_SND_SOC_RT5663=m
7258 +CONFIG_SND_SOC_RT5670=m
7259 +CONFIG_SND_SOC_RT5677=m
7260 +CONFIG_SND_SOC_RT5677_SPI=m
7261 +CONFIG_SND_SOC_RT5682=m
7262 +CONFIG_SND_SOC_RT5682_I2C=m
7263 +CONFIG_SND_SOC_RT5682_SDW=m
7264 +CONFIG_SND_SOC_RT700=m
7265 +CONFIG_SND_SOC_RT700_SDW=m
7266 +CONFIG_SND_SOC_RT711=m
7267 +CONFIG_SND_SOC_RT711_SDW=m
7268 +CONFIG_SND_SOC_RT715=m
7269 +CONFIG_SND_SOC_RT715_SDW=m
7270 +CONFIG_SND_SOC_SGTL5000=m
7271 +CONFIG_SND_SOC_SI476X=m
7272 +CONFIG_SND_SOC_SIGMADSP=m
7273 +CONFIG_SND_SOC_SIGMADSP_I2C=m
7274 +CONFIG_SND_SOC_SIGMADSP_REGMAP=m
7275 +CONFIG_SND_SOC_SIMPLE_AMPLIFIER=m
7276 +CONFIG_SND_SOC_SIMPLE_MUX=m
7277 +CONFIG_SND_SOC_SPDIF=m
7278 +CONFIG_SND_SOC_SSM2305=m
7279 +CONFIG_SND_SOC_SSM2602=m
7280 +CONFIG_SND_SOC_SSM2602_SPI=m
7281 +CONFIG_SND_SOC_SSM2602_I2C=m
7282 +CONFIG_SND_SOC_SSM4567=m
7283 +CONFIG_SND_SOC_STA32X=m
7284 +CONFIG_SND_SOC_STA350=m
7285 +CONFIG_SND_SOC_STI_SAS=m
7286 +CONFIG_SND_SOC_TAS2552=m
7287 +CONFIG_SND_SOC_TAS2562=m
7288 +CONFIG_SND_SOC_TAS2764=m
7289 +CONFIG_SND_SOC_TAS2770=m
7290 +CONFIG_SND_SOC_TAS5086=m
7291 +CONFIG_SND_SOC_TAS571X=m
7292 +CONFIG_SND_SOC_TAS5720=m
7293 +CONFIG_SND_SOC_TAS6424=m
7294 +CONFIG_SND_SOC_TDA7419=m
7295 +CONFIG_SND_SOC_TFA9879=m
7296 +CONFIG_SND_SOC_TLV320AIC23=m
7297 +CONFIG_SND_SOC_TLV320AIC23_I2C=m
7298 +CONFIG_SND_SOC_TLV320AIC23_SPI=m
7299 +CONFIG_SND_SOC_TLV320AIC31XX=m
7300 +CONFIG_SND_SOC_TLV320AIC32X4=m
7301 +CONFIG_SND_SOC_TLV320AIC32X4_I2C=m
7302 +CONFIG_SND_SOC_TLV320AIC32X4_SPI=m
7303 +CONFIG_SND_SOC_TLV320AIC3X=m
7304 +CONFIG_SND_SOC_TLV320ADCX140=m
7305 +CONFIG_SND_SOC_TS3A227E=m
7306 +CONFIG_SND_SOC_TSCS42XX=m
7307 +CONFIG_SND_SOC_TSCS454=m
7308 +CONFIG_SND_SOC_UDA1334=m
7309 +CONFIG_SND_SOC_WCD9335=m
7310 +CONFIG_SND_SOC_WCD934X=m
7311 +CONFIG_SND_SOC_WM5102=m
7312 +CONFIG_SND_SOC_WM8510=m
7313 +CONFIG_SND_SOC_WM8523=m
7314 +CONFIG_SND_SOC_WM8524=m
7315 +CONFIG_SND_SOC_WM8580=m
7316 +CONFIG_SND_SOC_WM8711=m
7317 +CONFIG_SND_SOC_WM8728=m
7318 +CONFIG_SND_SOC_WM8731=m
7319 +CONFIG_SND_SOC_WM8737=m
7320 +CONFIG_SND_SOC_WM8741=m
7321 +CONFIG_SND_SOC_WM8750=m
7322 +CONFIG_SND_SOC_WM8753=m
7323 +CONFIG_SND_SOC_WM8770=m
7324 +CONFIG_SND_SOC_WM8776=m
7325 +CONFIG_SND_SOC_WM8782=m
7326 +CONFIG_SND_SOC_WM8804=m
7327 +CONFIG_SND_SOC_WM8804_I2C=m
7328 +CONFIG_SND_SOC_WM8804_SPI=m
7329 +CONFIG_SND_SOC_WM8903=m
7330 +CONFIG_SND_SOC_WM8904=m
7331 +CONFIG_SND_SOC_WM8960=m
7332 +CONFIG_SND_SOC_WM8962=m
7333 +CONFIG_SND_SOC_WM8974=m
7334 +CONFIG_SND_SOC_WM8978=m
7335 +CONFIG_SND_SOC_WM8985=m
7336 +CONFIG_SND_SOC_WSA881X=m
7337 +CONFIG_SND_SOC_ZL38060=m
7338 +CONFIG_SND_SOC_ZX_AUD96P22=m
7339 +CONFIG_SND_SOC_MAX9759=m
7340 +CONFIG_SND_SOC_MT6351=m
7341 +CONFIG_SND_SOC_MT6358=m
7342 +CONFIG_SND_SOC_MT6660=m
7343 +CONFIG_SND_SOC_NAU8315=m
7344 +CONFIG_SND_SOC_NAU8540=m
7345 +CONFIG_SND_SOC_NAU8810=m
7346 +CONFIG_SND_SOC_NAU8822=m
7347 +CONFIG_SND_SOC_NAU8824=m
7348 +CONFIG_SND_SOC_NAU8825=m
7349 +CONFIG_SND_SOC_TPA6130A2=m
7350 +CONFIG_SND_SOC_LPASS_WSA_MACRO=m
7351 +CONFIG_SND_SOC_LPASS_VA_MACRO=m
7352 +CONFIG_SND_SOC_LPASS_RX_MACRO=m
7353 +CONFIG_SND_SOC_LPASS_TX_MACRO=m
7354 +# end of CODEC drivers
7356 +CONFIG_SND_SIMPLE_CARD_UTILS=m
7357 +CONFIG_SND_SIMPLE_CARD=m
7358 +CONFIG_SND_X86=y
7359 +CONFIG_HDMI_LPE_AUDIO=m
7360 +CONFIG_SND_SYNTH_EMUX=m
7361 +CONFIG_SND_XEN_FRONTEND=m
7362 +CONFIG_AC97_BUS=m
7365 +# HID support
7367 +CONFIG_HID=m
7368 +CONFIG_HID_BATTERY_STRENGTH=y
7369 +CONFIG_HIDRAW=y
7370 +CONFIG_UHID=m
7371 +CONFIG_HID_GENERIC=m
7374 +# Special HID drivers
7376 +CONFIG_HID_A4TECH=m
7377 +CONFIG_HID_ACCUTOUCH=m
7378 +CONFIG_HID_ACRUX=m
7379 +CONFIG_HID_ACRUX_FF=y
7380 +CONFIG_HID_APPLE=m
7381 +CONFIG_HID_APPLEIR=m
7382 +CONFIG_HID_ASUS=m
7383 +CONFIG_HID_AUREAL=m
7384 +CONFIG_HID_BELKIN=m
7385 +CONFIG_HID_BETOP_FF=m
7386 +CONFIG_HID_BIGBEN_FF=m
7387 +CONFIG_HID_CHERRY=m
7388 +CONFIG_HID_CHICONY=m
7389 +CONFIG_HID_CORSAIR=m
7390 +CONFIG_HID_COUGAR=m
7391 +CONFIG_HID_MACALLY=m
7392 +CONFIG_HID_PRODIKEYS=m
7393 +CONFIG_HID_CMEDIA=m
7394 +CONFIG_HID_CP2112=m
7395 +CONFIG_HID_CREATIVE_SB0540=m
7396 +CONFIG_HID_CYPRESS=m
7397 +CONFIG_HID_DRAGONRISE=m
7398 +CONFIG_DRAGONRISE_FF=y
7399 +CONFIG_HID_EMS_FF=m
7400 +CONFIG_HID_ELAN=m
7401 +CONFIG_HID_ELECOM=m
7402 +CONFIG_HID_ELO=m
7403 +CONFIG_HID_EZKEY=m
7404 +CONFIG_HID_GEMBIRD=m
7405 +CONFIG_HID_GFRM=m
7406 +CONFIG_HID_GLORIOUS=m
7407 +CONFIG_HID_HOLTEK=m
7408 +CONFIG_HOLTEK_FF=y
7409 +CONFIG_HID_GOOGLE_HAMMER=m
7410 +CONFIG_HID_VIVALDI=m
7411 +CONFIG_HID_GT683R=m
7412 +CONFIG_HID_KEYTOUCH=m
7413 +CONFIG_HID_KYE=m
7414 +CONFIG_HID_UCLOGIC=m
7415 +CONFIG_HID_WALTOP=m
7416 +CONFIG_HID_VIEWSONIC=m
7417 +CONFIG_HID_GYRATION=m
7418 +CONFIG_HID_ICADE=m
7419 +CONFIG_HID_ITE=m
7420 +CONFIG_HID_JABRA=m
7421 +CONFIG_HID_TWINHAN=m
7422 +CONFIG_HID_KENSINGTON=m
7423 +CONFIG_HID_LCPOWER=m
7424 +CONFIG_HID_LED=m
7425 +CONFIG_HID_LENOVO=m
7426 +CONFIG_HID_LOGITECH=m
7427 +CONFIG_HID_LOGITECH_DJ=m
7428 +CONFIG_HID_LOGITECH_HIDPP=m
7429 +CONFIG_LOGITECH_FF=y
7430 +CONFIG_LOGIRUMBLEPAD2_FF=y
7431 +CONFIG_LOGIG940_FF=y
7432 +CONFIG_LOGIWHEELS_FF=y
7433 +CONFIG_HID_MAGICMOUSE=m
7434 +CONFIG_HID_MALTRON=m
7435 +CONFIG_HID_MAYFLASH=m
7436 +CONFIG_HID_REDRAGON=m
7437 +CONFIG_HID_MICROSOFT=m
7438 +CONFIG_HID_MONTEREY=m
7439 +CONFIG_HID_MULTITOUCH=m
7440 +CONFIG_HID_NTI=m
7441 +CONFIG_HID_NTRIG=m
7442 +CONFIG_HID_ORTEK=m
7443 +CONFIG_HID_PANTHERLORD=m
7444 +CONFIG_PANTHERLORD_FF=y
7445 +CONFIG_HID_PENMOUNT=m
7446 +CONFIG_HID_PETALYNX=m
7447 +CONFIG_HID_PICOLCD=m
7448 +CONFIG_HID_PICOLCD_FB=y
7449 +CONFIG_HID_PICOLCD_BACKLIGHT=y
7450 +CONFIG_HID_PICOLCD_LCD=y
7451 +CONFIG_HID_PICOLCD_LEDS=y
7452 +CONFIG_HID_PICOLCD_CIR=y
7453 +CONFIG_HID_PLANTRONICS=m
7454 +CONFIG_HID_PLAYSTATION=m
7455 +CONFIG_PLAYSTATION_FF=y
7456 +CONFIG_HID_PRIMAX=m
7457 +CONFIG_HID_RETRODE=m
7458 +CONFIG_HID_ROCCAT=m
7459 +CONFIG_HID_SAITEK=m
7460 +CONFIG_HID_SAMSUNG=m
7461 +CONFIG_HID_SONY=m
7462 +CONFIG_SONY_FF=y
7463 +CONFIG_HID_SPEEDLINK=m
7464 +CONFIG_HID_STEAM=m
7465 +CONFIG_HID_STEELSERIES=m
7466 +CONFIG_HID_SUNPLUS=m
7467 +CONFIG_HID_RMI=m
7468 +CONFIG_HID_GREENASIA=m
7469 +CONFIG_GREENASIA_FF=y
7470 +CONFIG_HID_HYPERV_MOUSE=m
7471 +CONFIG_HID_SMARTJOYPLUS=m
7472 +CONFIG_SMARTJOYPLUS_FF=y
7473 +CONFIG_HID_TIVO=m
7474 +CONFIG_HID_TOPSEED=m
7475 +CONFIG_HID_THINGM=m
7476 +CONFIG_HID_THRUSTMASTER=m
7477 +CONFIG_THRUSTMASTER_FF=y
7478 +CONFIG_HID_UDRAW_PS3=m
7479 +CONFIG_HID_U2FZERO=m
7480 +CONFIG_HID_WACOM=m
7481 +CONFIG_HID_WIIMOTE=m
7482 +CONFIG_HID_XINMO=m
7483 +CONFIG_HID_ZEROPLUS=m
7484 +CONFIG_ZEROPLUS_FF=y
7485 +CONFIG_HID_ZYDACRON=m
7486 +CONFIG_HID_SENSOR_HUB=m
7487 +CONFIG_HID_SENSOR_CUSTOM_SENSOR=m
7488 +CONFIG_HID_ALPS=m
7489 +CONFIG_HID_MCP2221=m
7490 +# end of Special HID drivers
7493 +# USB HID support
7495 +CONFIG_USB_HID=m
7496 +CONFIG_HID_PID=y
7497 +CONFIG_USB_HIDDEV=y
7500 +# USB HID Boot Protocol drivers
7502 +CONFIG_USB_KBD=m
7503 +CONFIG_USB_MOUSE=m
7504 +# end of USB HID Boot Protocol drivers
7505 +# end of USB HID support
7508 +# I2C HID support
7510 +CONFIG_I2C_HID_ACPI=m
7511 +# end of I2C HID support
7513 +CONFIG_I2C_HID_CORE=m
7516 +# Intel ISH HID support
7518 +CONFIG_INTEL_ISH_HID=m
7519 +CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER=m
7520 +# end of Intel ISH HID support
7523 +# AMD SFH HID Support
7525 +CONFIG_AMD_SFH_HID=m
7526 +# end of AMD SFH HID Support
7527 +# end of HID support
7529 +CONFIG_USB_OHCI_LITTLE_ENDIAN=y
7530 +CONFIG_USB_SUPPORT=y
7531 +CONFIG_USB_COMMON=y
7532 +CONFIG_USB_LED_TRIG=y
7533 +CONFIG_USB_ULPI_BUS=m
7534 +CONFIG_USB_CONN_GPIO=m
7535 +CONFIG_USB_ARCH_HAS_HCD=y
7536 +CONFIG_USB=y
7537 +CONFIG_USB_PCI=y
7538 +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
7541 +# Miscellaneous USB options
7543 +CONFIG_USB_DEFAULT_PERSIST=y
7544 +# CONFIG_USB_FEW_INIT_RETRIES is not set
7545 +CONFIG_USB_DYNAMIC_MINORS=y
7546 +# CONFIG_USB_OTG is not set
7547 +# CONFIG_USB_OTG_PRODUCTLIST is not set
7548 +# CONFIG_USB_OTG_DISABLE_EXTERNAL_HUB is not set
7549 +CONFIG_USB_LEDS_TRIGGER_USBPORT=m
7550 +CONFIG_USB_AUTOSUSPEND_DELAY=2
7551 +CONFIG_USB_MON=m
7554 +# USB Host Controller Drivers
7556 +CONFIG_USB_C67X00_HCD=m
7557 +CONFIG_USB_XHCI_HCD=y
7558 +CONFIG_USB_XHCI_DBGCAP=y
7559 +CONFIG_USB_XHCI_PCI=m
7560 +CONFIG_USB_XHCI_PCI_RENESAS=m
7561 +CONFIG_USB_XHCI_PLATFORM=m
7562 +CONFIG_USB_EHCI_HCD=y
7563 +CONFIG_USB_EHCI_ROOT_HUB_TT=y
7564 +CONFIG_USB_EHCI_TT_NEWSCHED=y
7565 +CONFIG_USB_EHCI_PCI=y
7566 +CONFIG_USB_EHCI_FSL=m
7567 +CONFIG_USB_EHCI_HCD_PLATFORM=y
7568 +CONFIG_USB_OXU210HP_HCD=m
7569 +CONFIG_USB_ISP116X_HCD=m
7570 +CONFIG_USB_FOTG210_HCD=m
7571 +CONFIG_USB_MAX3421_HCD=m
7572 +CONFIG_USB_OHCI_HCD=y
7573 +CONFIG_USB_OHCI_HCD_PCI=y
7574 +CONFIG_USB_OHCI_HCD_PLATFORM=y
7575 +CONFIG_USB_UHCI_HCD=y
7576 +CONFIG_USB_U132_HCD=m
7577 +CONFIG_USB_SL811_HCD=m
7578 +CONFIG_USB_SL811_HCD_ISO=y
7579 +CONFIG_USB_SL811_CS=m
7580 +CONFIG_USB_R8A66597_HCD=m
7581 +CONFIG_USB_HCD_BCMA=m
7582 +CONFIG_USB_HCD_SSB=m
7583 +# CONFIG_USB_HCD_TEST_MODE is not set
7586 +# USB Device Class drivers
7588 +CONFIG_USB_ACM=m
7589 +CONFIG_USB_PRINTER=m
7590 +CONFIG_USB_WDM=m
7591 +CONFIG_USB_TMC=m
7594 +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
7598 +# also be needed; see USB_STORAGE Help for more info
7600 +CONFIG_USB_STORAGE=m
7601 +# CONFIG_USB_STORAGE_DEBUG is not set
7602 +CONFIG_USB_STORAGE_REALTEK=m
7603 +CONFIG_REALTEK_AUTOPM=y
7604 +CONFIG_USB_STORAGE_DATAFAB=m
7605 +CONFIG_USB_STORAGE_FREECOM=m
7606 +CONFIG_USB_STORAGE_ISD200=m
7607 +CONFIG_USB_STORAGE_USBAT=m
7608 +CONFIG_USB_STORAGE_SDDR09=m
7609 +CONFIG_USB_STORAGE_SDDR55=m
7610 +CONFIG_USB_STORAGE_JUMPSHOT=m
7611 +CONFIG_USB_STORAGE_ALAUDA=m
7612 +CONFIG_USB_STORAGE_ONETOUCH=m
7613 +CONFIG_USB_STORAGE_KARMA=m
7614 +CONFIG_USB_STORAGE_CYPRESS_ATACB=m
7615 +CONFIG_USB_STORAGE_ENE_UB6250=m
7616 +CONFIG_USB_UAS=m
7619 +# USB Imaging devices
7621 +CONFIG_USB_MDC800=m
7622 +CONFIG_USB_MICROTEK=m
7623 +CONFIG_USBIP_CORE=m
7624 +CONFIG_USBIP_VHCI_HCD=m
7625 +CONFIG_USBIP_VHCI_HC_PORTS=8
7626 +CONFIG_USBIP_VHCI_NR_HCS=1
7627 +CONFIG_USBIP_HOST=m
7628 +CONFIG_USBIP_VUDC=m
7629 +# CONFIG_USBIP_DEBUG is not set
7630 +CONFIG_USB_CDNS_SUPPORT=m
7631 +CONFIG_USB_CDNS_HOST=y
7632 +CONFIG_USB_CDNS3=m
7633 +CONFIG_USB_CDNS3_GADGET=y
7634 +CONFIG_USB_CDNS3_HOST=y
7635 +CONFIG_USB_CDNS3_PCI_WRAP=m
7636 +CONFIG_USB_CDNSP_PCI=m
7637 +CONFIG_USB_CDNSP_GADGET=y
7638 +CONFIG_USB_CDNSP_HOST=y
7639 +CONFIG_USB_MUSB_HDRC=m
7640 +# CONFIG_USB_MUSB_HOST is not set
7641 +# CONFIG_USB_MUSB_GADGET is not set
7642 +CONFIG_USB_MUSB_DUAL_ROLE=y
7645 +# Platform Glue Layer
7649 +# MUSB DMA mode
7651 +CONFIG_MUSB_PIO_ONLY=y
7652 +CONFIG_USB_DWC3=m
7653 +CONFIG_USB_DWC3_ULPI=y
7654 +# CONFIG_USB_DWC3_HOST is not set
7655 +# CONFIG_USB_DWC3_GADGET is not set
7656 +CONFIG_USB_DWC3_DUAL_ROLE=y
7659 +# Platform Glue Driver Support
7661 +CONFIG_USB_DWC3_PCI=m
7662 +CONFIG_USB_DWC3_HAPS=m
7663 +CONFIG_USB_DWC2=y
7664 +CONFIG_USB_DWC2_HOST=y
7667 +# Gadget/Dual-role mode requires USB Gadget support to be enabled
7669 +CONFIG_USB_DWC2_PCI=m
7670 +# CONFIG_USB_DWC2_DEBUG is not set
7671 +# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set
7672 +CONFIG_USB_CHIPIDEA=m
7673 +CONFIG_USB_CHIPIDEA_UDC=y
7674 +CONFIG_USB_CHIPIDEA_HOST=y
7675 +CONFIG_USB_CHIPIDEA_PCI=m
7676 +CONFIG_USB_CHIPIDEA_MSM=m
7677 +CONFIG_USB_CHIPIDEA_GENERIC=m
7678 +CONFIG_USB_ISP1760=m
7679 +CONFIG_USB_ISP1760_HCD=y
7680 +CONFIG_USB_ISP1761_UDC=y
7681 +# CONFIG_USB_ISP1760_HOST_ROLE is not set
7682 +# CONFIG_USB_ISP1760_GADGET_ROLE is not set
7683 +CONFIG_USB_ISP1760_DUAL_ROLE=y
7686 +# USB port drivers
7688 +CONFIG_USB_USS720=m
7689 +CONFIG_USB_SERIAL=m
7690 +CONFIG_USB_SERIAL_GENERIC=y
7691 +CONFIG_USB_SERIAL_SIMPLE=m
7692 +CONFIG_USB_SERIAL_AIRCABLE=m
7693 +CONFIG_USB_SERIAL_ARK3116=m
7694 +CONFIG_USB_SERIAL_BELKIN=m
7695 +CONFIG_USB_SERIAL_CH341=m
7696 +CONFIG_USB_SERIAL_WHITEHEAT=m
7697 +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
7698 +CONFIG_USB_SERIAL_CP210X=m
7699 +CONFIG_USB_SERIAL_CYPRESS_M8=m
7700 +CONFIG_USB_SERIAL_EMPEG=m
7701 +CONFIG_USB_SERIAL_FTDI_SIO=m
7702 +CONFIG_USB_SERIAL_VISOR=m
7703 +CONFIG_USB_SERIAL_IPAQ=m
7704 +CONFIG_USB_SERIAL_IR=m
7705 +CONFIG_USB_SERIAL_EDGEPORT=m
7706 +CONFIG_USB_SERIAL_EDGEPORT_TI=m
7707 +CONFIG_USB_SERIAL_F81232=m
7708 +CONFIG_USB_SERIAL_F8153X=m
7709 +CONFIG_USB_SERIAL_GARMIN=m
7710 +CONFIG_USB_SERIAL_IPW=m
7711 +CONFIG_USB_SERIAL_IUU=m
7712 +CONFIG_USB_SERIAL_KEYSPAN_PDA=m
7713 +CONFIG_USB_SERIAL_KEYSPAN=m
7714 +CONFIG_USB_SERIAL_KLSI=m
7715 +CONFIG_USB_SERIAL_KOBIL_SCT=m
7716 +CONFIG_USB_SERIAL_MCT_U232=m
7717 +CONFIG_USB_SERIAL_METRO=m
7718 +CONFIG_USB_SERIAL_MOS7720=m
7719 +CONFIG_USB_SERIAL_MOS7715_PARPORT=y
7720 +CONFIG_USB_SERIAL_MOS7840=m
7721 +CONFIG_USB_SERIAL_MXUPORT=m
7722 +CONFIG_USB_SERIAL_NAVMAN=m
7723 +CONFIG_USB_SERIAL_PL2303=m
7724 +CONFIG_USB_SERIAL_OTI6858=m
7725 +CONFIG_USB_SERIAL_QCAUX=m
7726 +CONFIG_USB_SERIAL_QUALCOMM=m
7727 +CONFIG_USB_SERIAL_SPCP8X5=m
7728 +CONFIG_USB_SERIAL_SAFE=m
7729 +# CONFIG_USB_SERIAL_SAFE_PADDED is not set
7730 +CONFIG_USB_SERIAL_SIERRAWIRELESS=m
7731 +CONFIG_USB_SERIAL_SYMBOL=m
7732 +CONFIG_USB_SERIAL_TI=m
7733 +CONFIG_USB_SERIAL_CYBERJACK=m
7734 +CONFIG_USB_SERIAL_WWAN=m
7735 +CONFIG_USB_SERIAL_OPTION=m
7736 +CONFIG_USB_SERIAL_OMNINET=m
7737 +CONFIG_USB_SERIAL_OPTICON=m
7738 +CONFIG_USB_SERIAL_XSENS_MT=m
7739 +CONFIG_USB_SERIAL_WISHBONE=m
7740 +CONFIG_USB_SERIAL_SSU100=m
7741 +CONFIG_USB_SERIAL_QT2=m
7742 +CONFIG_USB_SERIAL_UPD78F0730=m
7743 +CONFIG_USB_SERIAL_XR=m
7744 +CONFIG_USB_SERIAL_DEBUG=m
7747 +# USB Miscellaneous drivers
7749 +CONFIG_USB_EMI62=m
7750 +CONFIG_USB_EMI26=m
7751 +CONFIG_USB_ADUTUX=m
7752 +CONFIG_USB_SEVSEG=m
7753 +CONFIG_USB_LEGOTOWER=m
7754 +CONFIG_USB_LCD=m
7755 +CONFIG_USB_CYPRESS_CY7C63=m
7756 +CONFIG_USB_CYTHERM=m
7757 +CONFIG_USB_IDMOUSE=m
7758 +CONFIG_USB_FTDI_ELAN=m
7759 +CONFIG_USB_APPLEDISPLAY=m
7760 +CONFIG_APPLE_MFI_FASTCHARGE=m
7761 +CONFIG_USB_SISUSBVGA=m
7762 +CONFIG_USB_LD=m
7763 +CONFIG_USB_TRANCEVIBRATOR=m
7764 +CONFIG_USB_IOWARRIOR=m
7765 +CONFIG_USB_TEST=m
7766 +CONFIG_USB_EHSET_TEST_FIXTURE=m
7767 +CONFIG_USB_ISIGHTFW=m
7768 +CONFIG_USB_YUREX=m
7769 +CONFIG_USB_EZUSB_FX2=m
7770 +CONFIG_USB_HUB_USB251XB=m
7771 +CONFIG_USB_HSIC_USB3503=m
7772 +CONFIG_USB_HSIC_USB4604=m
7773 +CONFIG_USB_LINK_LAYER_TEST=m
7774 +CONFIG_USB_CHAOSKEY=m
7775 +CONFIG_USB_ATM=m
7776 +CONFIG_USB_SPEEDTOUCH=m
7777 +CONFIG_USB_CXACRU=m
7778 +CONFIG_USB_UEAGLEATM=m
7779 +CONFIG_USB_XUSBATM=m
7782 +# USB Physical Layer drivers
7784 +CONFIG_USB_PHY=y
7785 +CONFIG_NOP_USB_XCEIV=m
7786 +CONFIG_USB_GPIO_VBUS=m
7787 +CONFIG_TAHVO_USB=m
7788 +CONFIG_TAHVO_USB_HOST_BY_DEFAULT=y
7789 +CONFIG_USB_ISP1301=m
7790 +# end of USB Physical Layer drivers
7792 +CONFIG_USB_GADGET=m
7793 +# CONFIG_USB_GADGET_DEBUG is not set
7794 +# CONFIG_USB_GADGET_DEBUG_FILES is not set
7795 +# CONFIG_USB_GADGET_DEBUG_FS is not set
7796 +CONFIG_USB_GADGET_VBUS_DRAW=2
7797 +CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2
7798 +CONFIG_U_SERIAL_CONSOLE=y
7801 +# USB Peripheral Controller
7803 +CONFIG_USB_FOTG210_UDC=m
7804 +CONFIG_USB_GR_UDC=m
7805 +CONFIG_USB_R8A66597=m
7806 +CONFIG_USB_PXA27X=m
7807 +CONFIG_USB_MV_UDC=m
7808 +CONFIG_USB_MV_U3D=m
7809 +CONFIG_USB_SNP_CORE=m
7810 +# CONFIG_USB_M66592 is not set
7811 +CONFIG_USB_BDC_UDC=m
7812 +CONFIG_USB_AMD5536UDC=m
7813 +CONFIG_USB_NET2272=m
7814 +CONFIG_USB_NET2272_DMA=y
7815 +CONFIG_USB_NET2280=m
7816 +CONFIG_USB_GOKU=m
7817 +CONFIG_USB_EG20T=m
7818 +CONFIG_USB_MAX3420_UDC=m
7819 +# CONFIG_USB_DUMMY_HCD is not set
7820 +# end of USB Peripheral Controller
7822 +CONFIG_USB_LIBCOMPOSITE=m
7823 +CONFIG_USB_F_ACM=m
7824 +CONFIG_USB_F_SS_LB=m
7825 +CONFIG_USB_U_SERIAL=m
7826 +CONFIG_USB_U_ETHER=m
7827 +CONFIG_USB_U_AUDIO=m
7828 +CONFIG_USB_F_SERIAL=m
7829 +CONFIG_USB_F_OBEX=m
7830 +CONFIG_USB_F_NCM=m
7831 +CONFIG_USB_F_ECM=m
7832 +CONFIG_USB_F_PHONET=m
7833 +CONFIG_USB_F_EEM=m
7834 +CONFIG_USB_F_SUBSET=m
7835 +CONFIG_USB_F_RNDIS=m
7836 +CONFIG_USB_F_MASS_STORAGE=m
7837 +CONFIG_USB_F_FS=m
7838 +CONFIG_USB_F_UAC1=m
7839 +CONFIG_USB_F_UAC1_LEGACY=m
7840 +CONFIG_USB_F_UAC2=m
7841 +CONFIG_USB_F_UVC=m
7842 +CONFIG_USB_F_MIDI=m
7843 +CONFIG_USB_F_HID=m
7844 +CONFIG_USB_F_PRINTER=m
7845 +CONFIG_USB_F_TCM=m
7846 +CONFIG_USB_CONFIGFS=m
7847 +CONFIG_USB_CONFIGFS_SERIAL=y
7848 +CONFIG_USB_CONFIGFS_ACM=y
7849 +CONFIG_USB_CONFIGFS_OBEX=y
7850 +CONFIG_USB_CONFIGFS_NCM=y
7851 +CONFIG_USB_CONFIGFS_ECM=y
7852 +CONFIG_USB_CONFIGFS_ECM_SUBSET=y
7853 +CONFIG_USB_CONFIGFS_RNDIS=y
7854 +CONFIG_USB_CONFIGFS_EEM=y
7855 +CONFIG_USB_CONFIGFS_PHONET=y
7856 +CONFIG_USB_CONFIGFS_MASS_STORAGE=y
7857 +CONFIG_USB_CONFIGFS_F_LB_SS=y
7858 +CONFIG_USB_CONFIGFS_F_FS=y
7859 +CONFIG_USB_CONFIGFS_F_UAC1=y
7860 +CONFIG_USB_CONFIGFS_F_UAC1_LEGACY=y
7861 +CONFIG_USB_CONFIGFS_F_UAC2=y
7862 +CONFIG_USB_CONFIGFS_F_MIDI=y
7863 +CONFIG_USB_CONFIGFS_F_HID=y
7864 +CONFIG_USB_CONFIGFS_F_UVC=y
7865 +CONFIG_USB_CONFIGFS_F_PRINTER=y
7866 +CONFIG_USB_CONFIGFS_F_TCM=y
7869 +# USB Gadget precomposed configurations
7871 +CONFIG_USB_ZERO=m
7872 +CONFIG_USB_AUDIO=m
7873 +CONFIG_GADGET_UAC1=y
7874 +# CONFIG_GADGET_UAC1_LEGACY is not set
7875 +CONFIG_USB_ETH=m
7876 +CONFIG_USB_ETH_RNDIS=y
7877 +CONFIG_USB_ETH_EEM=y
7878 +CONFIG_USB_G_NCM=m
7879 +CONFIG_USB_GADGETFS=m
7880 +CONFIG_USB_FUNCTIONFS=m
7881 +CONFIG_USB_FUNCTIONFS_ETH=y
7882 +CONFIG_USB_FUNCTIONFS_RNDIS=y
7883 +CONFIG_USB_FUNCTIONFS_GENERIC=y
7884 +CONFIG_USB_MASS_STORAGE=m
7885 +CONFIG_USB_GADGET_TARGET=m
7886 +CONFIG_USB_G_SERIAL=m
7887 +CONFIG_USB_MIDI_GADGET=m
7888 +CONFIG_USB_G_PRINTER=m
7889 +CONFIG_USB_CDC_COMPOSITE=m
7890 +CONFIG_USB_G_NOKIA=m
7891 +CONFIG_USB_G_ACM_MS=m
7892 +# CONFIG_USB_G_MULTI is not set
7893 +CONFIG_USB_G_HID=m
7894 +CONFIG_USB_G_DBGP=m
7895 +# CONFIG_USB_G_DBGP_PRINTK is not set
7896 +CONFIG_USB_G_DBGP_SERIAL=y
7897 +CONFIG_USB_G_WEBCAM=m
7898 +CONFIG_USB_RAW_GADGET=m
7899 +# end of USB Gadget precomposed configurations
7901 +CONFIG_TYPEC=m
7902 +CONFIG_TYPEC_TCPM=m
7903 +CONFIG_TYPEC_TCPCI=m
7904 +CONFIG_TYPEC_RT1711H=m
7905 +CONFIG_TYPEC_MT6360=m
7906 +CONFIG_TYPEC_TCPCI_MAXIM=m
7907 +CONFIG_TYPEC_FUSB302=m
7908 +# CONFIG_TYPEC_WCOVE is not set
7909 +CONFIG_TYPEC_UCSI=m
7910 +CONFIG_UCSI_CCG=m
7911 +CONFIG_UCSI_ACPI=m
7912 +CONFIG_TYPEC_HD3SS3220=m
7913 +CONFIG_TYPEC_TPS6598X=m
7914 +CONFIG_TYPEC_STUSB160X=m
7917 +# USB Type-C Multiplexer/DeMultiplexer Switch support
7919 +CONFIG_TYPEC_MUX_PI3USB30532=m
7920 +CONFIG_TYPEC_MUX_INTEL_PMC=m
7921 +# end of USB Type-C Multiplexer/DeMultiplexer Switch support
7924 +# USB Type-C Alternate Mode drivers
7926 +CONFIG_TYPEC_DP_ALTMODE=m
7927 +CONFIG_TYPEC_NVIDIA_ALTMODE=m
7928 +# end of USB Type-C Alternate Mode drivers
7930 +CONFIG_USB_ROLE_SWITCH=y
7931 +CONFIG_USB_ROLES_INTEL_XHCI=m
7932 +CONFIG_MMC=y
7933 +CONFIG_MMC_BLOCK=m
7934 +CONFIG_MMC_BLOCK_MINORS=8
7935 +CONFIG_SDIO_UART=m
7936 +# CONFIG_MMC_TEST is not set
7937 +CONFIG_MMC_CRYPTO=y
7940 +# MMC/SD/SDIO Host Controller Drivers
7942 +# CONFIG_MMC_DEBUG is not set
7943 +CONFIG_MMC_SDHCI=m
7944 +CONFIG_MMC_SDHCI_IO_ACCESSORS=y
7945 +CONFIG_MMC_SDHCI_PCI=m
7946 +CONFIG_MMC_RICOH_MMC=y
7947 +CONFIG_MMC_SDHCI_ACPI=m
7948 +CONFIG_MMC_SDHCI_PLTFM=m
7949 +CONFIG_MMC_SDHCI_F_SDH30=m
7950 +CONFIG_MMC_WBSD=m
7951 +CONFIG_MMC_ALCOR=m
7952 +CONFIG_MMC_TIFM_SD=m
7953 +CONFIG_MMC_SPI=m
7954 +CONFIG_MMC_SDRICOH_CS=m
7955 +CONFIG_MMC_CB710=m
7956 +CONFIG_MMC_VIA_SDMMC=m
7957 +CONFIG_MMC_VUB300=m
7958 +CONFIG_MMC_USHC=m
7959 +CONFIG_MMC_USDHI6ROL0=m
7960 +CONFIG_MMC_REALTEK_PCI=m
7961 +CONFIG_MMC_REALTEK_USB=m
7962 +CONFIG_MMC_CQHCI=m
7963 +# CONFIG_MMC_HSQ is not set
7964 +CONFIG_MMC_TOSHIBA_PCI=m
7965 +CONFIG_MMC_MTK=m
7966 +CONFIG_MMC_SDHCI_XENON=m
7967 +CONFIG_MEMSTICK=m
7968 +# CONFIG_MEMSTICK_DEBUG is not set
7971 +# MemoryStick drivers
7973 +# CONFIG_MEMSTICK_UNSAFE_RESUME is not set
7974 +CONFIG_MSPRO_BLOCK=m
7975 +CONFIG_MS_BLOCK=m
7978 +# MemoryStick Host Controller Drivers
7980 +CONFIG_MEMSTICK_TIFM_MS=m
7981 +CONFIG_MEMSTICK_JMICRON_38X=m
7982 +CONFIG_MEMSTICK_R592=m
7983 +CONFIG_MEMSTICK_REALTEK_PCI=m
7984 +CONFIG_MEMSTICK_REALTEK_USB=m
7985 +CONFIG_NEW_LEDS=y
7986 +CONFIG_LEDS_CLASS=y
7987 +CONFIG_LEDS_CLASS_FLASH=m
7988 +CONFIG_LEDS_CLASS_MULTICOLOR=m
7989 +CONFIG_LEDS_BRIGHTNESS_HW_CHANGED=y
7992 +# LED drivers
7994 +CONFIG_LEDS_88PM860X=m
7995 +CONFIG_LEDS_APU=m
7996 +CONFIG_LEDS_AS3645A=m
7997 +CONFIG_LEDS_LM3530=m
7998 +CONFIG_LEDS_LM3532=m
7999 +CONFIG_LEDS_LM3533=m
8000 +CONFIG_LEDS_LM3642=m
8001 +CONFIG_LEDS_LM3601X=m
8002 +CONFIG_LEDS_MT6323=m
8003 +CONFIG_LEDS_PCA9532=m
8004 +CONFIG_LEDS_PCA9532_GPIO=y
8005 +CONFIG_LEDS_GPIO=m
8006 +CONFIG_LEDS_LP3944=m
8007 +CONFIG_LEDS_LP3952=m
8008 +CONFIG_LEDS_LP50XX=m
8009 +CONFIG_LEDS_LP8788=m
8010 +CONFIG_LEDS_CLEVO_MAIL=m
8011 +CONFIG_LEDS_PCA955X=m
8012 +CONFIG_LEDS_PCA955X_GPIO=y
8013 +CONFIG_LEDS_PCA963X=m
8014 +CONFIG_LEDS_WM831X_STATUS=m
8015 +CONFIG_LEDS_WM8350=m
8016 +CONFIG_LEDS_DA903X=m
8017 +CONFIG_LEDS_DA9052=m
8018 +CONFIG_LEDS_DAC124S085=m
8019 +CONFIG_LEDS_PWM=m
8020 +CONFIG_LEDS_REGULATOR=m
8021 +CONFIG_LEDS_BD2802=m
8022 +CONFIG_LEDS_INTEL_SS4200=m
8023 +CONFIG_LEDS_ADP5520=m
8024 +CONFIG_LEDS_MC13783=m
8025 +CONFIG_LEDS_TCA6507=m
8026 +CONFIG_LEDS_TLC591XX=m
8027 +CONFIG_LEDS_MAX8997=m
8028 +CONFIG_LEDS_LM355x=m
8029 +CONFIG_LEDS_MENF21BMC=m
8032 +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)
8034 +CONFIG_LEDS_BLINKM=m
8035 +CONFIG_LEDS_MLXCPLD=m
8036 +CONFIG_LEDS_MLXREG=m
8037 +CONFIG_LEDS_USER=m
8038 +CONFIG_LEDS_NIC78BX=m
8039 +CONFIG_LEDS_TI_LMU_COMMON=m
8040 +CONFIG_LEDS_LM36274=m
8041 +CONFIG_LEDS_TPS6105X=m
8042 +CONFIG_LEDS_SGM3140=m
8045 +# Flash and Torch LED drivers
8047 +CONFIG_LEDS_RT8515=m
8050 +# LED Triggers
8052 +CONFIG_LEDS_TRIGGERS=y
8053 +CONFIG_LEDS_TRIGGER_TIMER=m
8054 +CONFIG_LEDS_TRIGGER_ONESHOT=m
8055 +CONFIG_LEDS_TRIGGER_DISK=y
8056 +CONFIG_LEDS_TRIGGER_MTD=y
8057 +CONFIG_LEDS_TRIGGER_HEARTBEAT=m
8058 +CONFIG_LEDS_TRIGGER_BACKLIGHT=m
8059 +CONFIG_LEDS_TRIGGER_CPU=y
8060 +CONFIG_LEDS_TRIGGER_ACTIVITY=m
8061 +CONFIG_LEDS_TRIGGER_GPIO=m
8062 +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
8065 +# iptables trigger is under Netfilter config (LED target)
8067 +CONFIG_LEDS_TRIGGER_TRANSIENT=m
8068 +CONFIG_LEDS_TRIGGER_CAMERA=m
8069 +CONFIG_LEDS_TRIGGER_PANIC=y
8070 +CONFIG_LEDS_TRIGGER_NETDEV=m
8071 +CONFIG_LEDS_TRIGGER_PATTERN=m
8072 +CONFIG_LEDS_TRIGGER_AUDIO=m
8073 +CONFIG_LEDS_TRIGGER_TTY=m
8076 +# LED Blink
8078 +CONFIG_LEDS_BLINK=y
8079 +# CONFIG_ACCESSIBILITY is not set
8080 +CONFIG_INFINIBAND=m
8081 +CONFIG_INFINIBAND_USER_MAD=m
8082 +CONFIG_INFINIBAND_USER_ACCESS=m
8083 +CONFIG_INFINIBAND_USER_MEM=y
8084 +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y
8085 +CONFIG_INFINIBAND_ADDR_TRANS=y
8086 +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y
8087 +CONFIG_INFINIBAND_VIRT_DMA=y
8088 +CONFIG_INFINIBAND_MTHCA=m
8089 +# CONFIG_INFINIBAND_MTHCA_DEBUG is not set
8090 +CONFIG_INFINIBAND_QIB=m
8091 +CONFIG_INFINIBAND_QIB_DCA=y
8092 +CONFIG_INFINIBAND_CXGB4=m
8093 +CONFIG_INFINIBAND_EFA=m
8094 +CONFIG_INFINIBAND_I40IW=m
8095 +CONFIG_MLX4_INFINIBAND=m
8096 +CONFIG_MLX5_INFINIBAND=m
8097 +CONFIG_INFINIBAND_OCRDMA=m
8098 +CONFIG_INFINIBAND_VMWARE_PVRDMA=m
8099 +CONFIG_INFINIBAND_USNIC=m
8100 +CONFIG_INFINIBAND_BNXT_RE=m
8101 +CONFIG_INFINIBAND_HFI1=m
8102 +# CONFIG_HFI1_DEBUG_SDMA_ORDER is not set
8103 +# CONFIG_SDMA_VERBOSITY is not set
8104 +CONFIG_INFINIBAND_QEDR=m
8105 +CONFIG_INFINIBAND_RDMAVT=m
8106 +CONFIG_RDMA_RXE=m
8107 +CONFIG_RDMA_SIW=m
8108 +CONFIG_INFINIBAND_IPOIB=m
8109 +CONFIG_INFINIBAND_IPOIB_CM=y
8110 +# CONFIG_INFINIBAND_IPOIB_DEBUG is not set
8111 +CONFIG_INFINIBAND_SRP=m
8112 +CONFIG_INFINIBAND_SRPT=m
8113 +CONFIG_INFINIBAND_ISER=m
8114 +CONFIG_INFINIBAND_ISERT=m
8115 +CONFIG_INFINIBAND_RTRS=m
8116 +CONFIG_INFINIBAND_RTRS_CLIENT=m
8117 +CONFIG_INFINIBAND_RTRS_SERVER=m
8118 +CONFIG_INFINIBAND_OPA_VNIC=m
8119 +CONFIG_EDAC_ATOMIC_SCRUB=y
8120 +CONFIG_EDAC_SUPPORT=y
8121 +CONFIG_EDAC=y
8122 +# CONFIG_EDAC_LEGACY_SYSFS is not set
8123 +# CONFIG_EDAC_DEBUG is not set
8124 +CONFIG_EDAC_DECODE_MCE=m
8125 +CONFIG_EDAC_GHES=y
8126 +CONFIG_EDAC_AMD64=m
8127 +CONFIG_EDAC_E752X=m
8128 +CONFIG_EDAC_I82975X=m
8129 +CONFIG_EDAC_I3000=m
8130 +CONFIG_EDAC_I3200=m
8131 +CONFIG_EDAC_IE31200=m
8132 +CONFIG_EDAC_X38=m
8133 +CONFIG_EDAC_I5400=m
8134 +CONFIG_EDAC_I7CORE=m
8135 +CONFIG_EDAC_I5000=m
8136 +CONFIG_EDAC_I5100=m
8137 +CONFIG_EDAC_I7300=m
8138 +CONFIG_EDAC_SBRIDGE=m
8139 +CONFIG_EDAC_SKX=m
8140 +CONFIG_EDAC_I10NM=m
8141 +CONFIG_EDAC_PND2=m
8142 +CONFIG_EDAC_IGEN6=m
8143 +CONFIG_RTC_LIB=y
8144 +CONFIG_RTC_MC146818_LIB=y
8145 +CONFIG_RTC_CLASS=y
8146 +CONFIG_RTC_HCTOSYS=y
8147 +CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
8148 +CONFIG_RTC_SYSTOHC=y
8149 +CONFIG_RTC_SYSTOHC_DEVICE="rtc0"
8150 +# CONFIG_RTC_DEBUG is not set
8151 +CONFIG_RTC_NVMEM=y
8154 +# RTC interfaces
8156 +CONFIG_RTC_INTF_SYSFS=y
8157 +CONFIG_RTC_INTF_PROC=y
8158 +CONFIG_RTC_INTF_DEV=y
8159 +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
8160 +# CONFIG_RTC_DRV_TEST is not set
8163 +# I2C RTC drivers
8165 +CONFIG_RTC_DRV_88PM860X=m
8166 +CONFIG_RTC_DRV_88PM80X=m
8167 +CONFIG_RTC_DRV_ABB5ZES3=m
8168 +CONFIG_RTC_DRV_ABEOZ9=m
8169 +CONFIG_RTC_DRV_ABX80X=m
8170 +CONFIG_RTC_DRV_DS1307=m
8171 +CONFIG_RTC_DRV_DS1307_CENTURY=y
8172 +CONFIG_RTC_DRV_DS1374=m
8173 +CONFIG_RTC_DRV_DS1374_WDT=y
8174 +CONFIG_RTC_DRV_DS1672=m
8175 +CONFIG_RTC_DRV_LP8788=m
8176 +CONFIG_RTC_DRV_MAX6900=m
8177 +CONFIG_RTC_DRV_MAX8907=m
8178 +CONFIG_RTC_DRV_MAX8925=m
8179 +CONFIG_RTC_DRV_MAX8998=m
8180 +CONFIG_RTC_DRV_MAX8997=m
8181 +CONFIG_RTC_DRV_RS5C372=m
8182 +CONFIG_RTC_DRV_ISL1208=m
8183 +CONFIG_RTC_DRV_ISL12022=m
8184 +CONFIG_RTC_DRV_X1205=m
8185 +CONFIG_RTC_DRV_PCF8523=m
8186 +CONFIG_RTC_DRV_PCF85063=m
8187 +CONFIG_RTC_DRV_PCF85363=m
8188 +CONFIG_RTC_DRV_PCF8563=m
8189 +CONFIG_RTC_DRV_PCF8583=m
8190 +CONFIG_RTC_DRV_M41T80=m
8191 +CONFIG_RTC_DRV_M41T80_WDT=y
8192 +CONFIG_RTC_DRV_BQ32K=m
8193 +CONFIG_RTC_DRV_PALMAS=m
8194 +CONFIG_RTC_DRV_TPS6586X=m
8195 +CONFIG_RTC_DRV_TPS65910=m
8196 +CONFIG_RTC_DRV_TPS80031=m
8197 +CONFIG_RTC_DRV_RC5T583=m
8198 +CONFIG_RTC_DRV_S35390A=m
8199 +CONFIG_RTC_DRV_FM3130=m
8200 +CONFIG_RTC_DRV_RX8010=m
8201 +CONFIG_RTC_DRV_RX8581=m
8202 +CONFIG_RTC_DRV_RX8025=m
8203 +CONFIG_RTC_DRV_EM3027=m
8204 +CONFIG_RTC_DRV_RV3028=m
8205 +CONFIG_RTC_DRV_RV3032=m
8206 +CONFIG_RTC_DRV_RV8803=m
8207 +CONFIG_RTC_DRV_S5M=m
8208 +CONFIG_RTC_DRV_SD3078=m
8211 +# SPI RTC drivers
8213 +CONFIG_RTC_DRV_M41T93=m
8214 +CONFIG_RTC_DRV_M41T94=m
8215 +CONFIG_RTC_DRV_DS1302=m
8216 +CONFIG_RTC_DRV_DS1305=m
8217 +CONFIG_RTC_DRV_DS1343=m
8218 +CONFIG_RTC_DRV_DS1347=m
8219 +CONFIG_RTC_DRV_DS1390=m
8220 +CONFIG_RTC_DRV_MAX6916=m
8221 +CONFIG_RTC_DRV_R9701=m
8222 +CONFIG_RTC_DRV_RX4581=m
8223 +CONFIG_RTC_DRV_RS5C348=m
8224 +CONFIG_RTC_DRV_MAX6902=m
8225 +CONFIG_RTC_DRV_PCF2123=m
8226 +CONFIG_RTC_DRV_MCP795=m
8227 +CONFIG_RTC_I2C_AND_SPI=y
8230 +# SPI and I2C RTC drivers
8232 +CONFIG_RTC_DRV_DS3232=m
8233 +CONFIG_RTC_DRV_DS3232_HWMON=y
8234 +CONFIG_RTC_DRV_PCF2127=m
8235 +CONFIG_RTC_DRV_RV3029C2=m
8236 +CONFIG_RTC_DRV_RV3029_HWMON=y
8237 +CONFIG_RTC_DRV_RX6110=m
8240 +# Platform RTC drivers
8242 +CONFIG_RTC_DRV_CMOS=y
8243 +CONFIG_RTC_DRV_DS1286=m
8244 +CONFIG_RTC_DRV_DS1511=m
8245 +CONFIG_RTC_DRV_DS1553=m
8246 +CONFIG_RTC_DRV_DS1685_FAMILY=m
8247 +CONFIG_RTC_DRV_DS1685=y
8248 +# CONFIG_RTC_DRV_DS1689 is not set
8249 +# CONFIG_RTC_DRV_DS17285 is not set
8250 +# CONFIG_RTC_DRV_DS17485 is not set
8251 +# CONFIG_RTC_DRV_DS17885 is not set
8252 +CONFIG_RTC_DRV_DS1742=m
8253 +CONFIG_RTC_DRV_DS2404=m
8254 +CONFIG_RTC_DRV_DA9052=m
8255 +CONFIG_RTC_DRV_DA9055=m
8256 +CONFIG_RTC_DRV_DA9063=m
8257 +CONFIG_RTC_DRV_STK17TA8=m
8258 +CONFIG_RTC_DRV_M48T86=m
8259 +CONFIG_RTC_DRV_M48T35=m
8260 +CONFIG_RTC_DRV_M48T59=m
8261 +CONFIG_RTC_DRV_MSM6242=m
8262 +CONFIG_RTC_DRV_BQ4802=m
8263 +CONFIG_RTC_DRV_RP5C01=m
8264 +CONFIG_RTC_DRV_V3020=m
8265 +CONFIG_RTC_DRV_WM831X=m
8266 +CONFIG_RTC_DRV_WM8350=m
8267 +CONFIG_RTC_DRV_PCF50633=m
8268 +CONFIG_RTC_DRV_CROS_EC=m
8271 +# on-CPU RTC drivers
8273 +CONFIG_RTC_DRV_FTRTC010=m
8274 +CONFIG_RTC_DRV_PCAP=m
8275 +CONFIG_RTC_DRV_MC13XXX=m
8276 +CONFIG_RTC_DRV_MT6397=m
8279 +# HID Sensor RTC drivers
8281 +CONFIG_RTC_DRV_HID_SENSOR_TIME=m
8282 +CONFIG_RTC_DRV_WILCO_EC=m
8283 +CONFIG_DMADEVICES=y
8284 +# CONFIG_DMADEVICES_DEBUG is not set
8287 +# DMA Devices
8289 +CONFIG_DMA_ENGINE=y
8290 +CONFIG_DMA_VIRTUAL_CHANNELS=y
8291 +CONFIG_DMA_ACPI=y
8292 +CONFIG_ALTERA_MSGDMA=m
8293 +CONFIG_INTEL_IDMA64=m
8294 +CONFIG_INTEL_IDXD=m
8295 +CONFIG_INTEL_IDXD_SVM=y
8296 +CONFIG_INTEL_IOATDMA=m
8297 +CONFIG_PLX_DMA=m
8298 +CONFIG_XILINX_ZYNQMP_DPDMA=m
8299 +CONFIG_QCOM_HIDMA_MGMT=m
8300 +CONFIG_QCOM_HIDMA=m
8301 +CONFIG_DW_DMAC_CORE=m
8302 +CONFIG_DW_DMAC=m
8303 +CONFIG_DW_DMAC_PCI=m
8304 +CONFIG_DW_EDMA=m
8305 +CONFIG_DW_EDMA_PCIE=m
8306 +CONFIG_HSU_DMA=m
8307 +CONFIG_SF_PDMA=m
8308 +CONFIG_INTEL_LDMA=y
8311 +# DMA Clients
8313 +CONFIG_ASYNC_TX_DMA=y
8314 +# CONFIG_DMATEST is not set
8315 +CONFIG_DMA_ENGINE_RAID=y
8318 +# DMABUF options
8320 +CONFIG_SYNC_FILE=y
8321 +CONFIG_SW_SYNC=y
8322 +CONFIG_UDMABUF=y
8323 +# CONFIG_DMABUF_MOVE_NOTIFY is not set
8324 +# CONFIG_DMABUF_DEBUG is not set
8325 +# CONFIG_DMABUF_SELFTESTS is not set
8326 +CONFIG_DMABUF_HEAPS=y
8327 +CONFIG_DMABUF_HEAPS_SYSTEM=y
8328 +# end of DMABUF options
8330 +CONFIG_DCA=m
8331 +CONFIG_AUXDISPLAY=y
8332 +CONFIG_CHARLCD=m
8333 +CONFIG_HD44780_COMMON=m
8334 +CONFIG_HD44780=m
8335 +CONFIG_KS0108=m
8336 +CONFIG_KS0108_PORT=0x378
8337 +CONFIG_KS0108_DELAY=2
8338 +CONFIG_CFAG12864B=m
8339 +CONFIG_CFAG12864B_RATE=20
8340 +CONFIG_IMG_ASCII_LCD=m
8341 +CONFIG_LCD2S=m
8342 +CONFIG_PARPORT_PANEL=m
8343 +CONFIG_PANEL_PARPORT=0
8344 +CONFIG_PANEL_PROFILE=5
8345 +# CONFIG_PANEL_CHANGE_MESSAGE is not set
8346 +# CONFIG_CHARLCD_BL_OFF is not set
8347 +# CONFIG_CHARLCD_BL_ON is not set
8348 +CONFIG_CHARLCD_BL_FLASH=y
8349 +CONFIG_PANEL=m
8350 +CONFIG_UIO=m
8351 +CONFIG_UIO_CIF=m
8352 +CONFIG_UIO_PDRV_GENIRQ=m
8353 +CONFIG_UIO_DMEM_GENIRQ=m
8354 +CONFIG_UIO_AEC=m
8355 +CONFIG_UIO_SERCOS3=m
8356 +CONFIG_UIO_PCI_GENERIC=m
8357 +CONFIG_UIO_NETX=m
8358 +CONFIG_UIO_PRUSS=m
8359 +CONFIG_UIO_MF624=m
8360 +CONFIG_UIO_HV_GENERIC=m
8361 +CONFIG_VFIO_IOMMU_TYPE1=y
8362 +CONFIG_VFIO_VIRQFD=y
8363 +CONFIG_VFIO=y
8364 +CONFIG_VFIO_NOIOMMU=y
8365 +CONFIG_VFIO_PCI=y
8366 +CONFIG_VFIO_PCI_VGA=y
8367 +CONFIG_VFIO_PCI_MMAP=y
8368 +CONFIG_VFIO_PCI_INTX=y
8369 +CONFIG_VFIO_PCI_IGD=y
8370 +CONFIG_VFIO_MDEV=m
8371 +CONFIG_VFIO_MDEV_DEVICE=m
8372 +CONFIG_IRQ_BYPASS_MANAGER=y
8373 +CONFIG_VIRT_DRIVERS=y
8374 +CONFIG_VBOXGUEST=m
8375 +CONFIG_NITRO_ENCLAVES=m
8376 +CONFIG_ACRN_HSM=m
8377 +CONFIG_VIRTIO=y
8378 +CONFIG_VIRTIO_PCI_LIB=y
8379 +CONFIG_VIRTIO_MENU=y
8380 +CONFIG_VIRTIO_PCI=y
8381 +CONFIG_VIRTIO_PCI_LEGACY=y
8382 +CONFIG_VIRTIO_VDPA=m
8383 +CONFIG_VIRTIO_PMEM=m
8384 +CONFIG_VIRTIO_BALLOON=y
8385 +CONFIG_VIRTIO_MEM=m
8386 +CONFIG_VIRTIO_INPUT=m
8387 +CONFIG_VIRTIO_MMIO=y
8388 +CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
8389 +CONFIG_VIRTIO_DMA_SHARED_BUFFER=m
8390 +CONFIG_VDPA=m
8391 +CONFIG_VDPA_SIM=m
8392 +CONFIG_VDPA_SIM_NET=m
8393 +CONFIG_IFCVF=m
8394 +CONFIG_MLX5_VDPA=y
8395 +CONFIG_MLX5_VDPA_NET=m
8396 +CONFIG_VHOST_IOTLB=m
8397 +CONFIG_VHOST_RING=m
8398 +CONFIG_VHOST=m
8399 +CONFIG_VHOST_MENU=y
8400 +CONFIG_VHOST_NET=m
8401 +CONFIG_VHOST_SCSI=m
8402 +CONFIG_VHOST_VSOCK=m
8403 +CONFIG_VHOST_VDPA=m
8404 +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set
8407 +# Microsoft Hyper-V guest support
8409 +CONFIG_HYPERV=m
8410 +CONFIG_HYPERV_TIMER=y
8411 +CONFIG_HYPERV_UTILS=m
8412 +CONFIG_HYPERV_BALLOON=m
8413 +# end of Microsoft Hyper-V guest support
8416 +# Xen driver support
8418 +CONFIG_XEN_BALLOON=y
8419 +CONFIG_XEN_BALLOON_MEMORY_HOTPLUG=y
8420 +CONFIG_XEN_MEMORY_HOTPLUG_LIMIT=512
8421 +CONFIG_XEN_SCRUB_PAGES_DEFAULT=y
8422 +CONFIG_XEN_DEV_EVTCHN=m
8423 +CONFIG_XEN_BACKEND=y
8424 +CONFIG_XENFS=m
8425 +CONFIG_XEN_COMPAT_XENFS=y
8426 +CONFIG_XEN_SYS_HYPERVISOR=y
8427 +CONFIG_XEN_XENBUS_FRONTEND=y
8428 +CONFIG_XEN_GNTDEV=m
8429 +CONFIG_XEN_GNTDEV_DMABUF=y
8430 +CONFIG_XEN_GRANT_DEV_ALLOC=m
8431 +CONFIG_XEN_GRANT_DMA_ALLOC=y
8432 +CONFIG_SWIOTLB_XEN=y
8433 +CONFIG_XEN_PCIDEV_BACKEND=m
8434 +CONFIG_XEN_PVCALLS_FRONTEND=m
8435 +# CONFIG_XEN_PVCALLS_BACKEND is not set
8436 +CONFIG_XEN_SCSI_BACKEND=m
8437 +CONFIG_XEN_PRIVCMD=m
8438 +CONFIG_XEN_ACPI_PROCESSOR=y
8439 +CONFIG_XEN_MCE_LOG=y
8440 +CONFIG_XEN_HAVE_PVMMU=y
8441 +CONFIG_XEN_EFI=y
8442 +CONFIG_XEN_AUTO_XLATE=y
8443 +CONFIG_XEN_ACPI=y
8444 +CONFIG_XEN_SYMS=y
8445 +CONFIG_XEN_HAVE_VPMU=y
8446 +CONFIG_XEN_FRONT_PGDIR_SHBUF=m
8447 +CONFIG_XEN_UNPOPULATED_ALLOC=y
8448 +# end of Xen driver support
8450 +CONFIG_GREYBUS=m
8451 +CONFIG_GREYBUS_ES2=m
8452 +CONFIG_STAGING=y
8453 +CONFIG_PRISM2_USB=m
8454 +CONFIG_COMEDI=m
8455 +# CONFIG_COMEDI_DEBUG is not set
8456 +CONFIG_COMEDI_DEFAULT_BUF_SIZE_KB=2048
8457 +CONFIG_COMEDI_DEFAULT_BUF_MAXSIZE_KB=20480
8458 +CONFIG_COMEDI_MISC_DRIVERS=y
8459 +CONFIG_COMEDI_BOND=m
8460 +CONFIG_COMEDI_TEST=m
8461 +CONFIG_COMEDI_PARPORT=m
8462 +CONFIG_COMEDI_ISA_DRIVERS=y
8463 +CONFIG_COMEDI_PCL711=m
8464 +CONFIG_COMEDI_PCL724=m
8465 +CONFIG_COMEDI_PCL726=m
8466 +CONFIG_COMEDI_PCL730=m
8467 +CONFIG_COMEDI_PCL812=m
8468 +CONFIG_COMEDI_PCL816=m
8469 +CONFIG_COMEDI_PCL818=m
8470 +CONFIG_COMEDI_PCM3724=m
8471 +CONFIG_COMEDI_AMPLC_DIO200_ISA=m
8472 +CONFIG_COMEDI_AMPLC_PC236_ISA=m
8473 +CONFIG_COMEDI_AMPLC_PC263_ISA=m
8474 +CONFIG_COMEDI_RTI800=m
8475 +CONFIG_COMEDI_RTI802=m
8476 +CONFIG_COMEDI_DAC02=m
8477 +CONFIG_COMEDI_DAS16M1=m
8478 +CONFIG_COMEDI_DAS08_ISA=m
8479 +CONFIG_COMEDI_DAS16=m
8480 +CONFIG_COMEDI_DAS800=m
8481 +CONFIG_COMEDI_DAS1800=m
8482 +CONFIG_COMEDI_DAS6402=m
8483 +CONFIG_COMEDI_DT2801=m
8484 +CONFIG_COMEDI_DT2811=m
8485 +CONFIG_COMEDI_DT2814=m
8486 +CONFIG_COMEDI_DT2815=m
8487 +CONFIG_COMEDI_DT2817=m
8488 +CONFIG_COMEDI_DT282X=m
8489 +CONFIG_COMEDI_DMM32AT=m
8490 +CONFIG_COMEDI_FL512=m
8491 +CONFIG_COMEDI_AIO_AIO12_8=m
8492 +CONFIG_COMEDI_AIO_IIRO_16=m
8493 +CONFIG_COMEDI_II_PCI20KC=m
8494 +CONFIG_COMEDI_C6XDIGIO=m
8495 +CONFIG_COMEDI_MPC624=m
8496 +CONFIG_COMEDI_ADQ12B=m
8497 +CONFIG_COMEDI_NI_AT_A2150=m
8498 +CONFIG_COMEDI_NI_AT_AO=m
8499 +CONFIG_COMEDI_NI_ATMIO=m
8500 +CONFIG_COMEDI_NI_ATMIO16D=m
8501 +CONFIG_COMEDI_NI_LABPC_ISA=m
8502 +CONFIG_COMEDI_PCMAD=m
8503 +CONFIG_COMEDI_PCMDA12=m
8504 +CONFIG_COMEDI_PCMMIO=m
8505 +CONFIG_COMEDI_PCMUIO=m
8506 +CONFIG_COMEDI_MULTIQ3=m
8507 +CONFIG_COMEDI_S526=m
8508 +CONFIG_COMEDI_PCI_DRIVERS=m
8509 +CONFIG_COMEDI_8255_PCI=m
8510 +CONFIG_COMEDI_ADDI_WATCHDOG=m
8511 +CONFIG_COMEDI_ADDI_APCI_1032=m
8512 +CONFIG_COMEDI_ADDI_APCI_1500=m
8513 +CONFIG_COMEDI_ADDI_APCI_1516=m
8514 +CONFIG_COMEDI_ADDI_APCI_1564=m
8515 +CONFIG_COMEDI_ADDI_APCI_16XX=m
8516 +CONFIG_COMEDI_ADDI_APCI_2032=m
8517 +CONFIG_COMEDI_ADDI_APCI_2200=m
8518 +CONFIG_COMEDI_ADDI_APCI_3120=m
8519 +CONFIG_COMEDI_ADDI_APCI_3501=m
8520 +CONFIG_COMEDI_ADDI_APCI_3XXX=m
8521 +CONFIG_COMEDI_ADL_PCI6208=m
8522 +CONFIG_COMEDI_ADL_PCI7X3X=m
8523 +CONFIG_COMEDI_ADL_PCI8164=m
8524 +CONFIG_COMEDI_ADL_PCI9111=m
8525 +CONFIG_COMEDI_ADL_PCI9118=m
8526 +CONFIG_COMEDI_ADV_PCI1710=m
8527 +CONFIG_COMEDI_ADV_PCI1720=m
8528 +CONFIG_COMEDI_ADV_PCI1723=m
8529 +CONFIG_COMEDI_ADV_PCI1724=m
8530 +CONFIG_COMEDI_ADV_PCI1760=m
8531 +CONFIG_COMEDI_ADV_PCI_DIO=m
8532 +CONFIG_COMEDI_AMPLC_DIO200_PCI=m
8533 +CONFIG_COMEDI_AMPLC_PC236_PCI=m
8534 +CONFIG_COMEDI_AMPLC_PC263_PCI=m
8535 +CONFIG_COMEDI_AMPLC_PCI224=m
8536 +CONFIG_COMEDI_AMPLC_PCI230=m
8537 +CONFIG_COMEDI_CONTEC_PCI_DIO=m
8538 +CONFIG_COMEDI_DAS08_PCI=m
8539 +CONFIG_COMEDI_DT3000=m
8540 +CONFIG_COMEDI_DYNA_PCI10XX=m
8541 +CONFIG_COMEDI_GSC_HPDI=m
8542 +CONFIG_COMEDI_MF6X4=m
8543 +CONFIG_COMEDI_ICP_MULTI=m
8544 +CONFIG_COMEDI_DAQBOARD2000=m
8545 +CONFIG_COMEDI_JR3_PCI=m
8546 +CONFIG_COMEDI_KE_COUNTER=m
8547 +CONFIG_COMEDI_CB_PCIDAS64=m
8548 +CONFIG_COMEDI_CB_PCIDAS=m
8549 +CONFIG_COMEDI_CB_PCIDDA=m
8550 +CONFIG_COMEDI_CB_PCIMDAS=m
8551 +CONFIG_COMEDI_CB_PCIMDDA=m
8552 +CONFIG_COMEDI_ME4000=m
8553 +CONFIG_COMEDI_ME_DAQ=m
8554 +CONFIG_COMEDI_NI_6527=m
8555 +CONFIG_COMEDI_NI_65XX=m
8556 +CONFIG_COMEDI_NI_660X=m
8557 +CONFIG_COMEDI_NI_670X=m
8558 +CONFIG_COMEDI_NI_LABPC_PCI=m
8559 +CONFIG_COMEDI_NI_PCIDIO=m
8560 +CONFIG_COMEDI_NI_PCIMIO=m
8561 +CONFIG_COMEDI_RTD520=m
8562 +CONFIG_COMEDI_S626=m
8563 +CONFIG_COMEDI_MITE=m
8564 +CONFIG_COMEDI_NI_TIOCMD=m
8565 +CONFIG_COMEDI_PCMCIA_DRIVERS=m
8566 +CONFIG_COMEDI_CB_DAS16_CS=m
8567 +CONFIG_COMEDI_DAS08_CS=m
8568 +CONFIG_COMEDI_NI_DAQ_700_CS=m
8569 +CONFIG_COMEDI_NI_DAQ_DIO24_CS=m
8570 +CONFIG_COMEDI_NI_LABPC_CS=m
8571 +CONFIG_COMEDI_NI_MIO_CS=m
8572 +CONFIG_COMEDI_QUATECH_DAQP_CS=m
8573 +CONFIG_COMEDI_USB_DRIVERS=m
8574 +CONFIG_COMEDI_DT9812=m
8575 +CONFIG_COMEDI_NI_USB6501=m
8576 +CONFIG_COMEDI_USBDUX=m
8577 +CONFIG_COMEDI_USBDUXFAST=m
8578 +CONFIG_COMEDI_USBDUXSIGMA=m
8579 +CONFIG_COMEDI_VMK80XX=m
8580 +CONFIG_COMEDI_8254=m
8581 +CONFIG_COMEDI_8255=m
8582 +CONFIG_COMEDI_8255_SA=m
8583 +CONFIG_COMEDI_KCOMEDILIB=m
8584 +CONFIG_COMEDI_AMPLC_DIO200=m
8585 +CONFIG_COMEDI_AMPLC_PC236=m
8586 +CONFIG_COMEDI_DAS08=m
8587 +CONFIG_COMEDI_ISADMA=m
8588 +CONFIG_COMEDI_NI_LABPC=m
8589 +CONFIG_COMEDI_NI_LABPC_ISADMA=m
8590 +CONFIG_COMEDI_NI_TIO=m
8591 +CONFIG_COMEDI_NI_ROUTING=m
8592 +CONFIG_RTL8192U=m
8593 +CONFIG_RTLLIB=m
8594 +CONFIG_RTLLIB_CRYPTO_CCMP=m
8595 +CONFIG_RTLLIB_CRYPTO_TKIP=m
8596 +CONFIG_RTLLIB_CRYPTO_WEP=m
8597 +CONFIG_RTL8192E=m
8598 +CONFIG_RTL8723BS=m
8599 +CONFIG_R8712U=m
8600 +CONFIG_R8188EU=m
8601 +CONFIG_88EU_AP_MODE=y
8602 +CONFIG_RTS5208=m
8603 +CONFIG_VT6655=m
8604 +CONFIG_VT6656=m
8607 +# IIO staging drivers
8611 +# Accelerometers
8613 +CONFIG_ADIS16203=m
8614 +CONFIG_ADIS16240=m
8615 +# end of Accelerometers
8618 +# Analog to digital converters
8620 +CONFIG_AD7816=m
8621 +CONFIG_AD7280=m
8622 +# end of Analog to digital converters
8625 +# Analog digital bi-direction converters
8627 +CONFIG_ADT7316=m
8628 +CONFIG_ADT7316_SPI=m
8629 +CONFIG_ADT7316_I2C=m
8630 +# end of Analog digital bi-direction converters
8633 +# Capacitance to digital converters
8635 +CONFIG_AD7150=m
8636 +CONFIG_AD7746=m
8637 +# end of Capacitance to digital converters
8640 +# Direct Digital Synthesis
8642 +CONFIG_AD9832=m
8643 +CONFIG_AD9834=m
8644 +# end of Direct Digital Synthesis
8647 +# Network Analyzer, Impedance Converters
8649 +CONFIG_AD5933=m
8650 +# end of Network Analyzer, Impedance Converters
8653 +# Active energy metering IC
8655 +CONFIG_ADE7854=m
8656 +CONFIG_ADE7854_I2C=m
8657 +CONFIG_ADE7854_SPI=m
8658 +# end of Active energy metering IC
8661 +# Resolver to digital converters
8663 +CONFIG_AD2S1210=m
8664 +# end of Resolver to digital converters
8665 +# end of IIO staging drivers
8667 +CONFIG_FB_SM750=m
8668 +CONFIG_STAGING_MEDIA=y
8669 +CONFIG_INTEL_ATOMISP=y
8670 +CONFIG_VIDEO_ATOMISP=m
8671 +# CONFIG_VIDEO_ATOMISP_ISP2401 is not set
8672 +CONFIG_VIDEO_ATOMISP_OV2722=m
8673 +CONFIG_VIDEO_ATOMISP_GC2235=m
8674 +CONFIG_VIDEO_ATOMISP_MSRLIST_HELPER=m
8675 +CONFIG_VIDEO_ATOMISP_MT9M114=m
8676 +CONFIG_VIDEO_ATOMISP_GC0310=m
8677 +CONFIG_VIDEO_ATOMISP_OV2680=m
8678 +CONFIG_VIDEO_ATOMISP_OV5693=m
8679 +CONFIG_VIDEO_ATOMISP_LM3554=m
8680 +CONFIG_VIDEO_ZORAN=m
8681 +CONFIG_VIDEO_ZORAN_DC30=m
8682 +CONFIG_VIDEO_ZORAN_ZR36060=m
8683 +CONFIG_VIDEO_ZORAN_BUZ=m
8684 +CONFIG_VIDEO_ZORAN_DC10=m
8685 +CONFIG_VIDEO_ZORAN_LML33=m
8686 +CONFIG_VIDEO_ZORAN_LML33R10=m
8687 +CONFIG_VIDEO_ZORAN_AVS6EYES=m
8688 +CONFIG_VIDEO_IPU3_IMGU=m
8691 +# Android
8693 +CONFIG_ASHMEM=m
8694 +# end of Android
8696 +CONFIG_LTE_GDM724X=m
8697 +CONFIG_FIREWIRE_SERIAL=m
8698 +CONFIG_FWTTY_MAX_TOTAL_PORTS=64
8699 +CONFIG_FWTTY_MAX_CARD_PORTS=32
8700 +CONFIG_GS_FPGABOOT=m
8701 +CONFIG_UNISYSSPAR=y
8702 +CONFIG_UNISYS_VISORNIC=m
8703 +CONFIG_UNISYS_VISORINPUT=m
8704 +CONFIG_UNISYS_VISORHBA=m
8705 +CONFIG_FB_TFT=m
8706 +CONFIG_FB_TFT_AGM1264K_FL=m
8707 +CONFIG_FB_TFT_BD663474=m
8708 +CONFIG_FB_TFT_HX8340BN=m
8709 +CONFIG_FB_TFT_HX8347D=m
8710 +CONFIG_FB_TFT_HX8353D=m
8711 +CONFIG_FB_TFT_HX8357D=m
8712 +CONFIG_FB_TFT_ILI9163=m
8713 +CONFIG_FB_TFT_ILI9320=m
8714 +CONFIG_FB_TFT_ILI9325=m
8715 +CONFIG_FB_TFT_ILI9340=m
8716 +CONFIG_FB_TFT_ILI9341=m
8717 +CONFIG_FB_TFT_ILI9481=m
8718 +CONFIG_FB_TFT_ILI9486=m
8719 +CONFIG_FB_TFT_PCD8544=m
8720 +CONFIG_FB_TFT_RA8875=m
8721 +CONFIG_FB_TFT_S6D02A1=m
8722 +CONFIG_FB_TFT_S6D1121=m
8723 +CONFIG_FB_TFT_SEPS525=m
8724 +CONFIG_FB_TFT_SH1106=m
8725 +CONFIG_FB_TFT_SSD1289=m
8726 +CONFIG_FB_TFT_SSD1305=m
8727 +CONFIG_FB_TFT_SSD1306=m
8728 +CONFIG_FB_TFT_SSD1331=m
8729 +CONFIG_FB_TFT_SSD1351=m
8730 +CONFIG_FB_TFT_ST7735R=m
8731 +CONFIG_FB_TFT_ST7789V=m
8732 +CONFIG_FB_TFT_TINYLCD=m
8733 +CONFIG_FB_TFT_TLS8204=m
8734 +CONFIG_FB_TFT_UC1611=m
8735 +CONFIG_FB_TFT_UC1701=m
8736 +CONFIG_FB_TFT_UPD161704=m
8737 +CONFIG_FB_TFT_WATTEROTT=m
8738 +CONFIG_MOST_COMPONENTS=m
8739 +CONFIG_MOST_NET=m
8740 +CONFIG_MOST_SOUND=m
8741 +CONFIG_MOST_VIDEO=m
8742 +CONFIG_MOST_I2C=m
8743 +CONFIG_KS7010=m
8744 +CONFIG_GREYBUS_AUDIO=m
8745 +CONFIG_GREYBUS_AUDIO_APB_CODEC=m
8746 +CONFIG_GREYBUS_BOOTROM=m
8747 +CONFIG_GREYBUS_FIRMWARE=m
8748 +CONFIG_GREYBUS_HID=m
8749 +CONFIG_GREYBUS_LIGHT=m
8750 +CONFIG_GREYBUS_LOG=m
8751 +CONFIG_GREYBUS_LOOPBACK=m
8752 +CONFIG_GREYBUS_POWER=m
8753 +CONFIG_GREYBUS_RAW=m
8754 +CONFIG_GREYBUS_VIBRATOR=m
8755 +CONFIG_GREYBUS_BRIDGED_PHY=m
8756 +CONFIG_GREYBUS_GPIO=m
8757 +CONFIG_GREYBUS_I2C=m
8758 +CONFIG_GREYBUS_PWM=m
8759 +CONFIG_GREYBUS_SDIO=m
8760 +CONFIG_GREYBUS_SPI=m
8761 +CONFIG_GREYBUS_UART=m
8762 +CONFIG_GREYBUS_USB=m
8763 +CONFIG_PI433=m
8766 +# Gasket devices
8768 +CONFIG_STAGING_GASKET_FRAMEWORK=m
8769 +CONFIG_STAGING_APEX_DRIVER=m
8770 +# end of Gasket devices
8772 +CONFIG_FIELDBUS_DEV=m
8773 +CONFIG_KPC2000=y
8774 +CONFIG_KPC2000_CORE=m
8775 +CONFIG_KPC2000_SPI=m
8776 +CONFIG_KPC2000_I2C=m
8777 +CONFIG_KPC2000_DMA=m
8778 +CONFIG_QLGE=m
8779 +CONFIG_WIMAX=m
8780 +CONFIG_WIMAX_DEBUG_LEVEL=8
8781 +CONFIG_WIMAX_I2400M=m
8782 +CONFIG_WIMAX_I2400M_USB=m
8783 +CONFIG_WIMAX_I2400M_DEBUG_LEVEL=8
8784 +CONFIG_WFX=m
8785 +CONFIG_SPMI_HISI3670=m
8786 +CONFIG_X86_PLATFORM_DEVICES=y
8787 +CONFIG_ACPI_WMI=m
8788 +CONFIG_WMI_BMOF=m
8789 +CONFIG_HUAWEI_WMI=m
8790 +CONFIG_UV_SYSFS=m
8791 +CONFIG_INTEL_WMI_SBL_FW_UPDATE=m
8792 +CONFIG_INTEL_WMI_THUNDERBOLT=m
8793 +CONFIG_MXM_WMI=m
8794 +CONFIG_PEAQ_WMI=m
8795 +CONFIG_XIAOMI_WMI=m
8796 +CONFIG_ACERHDF=m
8797 +CONFIG_ACER_WIRELESS=m
8798 +CONFIG_ACER_WMI=m
8799 +CONFIG_AMD_PMC=m
8800 +CONFIG_APPLE_GMUX=m
8801 +CONFIG_ASUS_LAPTOP=m
8802 +CONFIG_ASUS_WIRELESS=m
8803 +CONFIG_ASUS_WMI=m
8804 +CONFIG_ASUS_NB_WMI=m
8805 +CONFIG_EEEPC_LAPTOP=m
8806 +CONFIG_EEEPC_WMI=m
8807 +CONFIG_X86_PLATFORM_DRIVERS_DELL=y
8808 +CONFIG_ALIENWARE_WMI=m
8809 +CONFIG_DCDBAS=m
8810 +CONFIG_DELL_LAPTOP=m
8811 +CONFIG_DELL_RBU=m
8812 +CONFIG_DELL_RBTN=m
8813 +CONFIG_DELL_SMBIOS=m
8814 +CONFIG_DELL_SMBIOS_WMI=y
8815 +CONFIG_DELL_SMBIOS_SMM=y
8816 +CONFIG_DELL_SMO8800=m
8817 +CONFIG_DELL_WMI=m
8818 +CONFIG_DELL_WMI_AIO=m
8819 +CONFIG_DELL_WMI_DESCRIPTOR=m
8820 +CONFIG_DELL_WMI_LED=m
8821 +CONFIG_DELL_WMI_SYSMAN=m
8822 +CONFIG_AMILO_RFKILL=m
8823 +CONFIG_FUJITSU_LAPTOP=m
8824 +CONFIG_FUJITSU_TABLET=m
8825 +CONFIG_GPD_POCKET_FAN=m
8826 +CONFIG_HP_ACCEL=m
8827 +CONFIG_HP_WIRELESS=m
8828 +CONFIG_HP_WMI=m
8829 +CONFIG_IBM_RTL=m
8830 +CONFIG_IDEAPAD_LAPTOP=m
8831 +CONFIG_SENSORS_HDAPS=m
8832 +CONFIG_THINKPAD_ACPI=m
8833 +CONFIG_THINKPAD_ACPI_ALSA_SUPPORT=y
8834 +CONFIG_THINKPAD_ACPI_DEBUGFACILITIES=y
8835 +# CONFIG_THINKPAD_ACPI_DEBUG is not set
8836 +# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set
8837 +CONFIG_THINKPAD_ACPI_VIDEO=y
8838 +CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y
8839 +CONFIG_INTEL_ATOMISP2_LED=m
8840 +CONFIG_INTEL_CHT_INT33FE=m
8841 +CONFIG_INTEL_HID_EVENT=m
8842 +CONFIG_INTEL_INT0002_VGPIO=m
8843 +CONFIG_INTEL_MENLOW=m
8844 +CONFIG_INTEL_OAKTRAIL=m
8845 +CONFIG_INTEL_VBTN=m
8846 +CONFIG_MSI_LAPTOP=m
8847 +CONFIG_MSI_WMI=m
8848 +CONFIG_PCENGINES_APU2=m
8849 +CONFIG_SAMSUNG_LAPTOP=m
8850 +CONFIG_SAMSUNG_Q10=m
8851 +CONFIG_ACPI_TOSHIBA=m
8852 +CONFIG_TOSHIBA_BT_RFKILL=m
8853 +CONFIG_TOSHIBA_HAPS=m
8854 +# CONFIG_TOSHIBA_WMI is not set
8855 +CONFIG_ACPI_CMPC=m
8856 +CONFIG_COMPAL_LAPTOP=m
8857 +CONFIG_LG_LAPTOP=m
8858 +CONFIG_PANASONIC_LAPTOP=m
8859 +CONFIG_SONY_LAPTOP=m
8860 +CONFIG_SONYPI_COMPAT=y
8861 +CONFIG_SYSTEM76_ACPI=m
8862 +CONFIG_TOPSTAR_LAPTOP=m
8863 +CONFIG_I2C_MULTI_INSTANTIATE=m
8864 +CONFIG_MLX_PLATFORM=m
8865 +CONFIG_TOUCHSCREEN_DMI=y
8866 +CONFIG_INTEL_IPS=m
8867 +CONFIG_INTEL_RST=m
8868 +CONFIG_INTEL_SMARTCONNECT=m
8871 +# Intel Speed Select Technology interface support
8873 +CONFIG_INTEL_SPEED_SELECT_INTERFACE=m
8874 +# end of Intel Speed Select Technology interface support
8876 +CONFIG_INTEL_TURBO_MAX_3=y
8877 +CONFIG_INTEL_UNCORE_FREQ_CONTROL=m
8878 +CONFIG_INTEL_BXTWC_PMIC_TMU=m
8879 +CONFIG_INTEL_CHTDC_TI_PWRBTN=m
8880 +CONFIG_INTEL_MRFLD_PWRBTN=m
8881 +CONFIG_INTEL_PMC_CORE=y
8882 +CONFIG_INTEL_PMT_CLASS=m
8883 +CONFIG_INTEL_PMT_TELEMETRY=m
8884 +CONFIG_INTEL_PMT_CRASHLOG=m
8885 +CONFIG_INTEL_PUNIT_IPC=m
8886 +CONFIG_INTEL_SCU_IPC=y
8887 +CONFIG_INTEL_SCU=y
8888 +CONFIG_INTEL_SCU_PCI=y
8889 +CONFIG_INTEL_SCU_PLATFORM=m
8890 +CONFIG_INTEL_SCU_IPC_UTIL=m
8891 +CONFIG_INTEL_TELEMETRY=m
8892 +CONFIG_PMC_ATOM=y
8893 +CONFIG_CHROME_PLATFORMS=y
8894 +CONFIG_CHROMEOS_LAPTOP=m
8895 +CONFIG_CHROMEOS_PSTORE=m
8896 +CONFIG_CHROMEOS_TBMC=m
8897 +CONFIG_CROS_EC=m
8898 +CONFIG_CROS_EC_I2C=m
8899 +CONFIG_CROS_EC_ISHTP=m
8900 +CONFIG_CROS_EC_SPI=m
8901 +CONFIG_CROS_EC_LPC=m
8902 +CONFIG_CROS_EC_PROTO=y
8903 +CONFIG_CROS_KBD_LED_BACKLIGHT=m
8904 +CONFIG_CROS_EC_CHARDEV=m
8905 +CONFIG_CROS_EC_LIGHTBAR=m
8906 +CONFIG_CROS_EC_DEBUGFS=m
8907 +CONFIG_CROS_EC_SENSORHUB=m
8908 +CONFIG_CROS_EC_SYSFS=m
8909 +CONFIG_CROS_EC_TYPEC=m
8910 +CONFIG_CROS_USBPD_LOGGER=m
8911 +CONFIG_CROS_USBPD_NOTIFY=m
8912 +CONFIG_WILCO_EC=m
8913 +CONFIG_WILCO_EC_DEBUGFS=m
8914 +CONFIG_WILCO_EC_EVENTS=m
8915 +CONFIG_WILCO_EC_TELEMETRY=m
8916 +CONFIG_MELLANOX_PLATFORM=y
8917 +CONFIG_MLXREG_HOTPLUG=m
8918 +CONFIG_MLXREG_IO=m
8919 +CONFIG_SURFACE_PLATFORMS=y
8920 +CONFIG_SURFACE3_WMI=m
8921 +CONFIG_SURFACE_3_BUTTON=m
8922 +CONFIG_SURFACE_3_POWER_OPREGION=m
8923 +CONFIG_SURFACE_ACPI_NOTIFY=m
8924 +CONFIG_SURFACE_AGGREGATOR_CDEV=m
8925 +CONFIG_SURFACE_GPE=m
8926 +CONFIG_SURFACE_HOTPLUG=m
8927 +CONFIG_SURFACE_PRO3_BUTTON=m
8928 +CONFIG_SURFACE_AGGREGATOR=m
8929 +CONFIG_SURFACE_AGGREGATOR_BUS=y
8930 +# CONFIG_SURFACE_AGGREGATOR_ERROR_INJECTION is not set
8931 +CONFIG_HAVE_CLK=y
8932 +CONFIG_CLKDEV_LOOKUP=y
8933 +CONFIG_HAVE_CLK_PREPARE=y
8934 +CONFIG_COMMON_CLK=y
8935 +CONFIG_COMMON_CLK_WM831X=m
8936 +CONFIG_COMMON_CLK_MAX9485=m
8937 +CONFIG_COMMON_CLK_SI5341=m
8938 +CONFIG_COMMON_CLK_SI5351=m
8939 +CONFIG_COMMON_CLK_SI544=m
8940 +CONFIG_COMMON_CLK_CDCE706=m
8941 +CONFIG_COMMON_CLK_CS2000_CP=m
8942 +CONFIG_COMMON_CLK_S2MPS11=m
8943 +CONFIG_CLK_TWL6040=m
8944 +CONFIG_COMMON_CLK_PALMAS=m
8945 +CONFIG_COMMON_CLK_PWM=m
8946 +CONFIG_XILINX_VCU=m
8947 +CONFIG_HWSPINLOCK=y
8950 +# Clock Source drivers
8952 +CONFIG_CLKEVT_I8253=y
8953 +CONFIG_I8253_LOCK=y
8954 +CONFIG_CLKBLD_I8253=y
8955 +# end of Clock Source drivers
8957 +CONFIG_MAILBOX=y
8958 +CONFIG_PCC=y
8959 +CONFIG_ALTERA_MBOX=m
8960 +CONFIG_IOMMU_IOVA=y
8961 +CONFIG_IOASID=y
8962 +CONFIG_IOMMU_API=y
8963 +CONFIG_IOMMU_SUPPORT=y
8966 +# Generic IOMMU Pagetable Support
8968 +CONFIG_IOMMU_IO_PGTABLE=y
8969 +# end of Generic IOMMU Pagetable Support
8971 +# CONFIG_IOMMU_DEBUGFS is not set
8972 +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set
8973 +CONFIG_IOMMU_DMA=y
8974 +CONFIG_AMD_IOMMU=y
8975 +CONFIG_AMD_IOMMU_V2=m
8976 +CONFIG_DMAR_TABLE=y
8977 +CONFIG_INTEL_IOMMU=y
8978 +CONFIG_INTEL_IOMMU_SVM=y
8979 +# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set
8980 +CONFIG_INTEL_IOMMU_FLOPPY_WA=y
8981 +# CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON is not set
8982 +CONFIG_IRQ_REMAP=y
8983 +CONFIG_HYPERV_IOMMU=y
8986 +# Remoteproc drivers
8988 +CONFIG_REMOTEPROC=y
8989 +CONFIG_REMOTEPROC_CDEV=y
8990 +# end of Remoteproc drivers
8993 +# Rpmsg drivers
8995 +CONFIG_RPMSG=m
8996 +CONFIG_RPMSG_CHAR=m
8997 +CONFIG_RPMSG_NS=m
8998 +CONFIG_RPMSG_QCOM_GLINK=m
8999 +CONFIG_RPMSG_QCOM_GLINK_RPM=m
9000 +CONFIG_RPMSG_VIRTIO=m
9001 +# end of Rpmsg drivers
9003 +CONFIG_SOUNDWIRE=m
9006 +# SoundWire Devices
9008 +CONFIG_SOUNDWIRE_CADENCE=m
9009 +CONFIG_SOUNDWIRE_INTEL=m
9010 +CONFIG_SOUNDWIRE_QCOM=m
9011 +CONFIG_SOUNDWIRE_GENERIC_ALLOCATION=m
9014 +# SOC (System On Chip) specific Drivers
9018 +# Amlogic SoC drivers
9020 +# end of Amlogic SoC drivers
9023 +# Broadcom SoC drivers
9025 +# end of Broadcom SoC drivers
9028 +# NXP/Freescale QorIQ SoC drivers
9030 +# end of NXP/Freescale QorIQ SoC drivers
9033 +# i.MX SoC drivers
9035 +# end of i.MX SoC drivers
9038 +# Enable LiteX SoC Builder specific drivers
9040 +# end of Enable LiteX SoC Builder specific drivers
9043 +# Qualcomm SoC drivers
9045 +CONFIG_QCOM_QMI_HELPERS=m
9046 +# end of Qualcomm SoC drivers
9048 +CONFIG_SOC_TI=y
9051 +# Xilinx SoC drivers
9053 +# end of Xilinx SoC drivers
9054 +# end of SOC (System On Chip) specific Drivers
9056 +CONFIG_PM_DEVFREQ=y
9059 +# DEVFREQ Governors
9061 +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
9062 +CONFIG_DEVFREQ_GOV_PERFORMANCE=y
9063 +CONFIG_DEVFREQ_GOV_POWERSAVE=y
9064 +CONFIG_DEVFREQ_GOV_USERSPACE=y
9065 +CONFIG_DEVFREQ_GOV_PASSIVE=y
9068 +# DEVFREQ Drivers
9070 +CONFIG_PM_DEVFREQ_EVENT=y
9071 +CONFIG_EXTCON=y
9074 +# Extcon Device Drivers
9076 +CONFIG_EXTCON_ADC_JACK=m
9077 +CONFIG_EXTCON_ARIZONA=m
9078 +CONFIG_EXTCON_AXP288=m
9079 +CONFIG_EXTCON_FSA9480=m
9080 +CONFIG_EXTCON_GPIO=m
9081 +CONFIG_EXTCON_INTEL_INT3496=m
9082 +CONFIG_EXTCON_INTEL_CHT_WC=m
9083 +CONFIG_EXTCON_INTEL_MRFLD=m
9084 +CONFIG_EXTCON_MAX14577=m
9085 +CONFIG_EXTCON_MAX3355=m
9086 +CONFIG_EXTCON_MAX77693=m
9087 +CONFIG_EXTCON_MAX77843=m
9088 +CONFIG_EXTCON_MAX8997=m
9089 +CONFIG_EXTCON_PALMAS=m
9090 +CONFIG_EXTCON_PTN5150=m
9091 +CONFIG_EXTCON_RT8973A=m
9092 +CONFIG_EXTCON_SM5502=m
9093 +CONFIG_EXTCON_USB_GPIO=m
9094 +CONFIG_EXTCON_USBC_CROS_EC=m
9095 +CONFIG_EXTCON_USBC_TUSB320=m
9096 +CONFIG_MEMORY=y
9097 +CONFIG_FPGA_DFL_EMIF=m
9098 +CONFIG_IIO=m
9099 +CONFIG_IIO_BUFFER=y
9100 +CONFIG_IIO_BUFFER_CB=m
9101 +CONFIG_IIO_BUFFER_DMA=m
9102 +CONFIG_IIO_BUFFER_DMAENGINE=m
9103 +CONFIG_IIO_BUFFER_HW_CONSUMER=m
9104 +CONFIG_IIO_KFIFO_BUF=m
9105 +CONFIG_IIO_TRIGGERED_BUFFER=m
9106 +CONFIG_IIO_CONFIGFS=m
9107 +CONFIG_IIO_TRIGGER=y
9108 +CONFIG_IIO_CONSUMERS_PER_TRIGGER=2
9109 +CONFIG_IIO_SW_DEVICE=m
9110 +CONFIG_IIO_SW_TRIGGER=m
9111 +CONFIG_IIO_TRIGGERED_EVENT=m
9114 +# Accelerometers
9116 +CONFIG_ADIS16201=m
9117 +CONFIG_ADIS16209=m
9118 +CONFIG_ADXL372=m
9119 +CONFIG_ADXL372_SPI=m
9120 +CONFIG_ADXL372_I2C=m
9121 +CONFIG_BMA220=m
9122 +CONFIG_BMA400=m
9123 +CONFIG_BMA400_I2C=m
9124 +CONFIG_BMA400_SPI=m
9125 +CONFIG_BMC150_ACCEL=m
9126 +CONFIG_BMC150_ACCEL_I2C=m
9127 +CONFIG_BMC150_ACCEL_SPI=m
9128 +CONFIG_DA280=m
9129 +CONFIG_DA311=m
9130 +CONFIG_DMARD09=m
9131 +CONFIG_DMARD10=m
9132 +CONFIG_HID_SENSOR_ACCEL_3D=m
9133 +CONFIG_IIO_CROS_EC_ACCEL_LEGACY=m
9134 +CONFIG_IIO_ST_ACCEL_3AXIS=m
9135 +CONFIG_IIO_ST_ACCEL_I2C_3AXIS=m
9136 +CONFIG_IIO_ST_ACCEL_SPI_3AXIS=m
9137 +CONFIG_KXSD9=m
9138 +CONFIG_KXSD9_SPI=m
9139 +CONFIG_KXSD9_I2C=m
9140 +CONFIG_KXCJK1013=m
9141 +CONFIG_MC3230=m
9142 +CONFIG_MMA7455=m
9143 +CONFIG_MMA7455_I2C=m
9144 +CONFIG_MMA7455_SPI=m
9145 +CONFIG_MMA7660=m
9146 +CONFIG_MMA8452=m
9147 +CONFIG_MMA9551_CORE=m
9148 +CONFIG_MMA9551=m
9149 +CONFIG_MMA9553=m
9150 +CONFIG_MXC4005=m
9151 +CONFIG_MXC6255=m
9152 +CONFIG_SCA3000=m
9153 +CONFIG_STK8312=m
9154 +CONFIG_STK8BA50=m
9155 +# end of Accelerometers
9158 +# Analog to digital converters
9160 +CONFIG_AD_SIGMA_DELTA=m
9161 +CONFIG_AD7091R5=m
9162 +CONFIG_AD7124=m
9163 +CONFIG_AD7192=m
9164 +CONFIG_AD7266=m
9165 +CONFIG_AD7291=m
9166 +CONFIG_AD7292=m
9167 +CONFIG_AD7298=m
9168 +CONFIG_AD7476=m
9169 +CONFIG_AD7606=m
9170 +CONFIG_AD7606_IFACE_PARALLEL=m
9171 +CONFIG_AD7606_IFACE_SPI=m
9172 +CONFIG_AD7766=m
9173 +CONFIG_AD7768_1=m
9174 +CONFIG_AD7780=m
9175 +CONFIG_AD7791=m
9176 +CONFIG_AD7793=m
9177 +CONFIG_AD7887=m
9178 +CONFIG_AD7923=m
9179 +CONFIG_AD7949=m
9180 +CONFIG_AD799X=m
9181 +CONFIG_AD9467=m
9182 +CONFIG_ADI_AXI_ADC=m
9183 +CONFIG_AXP20X_ADC=m
9184 +CONFIG_AXP288_ADC=m
9185 +CONFIG_CC10001_ADC=m
9186 +CONFIG_DA9150_GPADC=m
9187 +CONFIG_DLN2_ADC=m
9188 +CONFIG_HI8435=m
9189 +CONFIG_HX711=m
9190 +CONFIG_INA2XX_ADC=m
9191 +CONFIG_INTEL_MRFLD_ADC=m
9192 +CONFIG_LP8788_ADC=m
9193 +CONFIG_LTC2471=m
9194 +CONFIG_LTC2485=m
9195 +CONFIG_LTC2496=m
9196 +CONFIG_LTC2497=m
9197 +CONFIG_MAX1027=m
9198 +CONFIG_MAX11100=m
9199 +CONFIG_MAX1118=m
9200 +CONFIG_MAX1241=m
9201 +CONFIG_MAX1363=m
9202 +CONFIG_MAX9611=m
9203 +CONFIG_MCP320X=m
9204 +CONFIG_MCP3422=m
9205 +CONFIG_MCP3911=m
9206 +CONFIG_MEDIATEK_MT6360_ADC=m
9207 +CONFIG_MEN_Z188_ADC=m
9208 +CONFIG_MP2629_ADC=m
9209 +CONFIG_NAU7802=m
9210 +CONFIG_PALMAS_GPADC=m
9211 +CONFIG_QCOM_VADC_COMMON=m
9212 +CONFIG_QCOM_SPMI_IADC=m
9213 +CONFIG_QCOM_SPMI_VADC=m
9214 +CONFIG_QCOM_SPMI_ADC5=m
9215 +CONFIG_STX104=m
9216 +CONFIG_TI_ADC081C=m
9217 +CONFIG_TI_ADC0832=m
9218 +CONFIG_TI_ADC084S021=m
9219 +CONFIG_TI_ADC12138=m
9220 +CONFIG_TI_ADC108S102=m
9221 +CONFIG_TI_ADC128S052=m
9222 +CONFIG_TI_ADC161S626=m
9223 +CONFIG_TI_ADS1015=m
9224 +CONFIG_TI_ADS7950=m
9225 +CONFIG_TI_AM335X_ADC=m
9226 +CONFIG_TI_TLC4541=m
9227 +CONFIG_TWL4030_MADC=m
9228 +CONFIG_TWL6030_GPADC=m
9229 +CONFIG_VIPERBOARD_ADC=m
9230 +CONFIG_XILINX_XADC=m
9231 +# end of Analog to digital converters
9234 +# Analog Front Ends
9236 +# end of Analog Front Ends
9239 +# Amplifiers
9241 +CONFIG_AD8366=m
9242 +CONFIG_HMC425=m
9243 +# end of Amplifiers
9246 +# Chemical Sensors
9248 +CONFIG_ATLAS_PH_SENSOR=m
9249 +CONFIG_ATLAS_EZO_SENSOR=m
9250 +CONFIG_BME680=m
9251 +CONFIG_BME680_I2C=m
9252 +CONFIG_BME680_SPI=m
9253 +CONFIG_CCS811=m
9254 +CONFIG_IAQCORE=m
9255 +CONFIG_PMS7003=m
9256 +CONFIG_SCD30_CORE=m
9257 +CONFIG_SCD30_I2C=m
9258 +CONFIG_SCD30_SERIAL=m
9259 +CONFIG_SENSIRION_SGP30=m
9260 +CONFIG_SPS30=m
9261 +CONFIG_VZ89X=m
9262 +# end of Chemical Sensors
9264 +CONFIG_IIO_CROS_EC_SENSORS_CORE=m
9265 +CONFIG_IIO_CROS_EC_SENSORS=m
9266 +CONFIG_IIO_CROS_EC_SENSORS_LID_ANGLE=m
9269 +# Hid Sensor IIO Common
9271 +CONFIG_HID_SENSOR_IIO_COMMON=m
9272 +CONFIG_HID_SENSOR_IIO_TRIGGER=m
9273 +# end of Hid Sensor IIO Common
9275 +CONFIG_IIO_MS_SENSORS_I2C=m
9278 +# SSP Sensor Common
9280 +CONFIG_IIO_SSP_SENSORS_COMMONS=m
9281 +CONFIG_IIO_SSP_SENSORHUB=m
9282 +# end of SSP Sensor Common
9284 +CONFIG_IIO_ST_SENSORS_I2C=m
9285 +CONFIG_IIO_ST_SENSORS_SPI=m
9286 +CONFIG_IIO_ST_SENSORS_CORE=m
9289 +# Digital to analog converters
9291 +CONFIG_AD5064=m
9292 +CONFIG_AD5360=m
9293 +CONFIG_AD5380=m
9294 +CONFIG_AD5421=m
9295 +CONFIG_AD5446=m
9296 +CONFIG_AD5449=m
9297 +CONFIG_AD5592R_BASE=m
9298 +CONFIG_AD5592R=m
9299 +CONFIG_AD5593R=m
9300 +CONFIG_AD5504=m
9301 +CONFIG_AD5624R_SPI=m
9302 +CONFIG_AD5686=m
9303 +CONFIG_AD5686_SPI=m
9304 +CONFIG_AD5696_I2C=m
9305 +CONFIG_AD5755=m
9306 +CONFIG_AD5758=m
9307 +CONFIG_AD5761=m
9308 +CONFIG_AD5764=m
9309 +CONFIG_AD5766=m
9310 +CONFIG_AD5770R=m
9311 +CONFIG_AD5791=m
9312 +CONFIG_AD7303=m
9313 +CONFIG_AD8801=m
9314 +CONFIG_CIO_DAC=m
9315 +CONFIG_DS4424=m
9316 +CONFIG_LTC1660=m
9317 +CONFIG_LTC2632=m
9318 +CONFIG_M62332=m
9319 +CONFIG_MAX517=m
9320 +CONFIG_MCP4725=m
9321 +CONFIG_MCP4922=m
9322 +CONFIG_TI_DAC082S085=m
9323 +CONFIG_TI_DAC5571=m
9324 +CONFIG_TI_DAC7311=m
9325 +CONFIG_TI_DAC7612=m
9326 +# end of Digital to analog converters
9329 +# IIO dummy driver
9331 +CONFIG_IIO_SIMPLE_DUMMY=m
9332 +# CONFIG_IIO_SIMPLE_DUMMY_EVENTS is not set
9333 +# CONFIG_IIO_SIMPLE_DUMMY_BUFFER is not set
9334 +# end of IIO dummy driver
9337 +# Frequency Synthesizers DDS/PLL
9341 +# Clock Generator/Distribution
9343 +CONFIG_AD9523=m
9344 +# end of Clock Generator/Distribution
9347 +# Phase-Locked Loop (PLL) frequency synthesizers
9349 +CONFIG_ADF4350=m
9350 +CONFIG_ADF4371=m
9351 +# end of Phase-Locked Loop (PLL) frequency synthesizers
9352 +# end of Frequency Synthesizers DDS/PLL
9355 +# Digital gyroscope sensors
9357 +CONFIG_ADIS16080=m
9358 +CONFIG_ADIS16130=m
9359 +CONFIG_ADIS16136=m
9360 +CONFIG_ADIS16260=m
9361 +CONFIG_ADXRS290=m
9362 +CONFIG_ADXRS450=m
9363 +CONFIG_BMG160=m
9364 +CONFIG_BMG160_I2C=m
9365 +CONFIG_BMG160_SPI=m
9366 +CONFIG_FXAS21002C=m
9367 +CONFIG_FXAS21002C_I2C=m
9368 +CONFIG_FXAS21002C_SPI=m
9369 +CONFIG_HID_SENSOR_GYRO_3D=m
9370 +CONFIG_MPU3050=m
9371 +CONFIG_MPU3050_I2C=m
9372 +CONFIG_IIO_ST_GYRO_3AXIS=m
9373 +CONFIG_IIO_ST_GYRO_I2C_3AXIS=m
9374 +CONFIG_IIO_ST_GYRO_SPI_3AXIS=m
9375 +CONFIG_ITG3200=m
9376 +# end of Digital gyroscope sensors
9379 +# Health Sensors
9383 +# Heart Rate Monitors
9385 +CONFIG_AFE4403=m
9386 +CONFIG_AFE4404=m
9387 +CONFIG_MAX30100=m
9388 +CONFIG_MAX30102=m
9389 +# end of Heart Rate Monitors
9390 +# end of Health Sensors
9393 +# Humidity sensors
9395 +CONFIG_AM2315=m
9396 +CONFIG_DHT11=m
9397 +CONFIG_HDC100X=m
9398 +CONFIG_HDC2010=m
9399 +CONFIG_HID_SENSOR_HUMIDITY=m
9400 +CONFIG_HTS221=m
9401 +CONFIG_HTS221_I2C=m
9402 +CONFIG_HTS221_SPI=m
9403 +CONFIG_HTU21=m
9404 +CONFIG_SI7005=m
9405 +CONFIG_SI7020=m
9406 +# end of Humidity sensors
9409 +# Inertial measurement units
9411 +CONFIG_ADIS16400=m
9412 +CONFIG_ADIS16460=m
9413 +CONFIG_ADIS16475=m
9414 +CONFIG_ADIS16480=m
9415 +CONFIG_BMI160=m
9416 +CONFIG_BMI160_I2C=m
9417 +CONFIG_BMI160_SPI=m
9418 +CONFIG_FXOS8700=m
9419 +CONFIG_FXOS8700_I2C=m
9420 +CONFIG_FXOS8700_SPI=m
9421 +CONFIG_KMX61=m
9422 +CONFIG_INV_ICM42600=m
9423 +CONFIG_INV_ICM42600_I2C=m
9424 +CONFIG_INV_ICM42600_SPI=m
9425 +CONFIG_INV_MPU6050_IIO=m
9426 +CONFIG_INV_MPU6050_I2C=m
9427 +CONFIG_INV_MPU6050_SPI=m
9428 +CONFIG_IIO_ST_LSM6DSX=m
9429 +CONFIG_IIO_ST_LSM6DSX_I2C=m
9430 +CONFIG_IIO_ST_LSM6DSX_SPI=m
9431 +CONFIG_IIO_ST_LSM6DSX_I3C=m
9432 +# end of Inertial measurement units
9434 +CONFIG_IIO_ADIS_LIB=m
9435 +CONFIG_IIO_ADIS_LIB_BUFFER=y
9438 +# Light sensors
9440 +CONFIG_ACPI_ALS=m
9441 +CONFIG_ADJD_S311=m
9442 +CONFIG_ADUX1020=m
9443 +CONFIG_AL3010=m
9444 +CONFIG_AL3320A=m
9445 +CONFIG_APDS9300=m
9446 +CONFIG_APDS9960=m
9447 +CONFIG_AS73211=m
9448 +CONFIG_BH1750=m
9449 +CONFIG_BH1780=m
9450 +CONFIG_CM32181=m
9451 +CONFIG_CM3232=m
9452 +CONFIG_CM3323=m
9453 +CONFIG_CM36651=m
9454 +CONFIG_IIO_CROS_EC_LIGHT_PROX=m
9455 +CONFIG_GP2AP002=m
9456 +CONFIG_GP2AP020A00F=m
9457 +CONFIG_IQS621_ALS=m
9458 +CONFIG_SENSORS_ISL29018=m
9459 +CONFIG_SENSORS_ISL29028=m
9460 +CONFIG_ISL29125=m
9461 +CONFIG_HID_SENSOR_ALS=m
9462 +CONFIG_HID_SENSOR_PROX=m
9463 +CONFIG_JSA1212=m
9464 +CONFIG_RPR0521=m
9465 +CONFIG_SENSORS_LM3533=m
9466 +CONFIG_LTR501=m
9467 +CONFIG_LV0104CS=m
9468 +CONFIG_MAX44000=m
9469 +CONFIG_MAX44009=m
9470 +CONFIG_NOA1305=m
9471 +CONFIG_OPT3001=m
9472 +CONFIG_PA12203001=m
9473 +CONFIG_SI1133=m
9474 +CONFIG_SI1145=m
9475 +CONFIG_STK3310=m
9476 +CONFIG_ST_UVIS25=m
9477 +CONFIG_ST_UVIS25_I2C=m
9478 +CONFIG_ST_UVIS25_SPI=m
9479 +CONFIG_TCS3414=m
9480 +CONFIG_TCS3472=m
9481 +CONFIG_SENSORS_TSL2563=m
9482 +CONFIG_TSL2583=m
9483 +CONFIG_TSL2772=m
9484 +CONFIG_TSL4531=m
9485 +CONFIG_US5182D=m
9486 +CONFIG_VCNL4000=m
9487 +CONFIG_VCNL4035=m
9488 +CONFIG_VEML6030=m
9489 +CONFIG_VEML6070=m
9490 +CONFIG_VL6180=m
9491 +CONFIG_ZOPT2201=m
9492 +# end of Light sensors
9495 +# Magnetometer sensors
9497 +CONFIG_AK8975=m
9498 +CONFIG_AK09911=m
9499 +CONFIG_BMC150_MAGN=m
9500 +CONFIG_BMC150_MAGN_I2C=m
9501 +CONFIG_BMC150_MAGN_SPI=m
9502 +CONFIG_MAG3110=m
9503 +CONFIG_HID_SENSOR_MAGNETOMETER_3D=m
9504 +CONFIG_MMC35240=m
9505 +CONFIG_IIO_ST_MAGN_3AXIS=m
9506 +CONFIG_IIO_ST_MAGN_I2C_3AXIS=m
9507 +CONFIG_IIO_ST_MAGN_SPI_3AXIS=m
9508 +CONFIG_SENSORS_HMC5843=m
9509 +CONFIG_SENSORS_HMC5843_I2C=m
9510 +CONFIG_SENSORS_HMC5843_SPI=m
9511 +CONFIG_SENSORS_RM3100=m
9512 +CONFIG_SENSORS_RM3100_I2C=m
9513 +CONFIG_SENSORS_RM3100_SPI=m
9514 +CONFIG_YAMAHA_YAS530=m
9515 +# end of Magnetometer sensors
9518 +# Multiplexers
9520 +# end of Multiplexers
9523 +# Inclinometer sensors
9525 +CONFIG_HID_SENSOR_INCLINOMETER_3D=m
9526 +CONFIG_HID_SENSOR_DEVICE_ROTATION=m
9527 +# end of Inclinometer sensors
9530 +# Triggers - standalone
9532 +CONFIG_IIO_HRTIMER_TRIGGER=m
9533 +CONFIG_IIO_INTERRUPT_TRIGGER=m
9534 +CONFIG_IIO_TIGHTLOOP_TRIGGER=m
9535 +CONFIG_IIO_SYSFS_TRIGGER=m
9536 +# end of Triggers - standalone
9539 +# Linear and angular position sensors
9541 +CONFIG_IQS624_POS=m
9542 +CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE=m
9543 +# end of Linear and angular position sensors
9546 +# Digital potentiometers
9548 +CONFIG_AD5272=m
9549 +CONFIG_DS1803=m
9550 +CONFIG_MAX5432=m
9551 +CONFIG_MAX5481=m
9552 +CONFIG_MAX5487=m
9553 +CONFIG_MCP4018=m
9554 +CONFIG_MCP4131=m
9555 +CONFIG_MCP4531=m
9556 +CONFIG_MCP41010=m
9557 +CONFIG_TPL0102=m
9558 +# end of Digital potentiometers
9561 +# Digital potentiostats
9563 +CONFIG_LMP91000=m
9564 +# end of Digital potentiostats
9567 +# Pressure sensors
9569 +CONFIG_ABP060MG=m
9570 +CONFIG_BMP280=m
9571 +CONFIG_BMP280_I2C=m
9572 +CONFIG_BMP280_SPI=m
9573 +CONFIG_IIO_CROS_EC_BARO=m
9574 +CONFIG_DLHL60D=m
9575 +CONFIG_DPS310=m
9576 +CONFIG_HID_SENSOR_PRESS=m
9577 +CONFIG_HP03=m
9578 +CONFIG_ICP10100=m
9579 +CONFIG_MPL115=m
9580 +CONFIG_MPL115_I2C=m
9581 +CONFIG_MPL115_SPI=m
9582 +CONFIG_MPL3115=m
9583 +CONFIG_MS5611=m
9584 +CONFIG_MS5611_I2C=m
9585 +CONFIG_MS5611_SPI=m
9586 +CONFIG_MS5637=m
9587 +CONFIG_IIO_ST_PRESS=m
9588 +CONFIG_IIO_ST_PRESS_I2C=m
9589 +CONFIG_IIO_ST_PRESS_SPI=m
9590 +CONFIG_T5403=m
9591 +CONFIG_HP206C=m
9592 +CONFIG_ZPA2326=m
9593 +CONFIG_ZPA2326_I2C=m
9594 +CONFIG_ZPA2326_SPI=m
9595 +# end of Pressure sensors
9598 +# Lightning sensors
9600 +CONFIG_AS3935=m
9601 +# end of Lightning sensors
9604 +# Proximity and distance sensors
9606 +CONFIG_ISL29501=m
9607 +CONFIG_LIDAR_LITE_V2=m
9608 +CONFIG_MB1232=m
9609 +CONFIG_PING=m
9610 +CONFIG_RFD77402=m
9611 +CONFIG_SRF04=m
9612 +CONFIG_SX9310=m
9613 +CONFIG_SX9500=m
9614 +CONFIG_SRF08=m
9615 +CONFIG_VCNL3020=m
9616 +CONFIG_VL53L0X_I2C=m
9617 +# end of Proximity and distance sensors
9620 +# Resolver to digital converters
9622 +CONFIG_AD2S90=m
9623 +CONFIG_AD2S1200=m
9624 +# end of Resolver to digital converters
9627 +# Temperature sensors
9629 +CONFIG_IQS620AT_TEMP=m
9630 +CONFIG_LTC2983=m
9631 +CONFIG_MAXIM_THERMOCOUPLE=m
9632 +CONFIG_HID_SENSOR_TEMP=m
9633 +CONFIG_MLX90614=m
9634 +CONFIG_MLX90632=m
9635 +CONFIG_TMP006=m
9636 +CONFIG_TMP007=m
9637 +CONFIG_TSYS01=m
9638 +CONFIG_TSYS02D=m
9639 +CONFIG_MAX31856=m
9640 +# end of Temperature sensors
9642 +CONFIG_NTB=m
9643 +CONFIG_NTB_MSI=y
9644 +# CONFIG_NTB_AMD is not set
9645 +CONFIG_NTB_IDT=m
9646 +CONFIG_NTB_INTEL=m
9647 +CONFIG_NTB_EPF=m
9648 +CONFIG_NTB_SWITCHTEC=m
9649 +CONFIG_NTB_PINGPONG=m
9650 +CONFIG_NTB_TOOL=m
9651 +CONFIG_NTB_PERF=m
9652 +# CONFIG_NTB_MSI_TEST is not set
9653 +CONFIG_NTB_TRANSPORT=m
9654 +CONFIG_VME_BUS=y
9657 +# VME Bridge Drivers
9659 +CONFIG_VME_CA91CX42=m
9660 +CONFIG_VME_TSI148=m
9661 +CONFIG_VME_FAKE=m
9664 +# VME Board Drivers
9666 +CONFIG_VMIVME_7805=m
9669 +# VME Device Drivers
9671 +CONFIG_VME_USER=m
9672 +CONFIG_PWM=y
9673 +CONFIG_PWM_SYSFS=y
9674 +# CONFIG_PWM_DEBUG is not set
9675 +CONFIG_PWM_CRC=y
9676 +CONFIG_PWM_CROS_EC=m
9677 +CONFIG_PWM_DWC=m
9678 +CONFIG_PWM_IQS620A=m
9679 +CONFIG_PWM_LP3943=m
9680 +CONFIG_PWM_LPSS=y
9681 +CONFIG_PWM_LPSS_PCI=y
9682 +CONFIG_PWM_LPSS_PLATFORM=y
9683 +CONFIG_PWM_PCA9685=m
9684 +CONFIG_PWM_TWL=m
9685 +CONFIG_PWM_TWL_LED=m
9688 +# IRQ chip support
9690 +CONFIG_MADERA_IRQ=m
9691 +# end of IRQ chip support
9693 +CONFIG_IPACK_BUS=m
9694 +CONFIG_BOARD_TPCI200=m
9695 +CONFIG_SERIAL_IPOCTAL=m
9696 +CONFIG_RESET_CONTROLLER=y
9697 +CONFIG_RESET_BRCMSTB_RESCAL=y
9698 +CONFIG_RESET_TI_SYSCON=m
9701 +# PHY Subsystem
9703 +CONFIG_GENERIC_PHY=y
9704 +CONFIG_USB_LGM_PHY=m
9705 +CONFIG_BCM_KONA_USB2_PHY=m
9706 +CONFIG_PHY_PXA_28NM_HSIC=m
9707 +CONFIG_PHY_PXA_28NM_USB2=m
9708 +CONFIG_PHY_CPCAP_USB=m
9709 +CONFIG_PHY_QCOM_USB_HS=m
9710 +CONFIG_PHY_QCOM_USB_HSIC=m
9711 +CONFIG_PHY_SAMSUNG_USB2=m
9712 +CONFIG_PHY_TUSB1210=m
9713 +CONFIG_PHY_INTEL_LGM_EMMC=m
9714 +# end of PHY Subsystem
9716 +CONFIG_POWERCAP=y
9717 +CONFIG_INTEL_RAPL_CORE=m
9718 +CONFIG_INTEL_RAPL=m
9719 +CONFIG_IDLE_INJECT=y
9720 +CONFIG_DTPM=y
9721 +CONFIG_DTPM_CPU=y
9722 +CONFIG_MCB=m
9723 +CONFIG_MCB_PCI=m
9724 +CONFIG_MCB_LPC=m
9727 +# Performance monitor support
9729 +# end of Performance monitor support
9731 +CONFIG_RAS=y
9732 +CONFIG_RAS_CEC=y
9733 +# CONFIG_RAS_CEC_DEBUG is not set
9734 +CONFIG_USB4=m
9735 +# CONFIG_USB4_DEBUGFS_WRITE is not set
9736 +# CONFIG_USB4_DMA_TEST is not set
9739 +# Android
9741 +CONFIG_ANDROID=y
9742 +CONFIG_ANDROID_BINDER_IPC=m
9743 +CONFIG_ANDROID_BINDERFS=m
9744 +CONFIG_ANDROID_BINDER_DEVICES="binder,hwbinder,vndbinder"
9745 +# CONFIG_ANDROID_BINDER_IPC_SELFTEST is not set
9746 +# end of Android
9748 +CONFIG_LIBNVDIMM=y
9749 +CONFIG_BLK_DEV_PMEM=m
9750 +CONFIG_ND_BLK=m
9751 +CONFIG_ND_CLAIM=y
9752 +CONFIG_ND_BTT=m
9753 +CONFIG_BTT=y
9754 +CONFIG_ND_PFN=m
9755 +CONFIG_NVDIMM_PFN=y
9756 +CONFIG_NVDIMM_DAX=y
9757 +CONFIG_NVDIMM_KEYS=y
9758 +CONFIG_DAX_DRIVER=y
9759 +CONFIG_DAX=y
9760 +CONFIG_DEV_DAX=m
9761 +CONFIG_DEV_DAX_PMEM=m
9762 +CONFIG_DEV_DAX_HMEM=m
9763 +CONFIG_DEV_DAX_HMEM_DEVICES=y
9764 +CONFIG_DEV_DAX_KMEM=m
9765 +CONFIG_DEV_DAX_PMEM_COMPAT=m
9766 +CONFIG_NVMEM=y
9767 +CONFIG_NVMEM_SYSFS=y
9768 +CONFIG_NVMEM_SPMI_SDAM=m
9769 +CONFIG_RAVE_SP_EEPROM=m
9770 +CONFIG_NVMEM_RMEM=m
9773 +# HW tracing support
9775 +CONFIG_STM=m
9776 +CONFIG_STM_PROTO_BASIC=m
9777 +CONFIG_STM_PROTO_SYS_T=m
9778 +CONFIG_STM_DUMMY=m
9779 +CONFIG_STM_SOURCE_CONSOLE=m
9780 +CONFIG_STM_SOURCE_HEARTBEAT=m
9781 +CONFIG_INTEL_TH=m
9782 +CONFIG_INTEL_TH_PCI=m
9783 +CONFIG_INTEL_TH_ACPI=m
9784 +CONFIG_INTEL_TH_GTH=m
9785 +CONFIG_INTEL_TH_STH=m
9786 +CONFIG_INTEL_TH_MSU=m
9787 +CONFIG_INTEL_TH_PTI=m
9788 +# CONFIG_INTEL_TH_DEBUG is not set
9789 +# end of HW tracing support
9791 +CONFIG_FPGA=m
9792 +CONFIG_ALTERA_PR_IP_CORE=m
9793 +CONFIG_FPGA_MGR_ALTERA_PS_SPI=m
9794 +CONFIG_FPGA_MGR_ALTERA_CVP=m
9795 +CONFIG_FPGA_MGR_XILINX_SPI=m
9796 +CONFIG_FPGA_MGR_MACHXO2_SPI=m
9797 +CONFIG_FPGA_BRIDGE=m
9798 +CONFIG_ALTERA_FREEZE_BRIDGE=m
9799 +CONFIG_XILINX_PR_DECOUPLER=m
9800 +CONFIG_FPGA_REGION=m
9801 +CONFIG_FPGA_DFL=m
9802 +CONFIG_FPGA_DFL_FME=m
9803 +CONFIG_FPGA_DFL_FME_MGR=m
9804 +CONFIG_FPGA_DFL_FME_BRIDGE=m
9805 +CONFIG_FPGA_DFL_FME_REGION=m
9806 +CONFIG_FPGA_DFL_AFU=m
9807 +CONFIG_FPGA_DFL_NIOS_INTEL_PAC_N3000=m
9808 +CONFIG_FPGA_DFL_PCI=m
9809 +CONFIG_TEE=m
9812 +# TEE drivers
9814 +CONFIG_AMDTEE=m
9815 +# end of TEE drivers
9817 +CONFIG_MULTIPLEXER=m
9820 +# Multiplexer drivers
9822 +CONFIG_MUX_ADG792A=m
9823 +CONFIG_MUX_ADGS1408=m
9824 +CONFIG_MUX_GPIO=m
9825 +# end of Multiplexer drivers
9827 +CONFIG_PM_OPP=y
9828 +CONFIG_UNISYS_VISORBUS=m
9829 +CONFIG_SIOX=m
9830 +CONFIG_SIOX_BUS_GPIO=m
9831 +CONFIG_SLIMBUS=m
9832 +CONFIG_SLIM_QCOM_CTRL=m
9833 +CONFIG_INTERCONNECT=y
9834 +CONFIG_COUNTER=m
9835 +CONFIG_104_QUAD_8=m
9836 +CONFIG_MOST=m
9837 +CONFIG_MOST_USB_HDM=m
9838 +CONFIG_MOST_CDEV=m
9839 +# end of Device Drivers
9842 +# File systems
9844 +CONFIG_DCACHE_WORD_ACCESS=y
9845 +CONFIG_VALIDATE_FS_PARSER=y
9846 +CONFIG_FS_IOMAP=y
9847 +# CONFIG_EXT2_FS is not set
9848 +# CONFIG_EXT3_FS is not set
9849 +CONFIG_EXT4_FS=y
9850 +CONFIG_EXT4_USE_FOR_EXT2=y
9851 +CONFIG_EXT4_FS_POSIX_ACL=y
9852 +CONFIG_EXT4_FS_SECURITY=y
9853 +# CONFIG_EXT4_DEBUG is not set
9854 +CONFIG_JBD2=y
9855 +# CONFIG_JBD2_DEBUG is not set
9856 +CONFIG_FS_MBCACHE=y
9857 +CONFIG_REISERFS_FS=m
9858 +# CONFIG_REISERFS_CHECK is not set
9859 +# CONFIG_REISERFS_PROC_INFO is not set
9860 +CONFIG_REISERFS_FS_XATTR=y
9861 +CONFIG_REISERFS_FS_POSIX_ACL=y
9862 +CONFIG_REISERFS_FS_SECURITY=y
9863 +CONFIG_JFS_FS=m
9864 +CONFIG_JFS_POSIX_ACL=y
9865 +CONFIG_JFS_SECURITY=y
9866 +# CONFIG_JFS_DEBUG is not set
9867 +CONFIG_JFS_STATISTICS=y
9868 +CONFIG_XFS_FS=m
9869 +CONFIG_XFS_SUPPORT_V4=y
9870 +CONFIG_XFS_QUOTA=y
9871 +CONFIG_XFS_POSIX_ACL=y
9872 +CONFIG_XFS_RT=y
9873 +# CONFIG_XFS_ONLINE_SCRUB is not set
9874 +# CONFIG_XFS_WARN is not set
9875 +# CONFIG_XFS_DEBUG is not set
9876 +CONFIG_GFS2_FS=m
9877 +CONFIG_GFS2_FS_LOCKING_DLM=y
9878 +CONFIG_OCFS2_FS=m
9879 +CONFIG_OCFS2_FS_O2CB=m
9880 +CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m
9881 +CONFIG_OCFS2_FS_STATS=y
9882 +CONFIG_OCFS2_DEBUG_MASKLOG=y
9883 +# CONFIG_OCFS2_DEBUG_FS is not set
9884 +CONFIG_BTRFS_FS=m
9885 +CONFIG_BTRFS_FS_POSIX_ACL=y
9886 +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set
9887 +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set
9888 +# CONFIG_BTRFS_DEBUG is not set
9889 +# CONFIG_BTRFS_ASSERT is not set
9890 +# CONFIG_BTRFS_FS_REF_VERIFY is not set
9891 +CONFIG_NILFS2_FS=m
9892 +CONFIG_F2FS_FS=m
9893 +CONFIG_F2FS_STAT_FS=y
9894 +CONFIG_F2FS_FS_XATTR=y
9895 +CONFIG_F2FS_FS_POSIX_ACL=y
9896 +CONFIG_F2FS_FS_SECURITY=y
9897 +# CONFIG_F2FS_CHECK_FS is not set
9898 +# CONFIG_F2FS_FAULT_INJECTION is not set
9899 +CONFIG_F2FS_FS_COMPRESSION=y
9900 +CONFIG_F2FS_FS_LZO=y
9901 +CONFIG_F2FS_FS_LZ4=y
9902 +CONFIG_F2FS_FS_LZ4HC=y
9903 +CONFIG_F2FS_FS_ZSTD=y
9904 +CONFIG_F2FS_FS_LZORLE=y
9905 +CONFIG_ZONEFS_FS=m
9906 +CONFIG_FS_DAX=y
9907 +CONFIG_FS_DAX_PMD=y
9908 +CONFIG_FS_POSIX_ACL=y
9909 +CONFIG_EXPORTFS=y
9910 +CONFIG_EXPORTFS_BLOCK_OPS=y
9911 +CONFIG_FILE_LOCKING=y
9912 +CONFIG_MANDATORY_FILE_LOCKING=y
9913 +CONFIG_FS_ENCRYPTION=y
9914 +CONFIG_FS_ENCRYPTION_ALGS=y
9915 +CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y
9916 +CONFIG_FS_VERITY=y
9917 +# CONFIG_FS_VERITY_DEBUG is not set
9918 +CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y
9919 +CONFIG_FSNOTIFY=y
9920 +CONFIG_DNOTIFY=y
9921 +CONFIG_INOTIFY_USER=y
9922 +CONFIG_FANOTIFY=y
9923 +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
9924 +CONFIG_QUOTA=y
9925 +CONFIG_QUOTA_NETLINK_INTERFACE=y
9926 +# CONFIG_PRINT_QUOTA_WARNING is not set
9927 +# CONFIG_QUOTA_DEBUG is not set
9928 +CONFIG_QUOTA_TREE=m
9929 +CONFIG_QFMT_V1=m
9930 +CONFIG_QFMT_V2=m
9931 +CONFIG_QUOTACTL=y
9932 +CONFIG_AUTOFS4_FS=m
9933 +CONFIG_AUTOFS_FS=m
9934 +CONFIG_FUSE_FS=y
9935 +CONFIG_CUSE=m
9936 +CONFIG_VIRTIO_FS=m
9937 +CONFIG_FUSE_DAX=y
9938 +CONFIG_OVERLAY_FS=m
9939 +# CONFIG_OVERLAY_FS_REDIRECT_DIR is not set
9940 +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y
9941 +# CONFIG_OVERLAY_FS_INDEX is not set
9942 +CONFIG_OVERLAY_FS_XINO_AUTO=y
9943 +# CONFIG_OVERLAY_FS_METACOPY is not set
9946 +# Caches
9948 +CONFIG_FSCACHE=m
9949 +CONFIG_FSCACHE_STATS=y
9950 +# CONFIG_FSCACHE_HISTOGRAM is not set
9951 +# CONFIG_FSCACHE_DEBUG is not set
9952 +# CONFIG_FSCACHE_OBJECT_LIST is not set
9953 +CONFIG_CACHEFILES=m
9954 +# CONFIG_CACHEFILES_DEBUG is not set
9955 +# CONFIG_CACHEFILES_HISTOGRAM is not set
9956 +# end of Caches
9959 +# CD-ROM/DVD Filesystems
9961 +CONFIG_ISO9660_FS=m
9962 +CONFIG_JOLIET=y
9963 +CONFIG_ZISOFS=y
9964 +CONFIG_UDF_FS=m
9965 +# end of CD-ROM/DVD Filesystems
9968 +# DOS/FAT/EXFAT/NT Filesystems
9970 +CONFIG_FAT_FS=y
9971 +CONFIG_MSDOS_FS=m
9972 +CONFIG_VFAT_FS=y
9973 +CONFIG_FAT_DEFAULT_CODEPAGE=437
9974 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
9975 +# CONFIG_FAT_DEFAULT_UTF8 is not set
9976 +CONFIG_EXFAT_FS=m
9977 +CONFIG_EXFAT_DEFAULT_IOCHARSET="utf8"
9978 +# CONFIG_NTFS_FS is not set
9979 +CONFIG_NTFS3_FS=m
9980 +# CONFIG_NTFS3_64BIT_CLUSTER is not set
9981 +CONFIG_NTFS3_LZX_XPRESS=y
9982 +# CONFIG_NTFS3_FS_POSIX_ACL is not set
9983 +# end of DOS/FAT/EXFAT/NT Filesystems
9986 +# Pseudo filesystems
9988 +CONFIG_PROC_FS=y
9989 +CONFIG_PROC_KCORE=y
9990 +CONFIG_PROC_VMCORE=y
9991 +CONFIG_PROC_VMCORE_DEVICE_DUMP=y
9992 +CONFIG_PROC_SYSCTL=y
9993 +CONFIG_PROC_PAGE_MONITOR=y
9994 +CONFIG_PROC_CHILDREN=y
9995 +CONFIG_PROC_PID_ARCH_STATUS=y
9996 +CONFIG_PROC_CPU_RESCTRL=y
9997 +CONFIG_KERNFS=y
9998 +CONFIG_SYSFS=y
9999 +CONFIG_TMPFS=y
10000 +CONFIG_TMPFS_POSIX_ACL=y
10001 +CONFIG_TMPFS_XATTR=y
10002 +CONFIG_TMPFS_INODE64=y
10003 +CONFIG_HUGETLBFS=y
10004 +CONFIG_HUGETLB_PAGE=y
10005 +CONFIG_MEMFD_CREATE=y
10006 +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y
10007 +CONFIG_CONFIGFS_FS=y
10008 +CONFIG_EFIVAR_FS=y
10009 +# end of Pseudo filesystems
10011 +CONFIG_MISC_FILESYSTEMS=y
10012 +CONFIG_ORANGEFS_FS=m
10013 +CONFIG_ADFS_FS=m
10014 +# CONFIG_ADFS_FS_RW is not set
10015 +CONFIG_AFFS_FS=m
10016 +CONFIG_ECRYPT_FS=y
10017 +CONFIG_ECRYPT_FS_MESSAGING=y
10018 +CONFIG_HFS_FS=m
10019 +CONFIG_HFSPLUS_FS=m
10020 +CONFIG_BEFS_FS=m
10021 +# CONFIG_BEFS_DEBUG is not set
10022 +CONFIG_BFS_FS=m
10023 +CONFIG_EFS_FS=m
10024 +CONFIG_JFFS2_FS=m
10025 +CONFIG_JFFS2_FS_DEBUG=0
10026 +CONFIG_JFFS2_FS_WRITEBUFFER=y
10027 +# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
10028 +# CONFIG_JFFS2_SUMMARY is not set
10029 +CONFIG_JFFS2_FS_XATTR=y
10030 +CONFIG_JFFS2_FS_POSIX_ACL=y
10031 +CONFIG_JFFS2_FS_SECURITY=y
10032 +CONFIG_JFFS2_COMPRESSION_OPTIONS=y
10033 +CONFIG_JFFS2_ZLIB=y
10034 +CONFIG_JFFS2_LZO=y
10035 +CONFIG_JFFS2_RTIME=y
10036 +# CONFIG_JFFS2_RUBIN is not set
10037 +# CONFIG_JFFS2_CMODE_NONE is not set
10038 +# CONFIG_JFFS2_CMODE_PRIORITY is not set
10039 +# CONFIG_JFFS2_CMODE_SIZE is not set
10040 +CONFIG_JFFS2_CMODE_FAVOURLZO=y
10041 +CONFIG_UBIFS_FS=m
10042 +# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set
10043 +CONFIG_UBIFS_FS_LZO=y
10044 +CONFIG_UBIFS_FS_ZLIB=y
10045 +CONFIG_UBIFS_FS_ZSTD=y
10046 +# CONFIG_UBIFS_ATIME_SUPPORT is not set
10047 +CONFIG_UBIFS_FS_XATTR=y
10048 +CONFIG_UBIFS_FS_SECURITY=y
10049 +CONFIG_UBIFS_FS_AUTHENTICATION=y
10050 +CONFIG_CRAMFS=m
10051 +CONFIG_CRAMFS_BLOCKDEV=y
10052 +CONFIG_CRAMFS_MTD=y
10053 +CONFIG_SQUASHFS=y
10054 +# CONFIG_SQUASHFS_FILE_CACHE is not set
10055 +CONFIG_SQUASHFS_FILE_DIRECT=y
10056 +CONFIG_SQUASHFS_DECOMP_SINGLE=y
10057 +# CONFIG_SQUASHFS_DECOMP_MULTI is not set
10058 +# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set
10059 +CONFIG_SQUASHFS_XATTR=y
10060 +CONFIG_SQUASHFS_ZLIB=y
10061 +CONFIG_SQUASHFS_LZ4=y
10062 +CONFIG_SQUASHFS_LZO=y
10063 +CONFIG_SQUASHFS_XZ=y
10064 +CONFIG_SQUASHFS_ZSTD=y
10065 +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set
10066 +# CONFIG_SQUASHFS_EMBEDDED is not set
10067 +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
10068 +CONFIG_VXFS_FS=m
10069 +CONFIG_MINIX_FS=m
10070 +CONFIG_OMFS_FS=m
10071 +CONFIG_HPFS_FS=m
10072 +CONFIG_QNX4FS_FS=m
10073 +CONFIG_QNX6FS_FS=m
10074 +# CONFIG_QNX6FS_DEBUG is not set
10075 +CONFIG_ROMFS_FS=m
10076 +CONFIG_ROMFS_BACKED_BY_BLOCK=y
10077 +# CONFIG_ROMFS_BACKED_BY_MTD is not set
10078 +# CONFIG_ROMFS_BACKED_BY_BOTH is not set
10079 +CONFIG_ROMFS_ON_BLOCK=y
10080 +CONFIG_PSTORE=y
10081 +CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240
10082 +# CONFIG_PSTORE_DEFLATE_COMPRESS is not set
10083 +# CONFIG_PSTORE_LZO_COMPRESS is not set
10084 +# CONFIG_PSTORE_LZ4_COMPRESS is not set
10085 +# CONFIG_PSTORE_LZ4HC_COMPRESS is not set
10086 +# CONFIG_PSTORE_842_COMPRESS is not set
10087 +CONFIG_PSTORE_ZSTD_COMPRESS=y
10088 +CONFIG_PSTORE_COMPRESS=y
10089 +CONFIG_PSTORE_ZSTD_COMPRESS_DEFAULT=y
10090 +CONFIG_PSTORE_COMPRESS_DEFAULT="zstd"
10091 +# CONFIG_PSTORE_CONSOLE is not set
10092 +# CONFIG_PSTORE_PMSG is not set
10093 +CONFIG_PSTORE_RAM=m
10094 +CONFIG_PSTORE_ZONE=m
10095 +CONFIG_PSTORE_BLK=m
10096 +CONFIG_PSTORE_BLK_BLKDEV=""
10097 +CONFIG_PSTORE_BLK_KMSG_SIZE=64
10098 +CONFIG_PSTORE_BLK_MAX_REASON=2
10099 +CONFIG_SYSV_FS=m
10100 +CONFIG_UFS_FS=m
10101 +# CONFIG_UFS_FS_WRITE is not set
10102 +# CONFIG_UFS_DEBUG is not set
10103 +CONFIG_EROFS_FS=m
10104 +# CONFIG_EROFS_FS_DEBUG is not set
10105 +CONFIG_EROFS_FS_XATTR=y
10106 +CONFIG_EROFS_FS_POSIX_ACL=y
10107 +CONFIG_EROFS_FS_SECURITY=y
10108 +CONFIG_EROFS_FS_ZIP=y
10109 +CONFIG_EROFS_FS_CLUSTER_PAGE_LIMIT=1
10110 +CONFIG_VBOXSF_FS=m
10111 +CONFIG_NETWORK_FILESYSTEMS=y
10112 +CONFIG_NFS_FS=m
10113 +CONFIG_NFS_V2=m
10114 +CONFIG_NFS_V3=m
10115 +CONFIG_NFS_V3_ACL=y
10116 +CONFIG_NFS_V4=m
10117 +CONFIG_NFS_SWAP=y
10118 +CONFIG_NFS_V4_1=y
10119 +CONFIG_NFS_V4_2=y
10120 +CONFIG_PNFS_FILE_LAYOUT=m
10121 +CONFIG_PNFS_BLOCK=m
10122 +CONFIG_PNFS_FLEXFILE_LAYOUT=m
10123 +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org"
10124 +CONFIG_NFS_V4_1_MIGRATION=y
10125 +CONFIG_NFS_V4_SECURITY_LABEL=y
10126 +CONFIG_NFS_FSCACHE=y
10127 +# CONFIG_NFS_USE_LEGACY_DNS is not set
10128 +CONFIG_NFS_USE_KERNEL_DNS=y
10129 +CONFIG_NFS_DEBUG=y
10130 +CONFIG_NFS_DISABLE_UDP_SUPPORT=y
10131 +# CONFIG_NFS_V4_2_READ_PLUS is not set
10132 +CONFIG_NFSD=m
10133 +CONFIG_NFSD_V2_ACL=y
10134 +CONFIG_NFSD_V3=y
10135 +CONFIG_NFSD_V3_ACL=y
10136 +CONFIG_NFSD_V4=y
10137 +CONFIG_NFSD_PNFS=y
10138 +CONFIG_NFSD_BLOCKLAYOUT=y
10139 +CONFIG_NFSD_SCSILAYOUT=y
10140 +CONFIG_NFSD_FLEXFILELAYOUT=y
10141 +CONFIG_NFSD_V4_2_INTER_SSC=y
10142 +CONFIG_NFSD_V4_SECURITY_LABEL=y
10143 +CONFIG_GRACE_PERIOD=m
10144 +CONFIG_LOCKD=m
10145 +CONFIG_LOCKD_V4=y
10146 +CONFIG_NFS_ACL_SUPPORT=m
10147 +CONFIG_NFS_COMMON=y
10148 +CONFIG_NFS_V4_2_SSC_HELPER=m
10149 +CONFIG_SUNRPC=m
10150 +CONFIG_SUNRPC_GSS=m
10151 +CONFIG_SUNRPC_BACKCHANNEL=y
10152 +CONFIG_SUNRPC_SWAP=y
10153 +CONFIG_RPCSEC_GSS_KRB5=m
10154 +# CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES is not set
10155 +CONFIG_SUNRPC_DEBUG=y
10156 +CONFIG_SUNRPC_XPRT_RDMA=m
10157 +CONFIG_CEPH_FS=m
10158 +CONFIG_CEPH_FSCACHE=y
10159 +CONFIG_CEPH_FS_POSIX_ACL=y
10160 +CONFIG_CEPH_FS_SECURITY_LABEL=y
10161 +CONFIG_CIFS=m
10162 +# CONFIG_CIFS_STATS2 is not set
10163 +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y
10164 +CONFIG_CIFS_WEAK_PW_HASH=y
10165 +CONFIG_CIFS_UPCALL=y
10166 +CONFIG_CIFS_XATTR=y
10167 +CONFIG_CIFS_POSIX=y
10168 +CONFIG_CIFS_DEBUG=y
10169 +# CONFIG_CIFS_DEBUG2 is not set
10170 +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set
10171 +CONFIG_CIFS_DFS_UPCALL=y
10172 +CONFIG_CIFS_SWN_UPCALL=y
10173 +# CONFIG_CIFS_SMB_DIRECT is not set
10174 +CONFIG_CIFS_FSCACHE=y
10175 +CONFIG_CODA_FS=m
10176 +CONFIG_AFS_FS=m
10177 +# CONFIG_AFS_DEBUG is not set
10178 +CONFIG_AFS_FSCACHE=y
10179 +# CONFIG_AFS_DEBUG_CURSOR is not set
10180 +CONFIG_9P_FS=m
10181 +CONFIG_9P_FSCACHE=y
10182 +CONFIG_9P_FS_POSIX_ACL=y
10183 +CONFIG_9P_FS_SECURITY=y
10184 +CONFIG_NLS=y
10185 +CONFIG_NLS_DEFAULT="utf8"
10186 +CONFIG_NLS_CODEPAGE_437=y
10187 +CONFIG_NLS_CODEPAGE_737=m
10188 +CONFIG_NLS_CODEPAGE_775=m
10189 +CONFIG_NLS_CODEPAGE_850=m
10190 +CONFIG_NLS_CODEPAGE_852=m
10191 +CONFIG_NLS_CODEPAGE_855=m
10192 +CONFIG_NLS_CODEPAGE_857=m
10193 +CONFIG_NLS_CODEPAGE_860=m
10194 +CONFIG_NLS_CODEPAGE_861=m
10195 +CONFIG_NLS_CODEPAGE_862=m
10196 +CONFIG_NLS_CODEPAGE_863=m
10197 +CONFIG_NLS_CODEPAGE_864=m
10198 +CONFIG_NLS_CODEPAGE_865=m
10199 +CONFIG_NLS_CODEPAGE_866=m
10200 +CONFIG_NLS_CODEPAGE_869=m
10201 +CONFIG_NLS_CODEPAGE_936=m
10202 +CONFIG_NLS_CODEPAGE_950=m
10203 +CONFIG_NLS_CODEPAGE_932=m
10204 +CONFIG_NLS_CODEPAGE_949=m
10205 +CONFIG_NLS_CODEPAGE_874=m
10206 +CONFIG_NLS_ISO8859_8=m
10207 +CONFIG_NLS_CODEPAGE_1250=m
10208 +CONFIG_NLS_CODEPAGE_1251=m
10209 +CONFIG_NLS_ASCII=m
10210 +CONFIG_NLS_ISO8859_1=m
10211 +CONFIG_NLS_ISO8859_2=m
10212 +CONFIG_NLS_ISO8859_3=m
10213 +CONFIG_NLS_ISO8859_4=m
10214 +CONFIG_NLS_ISO8859_5=m
10215 +CONFIG_NLS_ISO8859_6=m
10216 +CONFIG_NLS_ISO8859_7=m
10217 +CONFIG_NLS_ISO8859_9=m
10218 +CONFIG_NLS_ISO8859_13=m
10219 +CONFIG_NLS_ISO8859_14=m
10220 +CONFIG_NLS_ISO8859_15=m
10221 +CONFIG_NLS_KOI8_R=m
10222 +CONFIG_NLS_KOI8_U=m
10223 +CONFIG_NLS_MAC_ROMAN=m
10224 +CONFIG_NLS_MAC_CELTIC=m
10225 +CONFIG_NLS_MAC_CENTEURO=m
10226 +CONFIG_NLS_MAC_CROATIAN=m
10227 +CONFIG_NLS_MAC_CYRILLIC=m
10228 +CONFIG_NLS_MAC_GAELIC=m
10229 +CONFIG_NLS_MAC_GREEK=m
10230 +CONFIG_NLS_MAC_ICELAND=m
10231 +CONFIG_NLS_MAC_INUIT=m
10232 +CONFIG_NLS_MAC_ROMANIAN=m
10233 +CONFIG_NLS_MAC_TURKISH=m
10234 +CONFIG_NLS_UTF8=m
10235 +CONFIG_DLM=m
10236 +# CONFIG_DLM_DEBUG is not set
10237 +CONFIG_UNICODE=y
10238 +# CONFIG_UNICODE_NORMALIZATION_SELFTEST is not set
10239 +CONFIG_IO_WQ=y
10240 +# end of File systems
10243 +# Security options
10245 +CONFIG_KEYS=y
10246 +CONFIG_KEYS_REQUEST_CACHE=y
10247 +CONFIG_PERSISTENT_KEYRINGS=y
10248 +CONFIG_TRUSTED_KEYS=y
10249 +CONFIG_ENCRYPTED_KEYS=y
10250 +CONFIG_KEY_DH_OPERATIONS=y
10251 +CONFIG_KEY_NOTIFICATIONS=y
10252 +CONFIG_SECURITY_DMESG_RESTRICT=y
10253 +CONFIG_SECURITY=y
10254 +CONFIG_SECURITYFS=y
10255 +CONFIG_SECURITY_NETWORK=y
10256 +CONFIG_PAGE_TABLE_ISOLATION=y
10257 +CONFIG_SECURITY_INFINIBAND=y
10258 +CONFIG_SECURITY_NETWORK_XFRM=y
10259 +CONFIG_SECURITY_PATH=y
10260 +CONFIG_INTEL_TXT=y
10261 +CONFIG_LSM_MMAP_MIN_ADDR=0
10262 +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y
10263 +CONFIG_HARDENED_USERCOPY=y
10264 +CONFIG_HARDENED_USERCOPY_FALLBACK=y
10265 +# CONFIG_HARDENED_USERCOPY_PAGESPAN is not set
10266 +CONFIG_FORTIFY_SOURCE=y
10267 +# CONFIG_STATIC_USERMODEHELPER is not set
10268 +CONFIG_SECURITY_SELINUX=y
10269 +CONFIG_SECURITY_SELINUX_BOOTPARAM=y
10270 +# CONFIG_SECURITY_SELINUX_DISABLE is not set
10271 +CONFIG_SECURITY_SELINUX_DEVELOP=y
10272 +CONFIG_SECURITY_SELINUX_AVC_STATS=y
10273 +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
10274 +CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9
10275 +CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256
10276 +CONFIG_SECURITY_SMACK=y
10277 +# CONFIG_SECURITY_SMACK_BRINGUP is not set
10278 +CONFIG_SECURITY_SMACK_NETFILTER=y
10279 +CONFIG_SECURITY_SMACK_APPEND_SIGNALS=y
10280 +CONFIG_SECURITY_TOMOYO=y
10281 +CONFIG_SECURITY_TOMOYO_MAX_ACCEPT_ENTRY=2048
10282 +CONFIG_SECURITY_TOMOYO_MAX_AUDIT_LOG=1024
10283 +# CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER is not set
10284 +CONFIG_SECURITY_TOMOYO_POLICY_LOADER="/sbin/tomoyo-init"
10285 +CONFIG_SECURITY_TOMOYO_ACTIVATION_TRIGGER="/sbin/init"
10286 +# CONFIG_SECURITY_TOMOYO_INSECURE_BUILTIN_SETTING is not set
10287 +CONFIG_SECURITY_APPARMOR=y
10288 +CONFIG_SECURITY_APPARMOR_HASH=y
10289 +CONFIG_SECURITY_APPARMOR_HASH_DEFAULT=y
10290 +# CONFIG_SECURITY_APPARMOR_DEBUG is not set
10291 +# CONFIG_SECURITY_LOADPIN is not set
10292 +CONFIG_SECURITY_YAMA=y
10293 +CONFIG_SECURITY_SAFESETID=y
10294 +CONFIG_SECURITY_LOCKDOWN_LSM=y
10295 +CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
10296 +CONFIG_LOCK_DOWN_KERNEL_FORCE_NONE=y
10297 +# CONFIG_LOCK_DOWN_KERNEL_FORCE_INTEGRITY is not set
10298 +# CONFIG_LOCK_DOWN_KERNEL_FORCE_CONFIDENTIALITY is not set
10299 +CONFIG_INTEGRITY=y
10300 +CONFIG_INTEGRITY_SIGNATURE=y
10301 +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
10302 +CONFIG_INTEGRITY_TRUSTED_KEYRING=y
10303 +CONFIG_INTEGRITY_PLATFORM_KEYRING=y
10304 +CONFIG_LOAD_UEFI_KEYS=y
10305 +CONFIG_INTEGRITY_AUDIT=y
10306 +CONFIG_IMA=y
10307 +CONFIG_IMA_MEASURE_PCR_IDX=10
10308 +CONFIG_IMA_LSM_RULES=y
10309 +# CONFIG_IMA_TEMPLATE is not set
10310 +CONFIG_IMA_NG_TEMPLATE=y
10311 +# CONFIG_IMA_SIG_TEMPLATE is not set
10312 +CONFIG_IMA_DEFAULT_TEMPLATE="ima-ng"
10313 +CONFIG_IMA_DEFAULT_HASH_SHA1=y
10314 +# CONFIG_IMA_DEFAULT_HASH_SHA256 is not set
10315 +# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set
10316 +CONFIG_IMA_DEFAULT_HASH="sha1"
10317 +# CONFIG_IMA_WRITE_POLICY is not set
10318 +# CONFIG_IMA_READ_POLICY is not set
10319 +CONFIG_IMA_APPRAISE=y
10320 +# CONFIG_IMA_ARCH_POLICY is not set
10321 +# CONFIG_IMA_APPRAISE_BUILD_POLICY is not set
10322 +CONFIG_IMA_APPRAISE_BOOTPARAM=y
10323 +CONFIG_IMA_APPRAISE_MODSIG=y
10324 +CONFIG_IMA_TRUSTED_KEYRING=y
10325 +# CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY is not set
10326 +# CONFIG_IMA_BLACKLIST_KEYRING is not set
10327 +# CONFIG_IMA_LOAD_X509 is not set
10328 +CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y
10329 +CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS=y
10330 +# CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT is not set
10331 +CONFIG_EVM=y
10332 +CONFIG_EVM_ATTR_FSUUID=y
10333 +CONFIG_EVM_EXTRA_SMACK_XATTRS=y
10334 +CONFIG_EVM_ADD_XATTRS=y
10335 +# CONFIG_EVM_LOAD_X509 is not set
10336 +# CONFIG_DEFAULT_SECURITY_SELINUX is not set
10337 +# CONFIG_DEFAULT_SECURITY_SMACK is not set
10338 +# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
10339 +CONFIG_DEFAULT_SECURITY_APPARMOR=y
10340 +# CONFIG_DEFAULT_SECURITY_DAC is not set
10341 +CONFIG_LSM="lockdown,yama,integrity,apparmor"
10344 +# Kernel hardening options
10348 +# Memory initialization
10350 +CONFIG_INIT_STACK_NONE=y
10351 +CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
10352 +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set
10353 +# end of Memory initialization
10354 +# end of Kernel hardening options
10355 +# end of Security options
10357 +CONFIG_XOR_BLOCKS=m
10358 +CONFIG_ASYNC_CORE=m
10359 +CONFIG_ASYNC_MEMCPY=m
10360 +CONFIG_ASYNC_XOR=m
10361 +CONFIG_ASYNC_PQ=m
10362 +CONFIG_ASYNC_RAID6_RECOV=m
10363 +CONFIG_CRYPTO=y
10366 +# Crypto core or helper
10368 +CONFIG_CRYPTO_ALGAPI=y
10369 +CONFIG_CRYPTO_ALGAPI2=y
10370 +CONFIG_CRYPTO_AEAD=y
10371 +CONFIG_CRYPTO_AEAD2=y
10372 +CONFIG_CRYPTO_SKCIPHER=y
10373 +CONFIG_CRYPTO_SKCIPHER2=y
10374 +CONFIG_CRYPTO_HASH=y
10375 +CONFIG_CRYPTO_HASH2=y
10376 +CONFIG_CRYPTO_RNG=y
10377 +CONFIG_CRYPTO_RNG2=y
10378 +CONFIG_CRYPTO_RNG_DEFAULT=y
10379 +CONFIG_CRYPTO_AKCIPHER2=y
10380 +CONFIG_CRYPTO_AKCIPHER=y
10381 +CONFIG_CRYPTO_KPP2=y
10382 +CONFIG_CRYPTO_KPP=y
10383 +CONFIG_CRYPTO_ACOMP2=y
10384 +CONFIG_CRYPTO_MANAGER=y
10385 +CONFIG_CRYPTO_MANAGER2=y
10386 +CONFIG_CRYPTO_USER=m
10387 +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
10388 +CONFIG_CRYPTO_GF128MUL=y
10389 +CONFIG_CRYPTO_NULL=y
10390 +CONFIG_CRYPTO_NULL2=y
10391 +CONFIG_CRYPTO_PCRYPT=m
10392 +CONFIG_CRYPTO_CRYPTD=m
10393 +CONFIG_CRYPTO_AUTHENC=m
10394 +CONFIG_CRYPTO_TEST=m
10395 +CONFIG_CRYPTO_SIMD=m
10396 +CONFIG_CRYPTO_ENGINE=m
10399 +# Public-key cryptography
10401 +CONFIG_CRYPTO_RSA=y
10402 +CONFIG_CRYPTO_DH=y
10403 +CONFIG_CRYPTO_ECC=m
10404 +CONFIG_CRYPTO_ECDH=m
10405 +CONFIG_CRYPTO_ECRDSA=m
10406 +CONFIG_CRYPTO_SM2=m
10407 +CONFIG_CRYPTO_CURVE25519=m
10408 +CONFIG_CRYPTO_CURVE25519_X86=m
10411 +# Authenticated Encryption with Associated Data
10413 +CONFIG_CRYPTO_CCM=m
10414 +CONFIG_CRYPTO_GCM=y
10415 +CONFIG_CRYPTO_CHACHA20POLY1305=m
10416 +CONFIG_CRYPTO_AEGIS128=m
10417 +CONFIG_CRYPTO_AEGIS128_AESNI_SSE2=m
10418 +CONFIG_CRYPTO_SEQIV=y
10419 +CONFIG_CRYPTO_ECHAINIV=m
10422 +# Block modes
10424 +CONFIG_CRYPTO_CBC=y
10425 +CONFIG_CRYPTO_CFB=m
10426 +CONFIG_CRYPTO_CTR=y
10427 +CONFIG_CRYPTO_CTS=y
10428 +CONFIG_CRYPTO_ECB=y
10429 +CONFIG_CRYPTO_LRW=m
10430 +CONFIG_CRYPTO_OFB=m
10431 +CONFIG_CRYPTO_PCBC=m
10432 +CONFIG_CRYPTO_XTS=y
10433 +CONFIG_CRYPTO_KEYWRAP=m
10434 +CONFIG_CRYPTO_NHPOLY1305=m
10435 +CONFIG_CRYPTO_NHPOLY1305_SSE2=m
10436 +CONFIG_CRYPTO_NHPOLY1305_AVX2=m
10437 +CONFIG_CRYPTO_ADIANTUM=m
10438 +CONFIG_CRYPTO_ESSIV=m
10441 +# Hash modes
10443 +CONFIG_CRYPTO_CMAC=m
10444 +CONFIG_CRYPTO_HMAC=y
10445 +CONFIG_CRYPTO_XCBC=m
10446 +CONFIG_CRYPTO_VMAC=m
10449 +# Digest
10451 +CONFIG_CRYPTO_CRC32C=y
10452 +CONFIG_CRYPTO_CRC32C_INTEL=y
10453 +CONFIG_CRYPTO_CRC32=m
10454 +CONFIG_CRYPTO_CRC32_PCLMUL=m
10455 +CONFIG_CRYPTO_XXHASH=m
10456 +CONFIG_CRYPTO_BLAKE2B=m
10457 +CONFIG_CRYPTO_BLAKE2S=m
10458 +CONFIG_CRYPTO_BLAKE2S_X86=m
10459 +CONFIG_CRYPTO_CRCT10DIF=y
10460 +CONFIG_CRYPTO_CRCT10DIF_PCLMUL=m
10461 +CONFIG_CRYPTO_GHASH=y
10462 +CONFIG_CRYPTO_POLY1305=m
10463 +CONFIG_CRYPTO_POLY1305_X86_64=m
10464 +CONFIG_CRYPTO_MD4=m
10465 +CONFIG_CRYPTO_MD5=y
10466 +CONFIG_CRYPTO_MICHAEL_MIC=m
10467 +CONFIG_CRYPTO_RMD160=m
10468 +CONFIG_CRYPTO_SHA1=y
10469 +CONFIG_CRYPTO_SHA1_SSSE3=m
10470 +CONFIG_CRYPTO_SHA256_SSSE3=m
10471 +CONFIG_CRYPTO_SHA512_SSSE3=m
10472 +CONFIG_CRYPTO_SHA256=y
10473 +CONFIG_CRYPTO_SHA512=y
10474 +CONFIG_CRYPTO_SHA3=m
10475 +CONFIG_CRYPTO_SM3=m
10476 +CONFIG_CRYPTO_STREEBOG=m
10477 +CONFIG_CRYPTO_WP512=m
10478 +CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m
10481 +# Ciphers
10483 +CONFIG_CRYPTO_AES=y
10484 +CONFIG_CRYPTO_AES_TI=m
10485 +CONFIG_CRYPTO_AES_NI_INTEL=m
10486 +CONFIG_CRYPTO_BLOWFISH=m
10487 +CONFIG_CRYPTO_BLOWFISH_COMMON=m
10488 +CONFIG_CRYPTO_BLOWFISH_X86_64=m
10489 +CONFIG_CRYPTO_CAMELLIA=m
10490 +CONFIG_CRYPTO_CAMELLIA_X86_64=m
10491 +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=m
10492 +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=m
10493 +CONFIG_CRYPTO_CAST_COMMON=m
10494 +CONFIG_CRYPTO_CAST5=m
10495 +CONFIG_CRYPTO_CAST5_AVX_X86_64=m
10496 +CONFIG_CRYPTO_CAST6=m
10497 +CONFIG_CRYPTO_CAST6_AVX_X86_64=m
10498 +CONFIG_CRYPTO_DES=m
10499 +CONFIG_CRYPTO_DES3_EDE_X86_64=m
10500 +CONFIG_CRYPTO_FCRYPT=m
10501 +CONFIG_CRYPTO_CHACHA20=m
10502 +CONFIG_CRYPTO_CHACHA20_X86_64=m
10503 +CONFIG_CRYPTO_SERPENT=m
10504 +CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m
10505 +CONFIG_CRYPTO_SERPENT_AVX_X86_64=m
10506 +CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m
10507 +CONFIG_CRYPTO_SM4=m
10508 +CONFIG_CRYPTO_TWOFISH=m
10509 +CONFIG_CRYPTO_TWOFISH_COMMON=m
10510 +CONFIG_CRYPTO_TWOFISH_X86_64=m
10511 +CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m
10512 +CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m
10515 +# Compression
10517 +CONFIG_CRYPTO_DEFLATE=y
10518 +CONFIG_CRYPTO_LZO=y
10519 +CONFIG_CRYPTO_842=m
10520 +CONFIG_CRYPTO_LZ4=y
10521 +CONFIG_CRYPTO_LZ4HC=m
10522 +CONFIG_CRYPTO_ZSTD=y
10525 +# Random Number Generation
10527 +CONFIG_CRYPTO_ANSI_CPRNG=m
10528 +CONFIG_CRYPTO_DRBG_MENU=y
10529 +CONFIG_CRYPTO_DRBG_HMAC=y
10530 +CONFIG_CRYPTO_DRBG_HASH=y
10531 +CONFIG_CRYPTO_DRBG_CTR=y
10532 +CONFIG_CRYPTO_DRBG=y
10533 +CONFIG_CRYPTO_JITTERENTROPY=y
10534 +CONFIG_CRYPTO_USER_API=m
10535 +CONFIG_CRYPTO_USER_API_HASH=m
10536 +CONFIG_CRYPTO_USER_API_SKCIPHER=m
10537 +CONFIG_CRYPTO_USER_API_RNG=m
10538 +# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set
10539 +CONFIG_CRYPTO_USER_API_AEAD=m
10540 +# CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE is not set
10541 +CONFIG_CRYPTO_STATS=y
10542 +CONFIG_CRYPTO_HASH_INFO=y
10545 +# Crypto library routines
10547 +CONFIG_CRYPTO_LIB_AES=y
10548 +CONFIG_CRYPTO_LIB_ARC4=m
10549 +CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S=m
10550 +CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=m
10551 +CONFIG_CRYPTO_LIB_BLAKE2S=m
10552 +CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA=m
10553 +CONFIG_CRYPTO_LIB_CHACHA_GENERIC=m
10554 +CONFIG_CRYPTO_LIB_CHACHA=m
10555 +CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519=m
10556 +CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m
10557 +CONFIG_CRYPTO_LIB_CURVE25519=m
10558 +CONFIG_CRYPTO_LIB_DES=m
10559 +CONFIG_CRYPTO_LIB_POLY1305_RSIZE=11
10560 +CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305=m
10561 +CONFIG_CRYPTO_LIB_POLY1305_GENERIC=m
10562 +CONFIG_CRYPTO_LIB_POLY1305=m
10563 +CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
10564 +CONFIG_CRYPTO_LIB_SHA256=y
10565 +CONFIG_CRYPTO_HW=y
10566 +CONFIG_CRYPTO_DEV_PADLOCK=y
10567 +CONFIG_CRYPTO_DEV_PADLOCK_AES=m
10568 +CONFIG_CRYPTO_DEV_PADLOCK_SHA=m
10569 +CONFIG_CRYPTO_DEV_ATMEL_I2C=m
10570 +CONFIG_CRYPTO_DEV_ATMEL_ECC=m
10571 +CONFIG_CRYPTO_DEV_ATMEL_SHA204A=m
10572 +CONFIG_CRYPTO_DEV_CCP=y
10573 +CONFIG_CRYPTO_DEV_CCP_DD=m
10574 +CONFIG_CRYPTO_DEV_SP_CCP=y
10575 +CONFIG_CRYPTO_DEV_CCP_CRYPTO=m
10576 +CONFIG_CRYPTO_DEV_SP_PSP=y
10577 +# CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set
10578 +CONFIG_CRYPTO_DEV_QAT=m
10579 +CONFIG_CRYPTO_DEV_QAT_DH895xCC=m
10580 +CONFIG_CRYPTO_DEV_QAT_C3XXX=m
10581 +CONFIG_CRYPTO_DEV_QAT_C62X=m
10582 +CONFIG_CRYPTO_DEV_QAT_4XXX=m
10583 +CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m
10584 +CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m
10585 +CONFIG_CRYPTO_DEV_QAT_C62XVF=m
10586 +CONFIG_CRYPTO_DEV_NITROX=m
10587 +CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m
10588 +CONFIG_CRYPTO_DEV_CHELSIO=m
10589 +CONFIG_CRYPTO_DEV_VIRTIO=m
10590 +CONFIG_CRYPTO_DEV_SAFEXCEL=m
10591 +CONFIG_CRYPTO_DEV_AMLOGIC_GXL=m
10592 +# CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG is not set
10593 +CONFIG_ASYMMETRIC_KEY_TYPE=y
10594 +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
10595 +CONFIG_ASYMMETRIC_TPM_KEY_SUBTYPE=m
10596 +CONFIG_X509_CERTIFICATE_PARSER=y
10597 +CONFIG_PKCS8_PRIVATE_KEY_PARSER=m
10598 +CONFIG_TPM_KEY_PARSER=m
10599 +CONFIG_PKCS7_MESSAGE_PARSER=y
10600 +CONFIG_PKCS7_TEST_KEY=m
10601 +CONFIG_SIGNED_PE_FILE_VERIFICATION=y
10604 +# Certificates for signature checking
10606 +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem"
10607 +CONFIG_SYSTEM_TRUSTED_KEYRING=y
10608 +CONFIG_SYSTEM_TRUSTED_KEYS=""
10609 +CONFIG_SYSTEM_EXTRA_CERTIFICATE=y
10610 +CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE=4096
10611 +CONFIG_SECONDARY_TRUSTED_KEYRING=y
10612 +CONFIG_SYSTEM_BLACKLIST_KEYRING=y
10613 +CONFIG_SYSTEM_BLACKLIST_HASH_LIST=""
10614 +# end of Certificates for signature checking
10617 +# Library routines
10619 +CONFIG_RAID6_PQ=m
10620 +CONFIG_RAID6_PQ_BENCHMARK=y
10621 +CONFIG_LINEAR_RANGES=y
10622 +CONFIG_PACKING=y
10623 +CONFIG_BITREVERSE=y
10624 +CONFIG_GENERIC_STRNCPY_FROM_USER=y
10625 +CONFIG_GENERIC_STRNLEN_USER=y
10626 +CONFIG_GENERIC_NET_UTILS=y
10627 +CONFIG_GENERIC_FIND_FIRST_BIT=y
10628 +CONFIG_CORDIC=m
10629 +# CONFIG_PRIME_NUMBERS is not set
10630 +CONFIG_RATIONAL=y
10631 +CONFIG_GENERIC_PCI_IOMAP=y
10632 +CONFIG_GENERIC_IOMAP=y
10633 +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y
10634 +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y
10635 +CONFIG_ARCH_USE_SYM_ANNOTATIONS=y
10636 +CONFIG_CRC_CCITT=y
10637 +CONFIG_CRC16=y
10638 +CONFIG_CRC_T10DIF=y
10639 +CONFIG_CRC_ITU_T=m
10640 +CONFIG_CRC32=y
10641 +# CONFIG_CRC32_SELFTEST is not set
10642 +CONFIG_CRC32_SLICEBY8=y
10643 +# CONFIG_CRC32_SLICEBY4 is not set
10644 +# CONFIG_CRC32_SARWATE is not set
10645 +# CONFIG_CRC32_BIT is not set
10646 +CONFIG_CRC64=m
10647 +CONFIG_CRC4=m
10648 +CONFIG_CRC7=m
10649 +CONFIG_LIBCRC32C=m
10650 +CONFIG_CRC8=m
10651 +CONFIG_XXHASH=y
10652 +# CONFIG_RANDOM32_SELFTEST is not set
10653 +CONFIG_842_COMPRESS=m
10654 +CONFIG_842_DECOMPRESS=m
10655 +CONFIG_ZLIB_INFLATE=y
10656 +CONFIG_ZLIB_DEFLATE=y
10657 +CONFIG_LZO_COMPRESS=y
10658 +CONFIG_LZO_DECOMPRESS=y
10659 +CONFIG_LZ4_COMPRESS=y
10660 +CONFIG_LZ4HC_COMPRESS=y
10661 +CONFIG_LZ4_DECOMPRESS=y
10662 +CONFIG_ZSTD_COMPRESS=y
10663 +CONFIG_ZSTD_DECOMPRESS=y
10664 +CONFIG_XZ_DEC=y
10665 +CONFIG_XZ_DEC_X86=y
10666 +CONFIG_XZ_DEC_POWERPC=y
10667 +CONFIG_XZ_DEC_IA64=y
10668 +CONFIG_XZ_DEC_ARM=y
10669 +CONFIG_XZ_DEC_ARMTHUMB=y
10670 +CONFIG_XZ_DEC_SPARC=y
10671 +CONFIG_XZ_DEC_BCJ=y
10672 +CONFIG_XZ_DEC_TEST=m
10673 +CONFIG_DECOMPRESS_GZIP=y
10674 +CONFIG_DECOMPRESS_BZIP2=y
10675 +CONFIG_DECOMPRESS_LZMA=y
10676 +CONFIG_DECOMPRESS_XZ=y
10677 +CONFIG_DECOMPRESS_LZO=y
10678 +CONFIG_DECOMPRESS_LZ4=y
10679 +CONFIG_DECOMPRESS_ZSTD=y
10680 +CONFIG_GENERIC_ALLOCATOR=y
10681 +CONFIG_REED_SOLOMON=m
10682 +CONFIG_REED_SOLOMON_ENC8=y
10683 +CONFIG_REED_SOLOMON_DEC8=y
10684 +CONFIG_REED_SOLOMON_DEC16=y
10685 +CONFIG_BCH=m
10686 +CONFIG_TEXTSEARCH=y
10687 +CONFIG_TEXTSEARCH_KMP=m
10688 +CONFIG_TEXTSEARCH_BM=m
10689 +CONFIG_TEXTSEARCH_FSM=m
10690 +CONFIG_BTREE=y
10691 +CONFIG_INTERVAL_TREE=y
10692 +CONFIG_XARRAY_MULTI=y
10693 +CONFIG_ASSOCIATIVE_ARRAY=y
10694 +CONFIG_HAS_IOMEM=y
10695 +CONFIG_HAS_IOPORT_MAP=y
10696 +CONFIG_HAS_DMA=y
10697 +CONFIG_DMA_OPS=y
10698 +CONFIG_NEED_SG_DMA_LENGTH=y
10699 +CONFIG_NEED_DMA_MAP_STATE=y
10700 +CONFIG_ARCH_DMA_ADDR_T_64BIT=y
10701 +CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED=y
10702 +CONFIG_SWIOTLB=y
10703 +CONFIG_DMA_COHERENT_POOL=y
10704 +# CONFIG_DMA_API_DEBUG is not set
10705 +# CONFIG_DMA_MAP_BENCHMARK is not set
10706 +CONFIG_SGL_ALLOC=y
10707 +CONFIG_IOMMU_HELPER=y
10708 +CONFIG_CHECK_SIGNATURE=y
10709 +CONFIG_CPU_RMAP=y
10710 +CONFIG_DQL=y
10711 +CONFIG_GLOB=y
10712 +# CONFIG_GLOB_SELFTEST is not set
10713 +CONFIG_NLATTR=y
10714 +CONFIG_LRU_CACHE=m
10715 +CONFIG_CLZ_TAB=y
10716 +CONFIG_IRQ_POLL=y
10717 +CONFIG_MPILIB=y
10718 +CONFIG_SIGNATURE=y
10719 +CONFIG_DIMLIB=y
10720 +CONFIG_OID_REGISTRY=y
10721 +CONFIG_UCS2_STRING=y
10722 +CONFIG_HAVE_GENERIC_VDSO=y
10723 +CONFIG_GENERIC_GETTIMEOFDAY=y
10724 +CONFIG_GENERIC_VDSO_TIME_NS=y
10725 +CONFIG_FONT_SUPPORT=y
10726 +CONFIG_FONTS=y
10727 +CONFIG_FONT_8x8=y
10728 +CONFIG_FONT_8x16=y
10729 +# CONFIG_FONT_6x11 is not set
10730 +# CONFIG_FONT_7x14 is not set
10731 +# CONFIG_FONT_PEARL_8x8 is not set
10732 +CONFIG_FONT_ACORN_8x8=y
10733 +# CONFIG_FONT_MINI_4x6 is not set
10734 +CONFIG_FONT_6x10=y
10735 +# CONFIG_FONT_10x18 is not set
10736 +# CONFIG_FONT_SUN8x16 is not set
10737 +# CONFIG_FONT_SUN12x22 is not set
10738 +CONFIG_FONT_TER16x32=y
10739 +# CONFIG_FONT_6x8 is not set
10740 +CONFIG_SG_POOL=y
10741 +CONFIG_ARCH_HAS_PMEM_API=y
10742 +CONFIG_MEMREGION=y
10743 +CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y
10744 +CONFIG_ARCH_HAS_COPY_MC=y
10745 +CONFIG_ARCH_STACKWALK=y
10746 +CONFIG_SBITMAP=y
10747 +CONFIG_PARMAN=m
10748 +CONFIG_OBJAGG=m
10749 +# CONFIG_STRING_SELFTEST is not set
10750 +# end of Library routines
10752 +CONFIG_PLDMFW=y
10755 +# Kernel hacking
10759 +# printk and dmesg options
10761 +CONFIG_PRINTK_TIME=y
10762 +# CONFIG_PRINTK_CALLER is not set
10763 +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7
10764 +CONFIG_CONSOLE_LOGLEVEL_QUIET=3
10765 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4
10766 +CONFIG_BOOT_PRINTK_DELAY=y
10767 +CONFIG_DYNAMIC_DEBUG=y
10768 +CONFIG_DYNAMIC_DEBUG_CORE=y
10769 +# CONFIG_SYMBOLIC_ERRNAME is not set
10770 +# CONFIG_DEBUG_BUGVERBOSE is not set
10771 +# end of printk and dmesg options
10774 +# Compile-time checks and compiler options
10776 +# CONFIG_DEBUG_INFO is not set
10777 +CONFIG_FRAME_WARN=1024
10778 +# CONFIG_STRIP_ASM_SYMS is not set
10779 +# CONFIG_READABLE_ASM is not set
10780 +# CONFIG_HEADERS_INSTALL is not set
10781 +# CONFIG_DEBUG_SECTION_MISMATCH is not set
10782 +CONFIG_SECTION_MISMATCH_WARN_ONLY=y
10783 +# CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_32B is not set
10784 +CONFIG_STACK_VALIDATION=y
10785 +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
10786 +# end of Compile-time checks and compiler options
10789 +# Generic Kernel Debugging Instruments
10791 +CONFIG_MAGIC_SYSRQ=y
10792 +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x01b6
10793 +CONFIG_MAGIC_SYSRQ_SERIAL=y
10794 +CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE=""
10795 +CONFIG_DEBUG_FS=y
10796 +CONFIG_DEBUG_FS_ALLOW_ALL=y
10797 +# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set
10798 +# CONFIG_DEBUG_FS_ALLOW_NONE is not set
10799 +CONFIG_HAVE_ARCH_KGDB=y
10800 +CONFIG_KGDB=y
10801 +CONFIG_KGDB_HONOUR_BLOCKLIST=y
10802 +CONFIG_KGDB_SERIAL_CONSOLE=y
10803 +# CONFIG_KGDB_TESTS is not set
10804 +CONFIG_KGDB_LOW_LEVEL_TRAP=y
10805 +CONFIG_KGDB_KDB=y
10806 +CONFIG_KDB_DEFAULT_ENABLE=0x1
10807 +CONFIG_KDB_KEYBOARD=y
10808 +CONFIG_KDB_CONTINUE_CATASTROPHIC=0
10809 +CONFIG_ARCH_HAS_EARLY_DEBUG=y
10810 +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y
10811 +# CONFIG_UBSAN is not set
10812 +CONFIG_HAVE_ARCH_KCSAN=y
10813 +CONFIG_HAVE_KCSAN_COMPILER=y
10814 +# CONFIG_KCSAN is not set
10815 +# end of Generic Kernel Debugging Instruments
10817 +CONFIG_DEBUG_KERNEL=y
10818 +CONFIG_DEBUG_MISC=y
10821 +# Memory Debugging
10823 +# CONFIG_PAGE_EXTENSION is not set
10824 +# CONFIG_DEBUG_PAGEALLOC is not set
10825 +# CONFIG_PAGE_OWNER is not set
10826 +CONFIG_PAGE_POISONING=y
10827 +# CONFIG_DEBUG_RODATA_TEST is not set
10828 +CONFIG_ARCH_HAS_DEBUG_WX=y
10829 +CONFIG_DEBUG_WX=y
10830 +CONFIG_GENERIC_PTDUMP=y
10831 +CONFIG_PTDUMP_CORE=y
10832 +# CONFIG_PTDUMP_DEBUGFS is not set
10833 +# CONFIG_DEBUG_OBJECTS is not set
10834 +# CONFIG_SLUB_DEBUG_ON is not set
10835 +# CONFIG_SLUB_STATS is not set
10836 +CONFIG_HAVE_DEBUG_KMEMLEAK=y
10837 +# CONFIG_DEBUG_KMEMLEAK is not set
10838 +# CONFIG_DEBUG_STACK_USAGE is not set
10839 +CONFIG_SCHED_STACK_END_CHECK=y
10840 +CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y
10841 +# CONFIG_DEBUG_VM is not set
10842 +# CONFIG_DEBUG_VM_PGTABLE is not set
10843 +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y
10844 +# CONFIG_DEBUG_VIRTUAL is not set
10845 +# CONFIG_DEBUG_MEMORY_INIT is not set
10846 +CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
10847 +# CONFIG_DEBUG_PER_CPU_MAPS is not set
10848 +CONFIG_ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP=y
10849 +# CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP is not set
10850 +CONFIG_HAVE_ARCH_KASAN=y
10851 +CONFIG_HAVE_ARCH_KASAN_VMALLOC=y
10852 +CONFIG_CC_HAS_KASAN_GENERIC=y
10853 +CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y
10854 +# CONFIG_KASAN is not set
10855 +CONFIG_HAVE_ARCH_KFENCE=y
10856 +CONFIG_KFENCE=y
10857 +CONFIG_KFENCE_STATIC_KEYS=y
10858 +CONFIG_KFENCE_SAMPLE_INTERVAL=0
10859 +CONFIG_KFENCE_NUM_OBJECTS=255
10860 +CONFIG_KFENCE_STRESS_TEST_FAULTS=0
10861 +# end of Memory Debugging
10863 +# CONFIG_DEBUG_SHIRQ is not set
10866 +# Debug Oops, Lockups and Hangs
10868 +# CONFIG_PANIC_ON_OOPS is not set
10869 +CONFIG_PANIC_ON_OOPS_VALUE=0
10870 +CONFIG_PANIC_TIMEOUT=0
10871 +CONFIG_LOCKUP_DETECTOR=y
10872 +CONFIG_SOFTLOCKUP_DETECTOR=y
10873 +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
10874 +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
10875 +CONFIG_HARDLOCKUP_DETECTOR_PERF=y
10876 +CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y
10877 +CONFIG_HARDLOCKUP_DETECTOR=y
10878 +# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set
10879 +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0
10880 +CONFIG_DETECT_HUNG_TASK=y
10881 +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
10882 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
10883 +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
10884 +# CONFIG_WQ_WATCHDOG is not set
10885 +# CONFIG_TEST_LOCKUP is not set
10886 +# end of Debug Oops, Lockups and Hangs
10889 +# Scheduler Debugging
10891 +CONFIG_SCHED_DEBUG=y
10892 +CONFIG_SCHED_INFO=y
10893 +CONFIG_SCHEDSTATS=y
10894 +# end of Scheduler Debugging
10896 +# CONFIG_DEBUG_TIMEKEEPING is not set
10897 +# CONFIG_DEBUG_PREEMPT is not set
10900 +# Lock Debugging (spinlocks, mutexes, etc...)
10902 +CONFIG_LOCK_DEBUGGING_SUPPORT=y
10903 +# CONFIG_PROVE_LOCKING is not set
10904 +# CONFIG_LOCK_STAT is not set
10905 +# CONFIG_DEBUG_RT_MUTEXES is not set
10906 +# CONFIG_DEBUG_SPINLOCK is not set
10907 +# CONFIG_DEBUG_MUTEXES is not set
10908 +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set
10909 +# CONFIG_DEBUG_RWSEMS is not set
10910 +# CONFIG_DEBUG_LOCK_ALLOC is not set
10911 +# CONFIG_DEBUG_ATOMIC_SLEEP is not set
10912 +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
10913 +# CONFIG_LOCK_TORTURE_TEST is not set
10914 +# CONFIG_WW_MUTEX_SELFTEST is not set
10915 +# CONFIG_SCF_TORTURE_TEST is not set
10916 +# CONFIG_CSD_LOCK_WAIT_DEBUG is not set
10917 +# end of Lock Debugging (spinlocks, mutexes, etc...)
10919 +# CONFIG_DEBUG_IRQFLAGS is not set
10920 +CONFIG_STACKTRACE=y
10921 +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set
10922 +# CONFIG_DEBUG_KOBJECT is not set
10925 +# Debug kernel data structures
10927 +# CONFIG_DEBUG_LIST is not set
10928 +# CONFIG_DEBUG_PLIST is not set
10929 +# CONFIG_DEBUG_SG is not set
10930 +# CONFIG_DEBUG_NOTIFIERS is not set
10931 +# CONFIG_BUG_ON_DATA_CORRUPTION is not set
10932 +# end of Debug kernel data structures
10934 +# CONFIG_DEBUG_CREDENTIALS is not set
10937 +# RCU Debugging
10939 +# CONFIG_RCU_SCALE_TEST is not set
10940 +# CONFIG_RCU_TORTURE_TEST is not set
10941 +# CONFIG_RCU_REF_SCALE_TEST is not set
10942 +CONFIG_RCU_CPU_STALL_TIMEOUT=60
10943 +# CONFIG_RCU_TRACE is not set
10944 +# CONFIG_RCU_EQS_DEBUG is not set
10945 +# CONFIG_RCU_STRICT_GRACE_PERIOD is not set
10946 +# end of RCU Debugging
10948 +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set
10949 +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
10950 +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set
10951 +CONFIG_LATENCYTOP=y
10952 +CONFIG_USER_STACKTRACE_SUPPORT=y
10953 +CONFIG_HAVE_FUNCTION_TRACER=y
10954 +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
10955 +CONFIG_HAVE_DYNAMIC_FTRACE=y
10956 +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y
10957 +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y
10958 +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y
10959 +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
10960 +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
10961 +CONFIG_HAVE_FENTRY=y
10962 +CONFIG_HAVE_OBJTOOL_MCOUNT=y
10963 +CONFIG_HAVE_C_RECORDMCOUNT=y
10964 +CONFIG_TRACING_SUPPORT=y
10965 +# CONFIG_FTRACE is not set
10966 +# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set
10967 +CONFIG_SAMPLES=y
10968 +# CONFIG_SAMPLE_AUXDISPLAY is not set
10969 +# CONFIG_SAMPLE_KOBJECT is not set
10970 +# CONFIG_SAMPLE_KPROBES is not set
10971 +# CONFIG_SAMPLE_HW_BREAKPOINT is not set
10972 +# CONFIG_SAMPLE_KFIFO is not set
10973 +# CONFIG_SAMPLE_KDB is not set
10974 +# CONFIG_SAMPLE_RPMSG_CLIENT is not set
10975 +# CONFIG_SAMPLE_CONFIGFS is not set
10976 +# CONFIG_SAMPLE_VFIO_MDEV_MTTY is not set
10977 +# CONFIG_SAMPLE_VFIO_MDEV_MDPY is not set
10978 +# CONFIG_SAMPLE_VFIO_MDEV_MDPY_FB is not set
10979 +# CONFIG_SAMPLE_VFIO_MDEV_MBOCHS is not set
10980 +# CONFIG_SAMPLE_WATCHDOG is not set
10981 +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y
10982 +CONFIG_STRICT_DEVMEM=y
10983 +# CONFIG_IO_STRICT_DEVMEM is not set
10986 +# x86 Debugging
10988 +CONFIG_TRACE_IRQFLAGS_SUPPORT=y
10989 +CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y
10990 +CONFIG_EARLY_PRINTK_USB=y
10991 +# CONFIG_X86_VERBOSE_BOOTUP is not set
10992 +CONFIG_EARLY_PRINTK=y
10993 +CONFIG_EARLY_PRINTK_DBGP=y
10994 +CONFIG_EARLY_PRINTK_USB_XDBC=y
10995 +# CONFIG_EFI_PGT_DUMP is not set
10996 +# CONFIG_DEBUG_TLBFLUSH is not set
10997 +# CONFIG_IOMMU_DEBUG is not set
10998 +CONFIG_HAVE_MMIOTRACE_SUPPORT=y
10999 +# CONFIG_X86_DECODER_SELFTEST is not set
11000 +# CONFIG_IO_DELAY_0X80 is not set
11001 +CONFIG_IO_DELAY_0XED=y
11002 +# CONFIG_IO_DELAY_UDELAY is not set
11003 +# CONFIG_IO_DELAY_NONE is not set
11004 +# CONFIG_DEBUG_BOOT_PARAMS is not set
11005 +# CONFIG_CPA_DEBUG is not set
11006 +# CONFIG_DEBUG_ENTRY is not set
11007 +# CONFIG_DEBUG_NMI_SELFTEST is not set
11008 +CONFIG_X86_DEBUG_FPU=y
11009 +CONFIG_PUNIT_ATOM_DEBUG=m
11010 +CONFIG_UNWINDER_ORC=y
11011 +# CONFIG_UNWINDER_FRAME_POINTER is not set
11012 +# CONFIG_UNWINDER_GUESS is not set
11013 +# end of x86 Debugging
11016 +# Kernel Testing and Coverage
11018 +# CONFIG_KUNIT is not set
11019 +CONFIG_NOTIFIER_ERROR_INJECTION=m
11020 +CONFIG_PM_NOTIFIER_ERROR_INJECT=m
11021 +# CONFIG_NETDEV_NOTIFIER_ERROR_INJECT is not set
11022 +CONFIG_FUNCTION_ERROR_INJECTION=y
11023 +# CONFIG_FAULT_INJECTION is not set
11024 +CONFIG_ARCH_HAS_KCOV=y
11025 +CONFIG_CC_HAS_SANCOV_TRACE_PC=y
11026 +# CONFIG_KCOV is not set
11027 +CONFIG_RUNTIME_TESTING_MENU=y
11028 +# CONFIG_LKDTM is not set
11029 +# CONFIG_TEST_LIST_SORT is not set
11030 +# CONFIG_TEST_MIN_HEAP is not set
11031 +# CONFIG_TEST_SORT is not set
11032 +# CONFIG_KPROBES_SANITY_TEST is not set
11033 +# CONFIG_BACKTRACE_SELF_TEST is not set
11034 +# CONFIG_RBTREE_TEST is not set
11035 +# CONFIG_REED_SOLOMON_TEST is not set
11036 +# CONFIG_INTERVAL_TREE_TEST is not set
11037 +# CONFIG_PERCPU_TEST is not set
11038 +# CONFIG_ATOMIC64_SELFTEST is not set
11039 +# CONFIG_ASYNC_RAID6_TEST is not set
11040 +# CONFIG_TEST_HEXDUMP is not set
11041 +# CONFIG_TEST_STRING_HELPERS is not set
11042 +# CONFIG_TEST_STRSCPY is not set
11043 +# CONFIG_TEST_KSTRTOX is not set
11044 +# CONFIG_TEST_PRINTF is not set
11045 +# CONFIG_TEST_BITMAP is not set
11046 +# CONFIG_TEST_UUID is not set
11047 +# CONFIG_TEST_XARRAY is not set
11048 +# CONFIG_TEST_OVERFLOW is not set
11049 +# CONFIG_TEST_RHASHTABLE is not set
11050 +# CONFIG_TEST_HASH is not set
11051 +# CONFIG_TEST_IDA is not set
11052 +# CONFIG_TEST_PARMAN is not set
11053 +# CONFIG_TEST_LKM is not set
11054 +# CONFIG_TEST_BITOPS is not set
11055 +# CONFIG_TEST_VMALLOC is not set
11056 +# CONFIG_TEST_USER_COPY is not set
11057 +CONFIG_TEST_BPF=m
11058 +CONFIG_TEST_BLACKHOLE_DEV=m
11059 +# CONFIG_FIND_BIT_BENCHMARK is not set
11060 +# CONFIG_TEST_FIRMWARE is not set
11061 +# CONFIG_TEST_SYSCTL is not set
11062 +# CONFIG_TEST_UDELAY is not set
11063 +# CONFIG_TEST_STATIC_KEYS is not set
11064 +# CONFIG_TEST_KMOD is not set
11065 +# CONFIG_TEST_MEMCAT_P is not set
11066 +# CONFIG_TEST_OBJAGG is not set
11067 +# CONFIG_TEST_STACKINIT is not set
11068 +# CONFIG_TEST_MEMINIT is not set
11069 +# CONFIG_TEST_HMM is not set
11070 +# CONFIG_TEST_FREE_PAGES is not set
11071 +# CONFIG_TEST_FPU is not set
11072 +CONFIG_MEMTEST=y
11073 +# CONFIG_HYPERV_TESTING is not set
11074 +# end of Kernel Testing and Coverage
11075 +# end of Kernel hacking
11076 diff --git a/.gitignore b/.gitignore
11077 index 3af66272d6f1..127012c1f717 100644
11078 --- a/.gitignore
11079 +++ b/.gitignore
11080 @@ -57,6 +57,7 @@ modules.order
11081  /tags
11082  /TAGS
11083  /linux
11084 +/modules-only.symvers
11085  /vmlinux
11086  /vmlinux.32
11087  /vmlinux.symvers
11088 diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
11089 index 04545725f187..e38e2c55b2fa 100644
11090 --- a/Documentation/admin-guide/kernel-parameters.txt
11091 +++ b/Documentation/admin-guide/kernel-parameters.txt
11092 @@ -358,6 +358,10 @@
11093         autoconf=       [IPV6]
11094                         See Documentation/networking/ipv6.rst.
11096 +       autogroup=      [KNL] Enable or disable scheduler automatic task group
11097 +                       creation.
11098 +                       Format: <bool>
11100         show_lapic=     [APIC,X86] Advanced Programmable Interrupt Controller
11101                         Limit apic dumping. The parameter defines the maximal
11102                         number of local apics being dumped. Also it is possible
11103 @@ -1869,13 +1873,6 @@
11104                         bypassed by not enabling DMAR with this option. In
11105                         this case, gfx device will use physical address for
11106                         DMA.
11107 -               forcedac [X86-64]
11108 -                       With this option iommu will not optimize to look
11109 -                       for io virtual address below 32-bit forcing dual
11110 -                       address cycle on pci bus for cards supporting greater
11111 -                       than 32-bit addressing. The default is to look
11112 -                       for translation below 32-bit and if not available
11113 -                       then look in the higher range.
11114                 strict [Default Off]
11115                         With this option on every unmap_single operation will
11116                         result in a hardware IOTLB flush operation as opposed
11117 @@ -1964,6 +1961,14 @@
11118                 nobypass        [PPC/POWERNV]
11119                         Disable IOMMU bypass, using IOMMU for PCI devices.
11121 +       iommu.forcedac= [ARM64, X86] Control IOVA allocation for PCI devices.
11122 +                       Format: { "0" | "1" }
11123 +                       0 - Try to allocate a 32-bit DMA address first, before
11124 +                         falling back to the full range if needed.
11125 +                       1 - Allocate directly from the full usable range,
11126 +                         forcing Dual Address Cycle for PCI cards supporting
11127 +                         greater than 32-bit addressing.
11129         iommu.strict=   [ARM64] Configure TLB invalidation behaviour
11130                         Format: { "0" | "1" }
11131                         0 - Lazy mode.
11132 @@ -3196,8 +3201,6 @@
11133         noapic          [SMP,APIC] Tells the kernel to not make use of any
11134                         IOAPICs that may be present in the system.
11136 -       noautogroup     Disable scheduler automatic task group creation.
11138         nobats          [PPC] Do not use BATs for mapping kernel lowmem
11139                         on "Classic" PPC cores.
11141 @@ -3660,6 +3663,15 @@
11142                 nomsi           [MSI] If the PCI_MSI kernel config parameter is
11143                                 enabled, this kernel boot option can be used to
11144                                 disable the use of MSI interrupts system-wide.
11145 +               pcie_acs_override =
11146 +                                       [PCIE] Override missing PCIe ACS support for:
11147 +                               downstream
11148 +                                       All downstream ports - full ACS capabilities
11149 +                               multifunction
11150 +                                       All multifunction devices - multifunction ACS subset
11151 +                               id:nnnn:nnnn
11152 +                                       Specific device - full ACS capabilities
11153 +                                       Specified as vid:did (vendor/device ID) in hex
11154                 noioapicquirk   [APIC] Disable all boot interrupt quirks.
11155                                 Safety option to keep boot IRQs enabled. This
11156                                 should never be necessary.
11157 diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
11158 index 1d56a6b73a4e..4d55ff02310c 100644
11159 --- a/Documentation/admin-guide/sysctl/kernel.rst
11160 +++ b/Documentation/admin-guide/sysctl/kernel.rst
11161 @@ -1087,6 +1087,10 @@ Model available). If your platform happens to meet the
11162  requirements for EAS but you do not want to use it, change
11163  this value to 0.
11165 +sched_interactivity_factor (CacULE scheduler only)
11166 +==================================================
11167 +Sets the value *m* for interactivity score calculations. See
11168 +Figure 1 in https://web.cs.ucdavis.edu/~roper/ecs150/ULE.pdf
11170  sched_schedstats
11171  ================
11172 diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst
11173 index 586cd4b86428..cf4a90d7a058 100644
11174 --- a/Documentation/admin-guide/sysctl/vm.rst
11175 +++ b/Documentation/admin-guide/sysctl/vm.rst
11176 @@ -26,6 +26,8 @@ Currently, these files are in /proc/sys/vm:
11178  - admin_reserve_kbytes
11179  - block_dump
11180 +- clean_low_kbytes
11181 +- clean_min_kbytes
11182  - compact_memory
11183  - compaction_proactiveness
11184  - compact_unevictable_allowed
11185 @@ -113,6 +115,41 @@ block_dump enables block I/O debugging when set to a nonzero value. More
11186  information on block I/O debugging is in Documentation/admin-guide/laptops/laptop-mode.rst.
11189 +clean_low_kbytes
11190 +=====================
11192 +This knob provides *best-effort* protection of clean file pages. The clean file
11193 +pages on the current node won't be reclaimed under memory pressure when their
11194 +amount is below vm.clean_low_kbytes *unless* we threaten to OOM or have no
11195 +free swap space or vm.swappiness=0.
11197 +Protection of clean file pages may be used to prevent thrashing and
11198 +reduce I/O under low-memory conditions.
11200 +Setting it to a high value may result in an early eviction of anonymous pages
11201 +into the swap space by attempting to hold the protected amount of clean file
11202 +pages in memory.
11204 +The default value is defined by CONFIG_CLEAN_LOW_KBYTES.
11207 +clean_min_kbytes
11208 +=====================
11210 +This knob provides *hard* protection of clean file pages. The clean file pages
11211 +on the current node won't be reclaimed under memory pressure when their amount
11212 +is below vm.clean_min_kbytes.
11214 +Hard protection of clean file pages may be used to avoid high latency and
11215 +prevent livelock in near-OOM conditions.
11217 +Setting it to a high value may result in an early out-of-memory condition due to
11218 +the inability to reclaim the protected amount of clean file pages when other
11219 +types of pages cannot be reclaimed.
11221 +The default value is defined by CONFIG_CLEAN_MIN_KBYTES.
11224  compact_memory
11225  ==============
11227 diff --git a/Documentation/devicetree/bindings/media/renesas,vin.yaml b/Documentation/devicetree/bindings/media/renesas,vin.yaml
11228 index fe7c4cbfe4ba..dd1a5ce5896c 100644
11229 --- a/Documentation/devicetree/bindings/media/renesas,vin.yaml
11230 +++ b/Documentation/devicetree/bindings/media/renesas,vin.yaml
11231 @@ -193,23 +193,35 @@ required:
11232    - interrupts
11233    - clocks
11234    - power-domains
11235 -  - resets
11237 -if:
11238 -  properties:
11239 -    compatible:
11240 -      contains:
11241 -        enum:
11242 -          - renesas,vin-r8a7778
11243 -          - renesas,vin-r8a7779
11244 -          - renesas,rcar-gen2-vin
11245 -then:
11246 -  required:
11247 -    - port
11248 -else:
11249 -  required:
11250 -    - renesas,id
11251 -    - ports
11253 +allOf:
11254 +  - if:
11255 +      not:
11256 +        properties:
11257 +          compatible:
11258 +            contains:
11259 +              enum:
11260 +                - renesas,vin-r8a7778
11261 +                - renesas,vin-r8a7779
11262 +    then:
11263 +      required:
11264 +        - resets
11266 +  - if:
11267 +      properties:
11268 +        compatible:
11269 +          contains:
11270 +            enum:
11271 +              - renesas,vin-r8a7778
11272 +              - renesas,vin-r8a7779
11273 +              - renesas,rcar-gen2-vin
11274 +    then:
11275 +      required:
11276 +        - port
11277 +    else:
11278 +      required:
11279 +        - renesas,id
11280 +        - ports
11282  additionalProperties: false
11284 diff --git a/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml b/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
11285 index 4a2bcc0158e2..8fdfbc763d70 100644
11286 --- a/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
11287 +++ b/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
11288 @@ -17,6 +17,7 @@ allOf:
11289  properties:
11290    compatible:
11291      oneOf:
11292 +      - const: renesas,pcie-r8a7779       # R-Car H1
11293        - items:
11294            - enum:
11295                - renesas,pcie-r8a7742      # RZ/G1H
11296 @@ -74,7 +75,16 @@ required:
11297    - clocks
11298    - clock-names
11299    - power-domains
11300 -  - resets
11302 +if:
11303 +  not:
11304 +    properties:
11305 +      compatible:
11306 +        contains:
11307 +          const: renesas,pcie-r8a7779
11308 +then:
11309 +  required:
11310 +    - resets
11312  unevaluatedProperties: false
11314 diff --git a/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
11315 index 626447fee092..7808ec8bc712 100644
11316 --- a/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
11317 +++ b/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
11318 @@ -25,11 +25,13 @@ properties:
11319        - qcom,msm8998-qmp-pcie-phy
11320        - qcom,msm8998-qmp-ufs-phy
11321        - qcom,msm8998-qmp-usb3-phy
11322 +      - qcom,sc7180-qmp-usb3-phy
11323        - qcom,sc8180x-qmp-ufs-phy
11324        - qcom,sc8180x-qmp-usb3-phy
11325        - qcom,sdm845-qhp-pcie-phy
11326        - qcom,sdm845-qmp-pcie-phy
11327        - qcom,sdm845-qmp-ufs-phy
11328 +      - qcom,sdm845-qmp-usb3-phy
11329        - qcom,sdm845-qmp-usb3-uni-phy
11330        - qcom,sm8150-qmp-ufs-phy
11331        - qcom,sm8150-qmp-usb3-phy
11332 diff --git a/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml
11333 index 33974ad10afe..62c0179d1765 100644
11334 --- a/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml
11335 +++ b/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml
11336 @@ -14,9 +14,7 @@ properties:
11337    compatible:
11338      enum:
11339        - qcom,sc7180-qmp-usb3-dp-phy
11340 -      - qcom,sc7180-qmp-usb3-phy
11341        - qcom,sdm845-qmp-usb3-dp-phy
11342 -      - qcom,sdm845-qmp-usb3-phy
11343    reg:
11344      items:
11345        - description: Address and length of PHY's USB serdes block.
11346 diff --git a/Documentation/devicetree/bindings/serial/8250.yaml b/Documentation/devicetree/bindings/serial/8250.yaml
11347 index f54cae9ff7b2..d3f87f2bfdc2 100644
11348 --- a/Documentation/devicetree/bindings/serial/8250.yaml
11349 +++ b/Documentation/devicetree/bindings/serial/8250.yaml
11350 @@ -93,11 +93,6 @@ properties:
11351                - mediatek,mt7622-btif
11352                - mediatek,mt7623-btif
11353            - const: mediatek,mtk-btif
11354 -      - items:
11355 -          - enum:
11356 -              - mediatek,mt7622-btif
11357 -              - mediatek,mt7623-btif
11358 -          - const: mediatek,mtk-btif
11359        - items:
11360            - const: mrvl,mmp-uart
11361            - const: intel,xscale-uart
11362 diff --git a/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml b/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
11363 index 8631678283f9..865be05083c3 100644
11364 --- a/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
11365 +++ b/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
11366 @@ -80,7 +80,8 @@ required:
11367    - interrupts
11368    - clocks
11370 -additionalProperties: false
11371 +additionalProperties:
11372 +  type: object
11374  examples:
11375    - |
11376 diff --git a/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml
11377 index b33a76eeac4e..f963204e0b16 100644
11378 --- a/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml
11379 +++ b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml
11380 @@ -28,14 +28,7 @@ properties:
11381        - renesas,r8a77980-thermal # R-Car V3H
11382        - renesas,r8a779a0-thermal # R-Car V3U
11384 -  reg:
11385 -    minItems: 2
11386 -    maxItems: 4
11387 -    items:
11388 -      - description: TSC1 registers
11389 -      - description: TSC2 registers
11390 -      - description: TSC3 registers
11391 -      - description: TSC4 registers
11392 +  reg: true
11394    interrupts:
11395      items:
11396 @@ -71,8 +64,25 @@ if:
11397            enum:
11398              - renesas,r8a779a0-thermal
11399  then:
11400 +  properties:
11401 +    reg:
11402 +      minItems: 2
11403 +      maxItems: 3
11404 +      items:
11405 +        - description: TSC1 registers
11406 +        - description: TSC2 registers
11407 +        - description: TSC3 registers
11408    required:
11409      - interrupts
11410 +else:
11411 +  properties:
11412 +    reg:
11413 +      items:
11414 +        - description: TSC0 registers
11415 +        - description: TSC1 registers
11416 +        - description: TSC2 registers
11417 +        - description: TSC3 registers
11418 +        - description: TSC4 registers
11420  additionalProperties: false
11422 @@ -111,3 +121,20 @@ examples:
11423                      };
11424              };
11425      };
11426 +  - |
11427 +    #include <dt-bindings/clock/r8a779a0-cpg-mssr.h>
11428 +    #include <dt-bindings/interrupt-controller/arm-gic.h>
11429 +    #include <dt-bindings/power/r8a779a0-sysc.h>
11431 +    tsc_r8a779a0: thermal@e6190000 {
11432 +            compatible = "renesas,r8a779a0-thermal";
11433 +            reg = <0xe6190000 0x200>,
11434 +                  <0xe6198000 0x200>,
11435 +                  <0xe61a0000 0x200>,
11436 +                  <0xe61a8000 0x200>,
11437 +                  <0xe61b0000 0x200>;
11438 +            clocks = <&cpg CPG_MOD 919>;
11439 +            power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
11440 +            resets = <&cpg 919>;
11441 +            #thermal-sensor-cells = <1>;
11442 +    };
11443 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
11444 index e361fc95ca29..82e3eee7363b 100644
11445 --- a/Documentation/dontdiff
11446 +++ b/Documentation/dontdiff
11447 @@ -178,6 +178,7 @@ mktables
11448  mktree
11449  mkutf8data
11450  modpost
11451 +modules-only.symvers
11452  modules.builtin
11453  modules.builtin.modinfo
11454  modules.nsdeps
11455 diff --git a/Documentation/driver-api/xilinx/eemi.rst b/Documentation/driver-api/xilinx/eemi.rst
11456 index 9dcbc6f18d75..c1bc47b9000d 100644
11457 --- a/Documentation/driver-api/xilinx/eemi.rst
11458 +++ b/Documentation/driver-api/xilinx/eemi.rst
11459 @@ -16,35 +16,8 @@ components running across different processing clusters on a chip or
11460  device to communicate with a power management controller (PMC) on a
11461  device to issue or respond to power management requests.
11463 -EEMI ops is a structure containing all eemi APIs supported by Zynq MPSoC.
11464 -The zynqmp-firmware driver maintain all EEMI APIs in zynqmp_eemi_ops
11465 -structure. Any driver who want to communicate with PMC using EEMI APIs
11466 -can call zynqmp_pm_get_eemi_ops().
11468 -Example of EEMI ops::
11470 -       /* zynqmp-firmware driver maintain all EEMI APIs */
11471 -       struct zynqmp_eemi_ops {
11472 -               int (*get_api_version)(u32 *version);
11473 -               int (*query_data)(struct zynqmp_pm_query_data qdata, u32 *out);
11474 -       };
11476 -       static const struct zynqmp_eemi_ops eemi_ops = {
11477 -               .get_api_version = zynqmp_pm_get_api_version,
11478 -               .query_data = zynqmp_pm_query_data,
11479 -       };
11481 -Example of EEMI ops usage::
11483 -       static const struct zynqmp_eemi_ops *eemi_ops;
11484 -       u32 ret_payload[PAYLOAD_ARG_CNT];
11485 -       int ret;
11487 -       eemi_ops = zynqmp_pm_get_eemi_ops();
11488 -       if (IS_ERR(eemi_ops))
11489 -               return PTR_ERR(eemi_ops);
11491 -       ret = eemi_ops->query_data(qdata, ret_payload);
11492 +Any driver who wants to communicate with PMC using EEMI APIs use the
11493 +functions provided for each function.
11495  IOCTL
11496  ------
11497 diff --git a/Documentation/filesystems/ntfs3.rst b/Documentation/filesystems/ntfs3.rst
11498 new file mode 100644
11499 index 000000000000..ffe9ea0c1499
11500 --- /dev/null
11501 +++ b/Documentation/filesystems/ntfs3.rst
11502 @@ -0,0 +1,106 @@
11503 +.. SPDX-License-Identifier: GPL-2.0
11505 +=====
11506 +NTFS3
11507 +=====
11510 +Summary and Features
11511 +====================
11513 +NTFS3 is a fully functional NTFS Read-Write driver. The driver works with
11514 +NTFS versions up to 3.1, normal/compressed/sparse files
11515 +and journal replaying. File system type to use on mount is 'ntfs3'.
11517 +- This driver implements NTFS read/write support for normal, sparse and
11518 +  compressed files.
11519 +- Supports native journal replaying;
11520 +- Supports extended attributes
11521 +       Predefined extended attributes:
11522 +       - 'system.ntfs_security' gets/sets security
11523 +                       descriptor (SECURITY_DESCRIPTOR_RELATIVE)
11524 +       - 'system.ntfs_attrib' gets/sets ntfs file/dir attributes.
11525 +               Note: applied to empty files, this allows to switch type between
11526 +               sparse(0x200), compressed(0x800) and normal;
11527 +- Supports NFS export of mounted NTFS volumes.
11529 +Mount Options
11530 +=============
11532 +The list below describes mount options supported by NTFS3 driver in addition to
11533 +generic ones.
11535 +===============================================================================
11537 +nls=name               This option informs the driver how to interpret path
11538 +                       strings and translate them to Unicode and back. If
11539 +                       this option is not set, the default codepage will be
11540 +                       used (CONFIG_NLS_DEFAULT).
11541 +                       Examples:
11542 +                               'nls=utf8'
11544 +uid=
11545 +gid=
11546 +umask=                 Controls the default permissions for files/directories created
11547 +                       after the NTFS volume is mounted.
11549 +fmask=
11550 +dmask=                 Instead of specifying umask which applies both to
11551 +                       files and directories, fmask applies only to files and
11552 +                       dmask only to directories.
11554 +nohidden               Files with the Windows-specific HIDDEN (FILE_ATTRIBUTE_HIDDEN)
11555 +                       attribute will not be shown under Linux.
11557 +sys_immutable          Files with the Windows-specific SYSTEM
11558 +                       (FILE_ATTRIBUTE_SYSTEM) attribute will be marked as system
11559 +                       immutable files.
11561 +discard                        Enable support of the TRIM command for improved performance
11562 +                       on delete operations, which is recommended for use with the
11563 +                       solid-state drives (SSD).
11565 +force                  Forces the driver to mount partitions even if 'dirty' flag
11566 +                       (volume dirty) is set. Not recommended for use.
11568 +sparse                 Create new files as "sparse".
11570 +showmeta               Use this parameter to show all meta-files (System Files) on
11571 +                       a mounted NTFS partition.
11572 +                       By default, all meta-files are hidden.
11574 +prealloc               Preallocate space for files excessively when file size is
11575 +                       increasing on writes. Decreases fragmentation in case of
11576 +                       parallel write operations to different files.
11578 +no_acs_rules           "No access rules" mount option sets access rights for
11579 +                       files/folders to 777 and owner/group to root. This mount
11580 +                       option absorbs all other permissions:
11581 +                       - permissions change for files/folders will be reported
11582 +                               as successful, but they will remain 777;
11583 +                       - owner/group change will be reported as successful, but
11584 +                               they will stay as root
11586 +acl                    Support POSIX ACLs (Access Control Lists). Effective if
11587 +                       supported by Kernel. Not to be confused with NTFS ACLs.
11588 +                       The option specified as acl enables support for POSIX ACLs.
11590 +noatime                        All files and directories will not update their last access
11591 +                       time attribute if a partition is mounted with this parameter.
11592 +                       This option can speed up file system operation.
11594 +===============================================================================
11596 +ToDo list
11597 +=========
11599 +- Full journaling support (currently journal replaying is supported) over JBD.
11602 +References
11603 +==========
11604 +https://www.paragon-software.com/home/ntfs-linux-professional/
11605 +       - Commercial version of the NTFS driver for Linux.
11607 +almaz.alexandrovich@paragon-software.com
11608 +       - Direct e-mail address for feedback and requests on the NTFS3 implementation.
11609 diff --git a/Documentation/locking/futex2.rst b/Documentation/locking/futex2.rst
11610 new file mode 100644
11611 index 000000000000..3ab49f0e741c
11612 --- /dev/null
11613 +++ b/Documentation/locking/futex2.rst
11614 @@ -0,0 +1,198 @@
11615 +.. SPDX-License-Identifier: GPL-2.0
11617 +======
11618 +futex2
11619 +======
11621 +:Author: André Almeida <andrealmeid@collabora.com>
11623 +futex, or fast user mutex, is a set of syscalls to allow userspace to create
11624 +performant synchronization mechanisms, such as mutexes, semaphores and
11625 +condition variables in userspace. C standard libraries, like glibc, use it
11626 +as a means to implement more high level interfaces like pthreads.
11628 +The interface
11629 +=============
11631 +uAPI functions
11632 +--------------
11634 +.. kernel-doc:: kernel/futex2.c
11635 +   :identifiers: sys_futex_wait sys_futex_wake sys_futex_waitv sys_futex_requeue
11637 +uAPI structures
11638 +---------------
11640 +.. kernel-doc:: include/uapi/linux/futex.h
11642 +The ``flag`` argument
11643 +---------------------
11645 +The flag is used to specify the size of the futex word
11646 +(FUTEX_[8, 16, 32]). It's mandatory to define one, since there's no
11647 +default size.
11649 +By default, the timeout uses a monotonic clock, but can be used as a realtime
11650 +one by using the FUTEX_REALTIME_CLOCK flag.
11652 +By default, futexes are of the private type, that means that this user address
11653 +will be accessed by threads that share the same memory region. This allows for
11654 +some internal optimizations, so they are faster. However, if the address needs
11655 +to be shared with different processes (like using ``mmap()`` or ``shm()``), they
11656 +need to be defined as shared and the flag FUTEX_SHARED_FLAG is used to set that.
11658 +By default, the operation has no NUMA-awareness, meaning that the user can't
11659 +choose the memory node where the kernel side futex data will be stored. The
11660 +user can choose the node where it wants to operate by setting the
11661 +FUTEX_NUMA_FLAG and using the following structure (where X can be 8, 16, or
11662 +32)::
11664 + struct futexX_numa {
11665 +         __uX value;
11666 +         __sX hint;
11667 + };
11669 +This structure should be passed at the ``void *uaddr`` of futex functions. The
11670 +address of the structure will be used to be waited on/waken on, and the
11671 +``value`` will be compared to ``val`` as usual. The ``hint`` member is used to
11672 +define which node the futex will use. When waiting, the futex will be
11673 +registered on a kernel-side table stored on that node; when waking, the futex
11674 +will be searched for on that given table. That means that there's no redundancy
11675 +between tables, and the wrong ``hint`` value will lead to undesired behavior.
11676 +Userspace is responsible for dealing with node migrations issues that may
11677 +occur. ``hint`` can range from [0, MAX_NUMA_NODES), for specifying a node, or
11678 +-1, to use the same node the current process is using.
11680 +When not using FUTEX_NUMA_FLAG on a NUMA system, the futex will be stored on a
11681 +global table allocated on the first node.
11683 +The ``timo`` argument
11684 +---------------------
11686 +As per the Y2038 work done in the kernel, new interfaces shouldn't add timeout
11687 +options known to be buggy. Given that, ``timo`` should be a 64-bit timeout at
11688 +all platforms, using an absolute timeout value.
11690 +Implementation
11691 +==============
11693 +The internal implementation follows a similar design to the original futex.
11694 +Given that we want to replicate the same external behavior of current futex,
11695 +this should be somewhat expected.
11697 +Waiting
11698 +-------
11700 +For the wait operations, they are all treated as if you want to wait on N
11701 +futexes, so the path for futex_wait and futex_waitv is basically the same.
11702 +For both syscalls, the first step is to prepare an internal list for the list
11703 +of futexes to wait for (using struct futexv_head). For futex_wait() calls, this
11704 +list will have a single object.
11706 +We have a hash table, where waiters register themselves before sleeping. Then
11707 +the wake function checks this table looking for waiters at uaddr.  The hash
11708 +bucket to be used is determined by a struct futex_key, that stores information
11709 +to uniquely identify an address from a given process. Given the huge address
11710 +space, there'll be hash collisions, so we store information to be later used on
11711 +collision treatment.
11713 +First, for every futex we want to wait on, we check if (``*uaddr == val``).
11714 +This check is done holding the bucket lock, so we are correctly serialized with
11715 +any futex_wake() calls. If any waiter fails the check above, we dequeue all
11716 +futexes. The check (``*uaddr == val``) can fail for two reasons:
11718 +- The values are different, and we return -EAGAIN. However, if while
11719 +  dequeueing we found that some futexes were awakened, we prioritize this
11720 +  and return success.
11722 +- When trying to access the user address, we do so with page faults
11723 +  disabled because we are holding a bucket's spin lock (and can't sleep
11724 +  while holding a spin lock). If there's an error, it might be a page
11725 +  fault, or an invalid address. We release the lock, dequeue everyone
11726 +  (because it's illegal to sleep while there are futexes enqueued, we
11727 +  could lose wakeups) and try again with page fault enabled. If we
11728 +  succeed, this means that the address is valid, but we need to do
11729 +  all the work again. For serialization reasons, we need to have the
11730 +  spin lock when getting the user value. Additionally, for shared
11731 +  futexes, we also need to recalculate the hash, since the underlying
11732 +  mapping mechanisms could have changed when dealing with page fault.
11733 +  If, even with page fault enabled, we can't access the address, it
11734 +  means it's an invalid user address, and we return -EFAULT. For this
11735 +  case, we prioritize the error, even if some futexes were awakened.
11737 +If the check is OK, they are enqueued on a linked list in our bucket, and
11738 +proceed to the next one. If all waiters succeed, we put the thread to sleep
11739 +until a futex_wake() call, timeout expires or we get a signal. After waking up,
11740 +we dequeue everyone, and check if some futex was awakened. This dequeue is done
11741 +by iteratively walking at each element of struct futex_head list.
11743 +All enqueuing/dequeuing operations requires to hold the bucket lock, to avoid
11744 +racing while modifying the list.
11746 +Waking
11747 +------
11749 +We get the bucket that's storing the waiters at uaddr, and wake the required
11750 +number of waiters, checking for hash collision.
11752 +There's an optimization that makes futex_wake() not take the bucket lock if
11753 +there's no one to be woken on that bucket. It checks an atomic counter that each
11754 +bucket has, if it says 0, then the syscall exits. In order for this to work, the
11755 +waiter thread increases it before taking the lock, so the wake thread will
11756 +correctly see that there's someone waiting and will continue the path to take
11757 +the bucket lock. To get the correct serialization, the waiter issues a memory
11758 +barrier after increasing the bucket counter and the waker issues a memory
11759 +barrier before checking it.
11761 +Requeuing
11762 +---------
11764 +The requeue path first checks for each struct futex_requeue and their flags.
11765 +Then, it will compare the expected value with the one at uaddr1::uaddr.
11766 +Following the same serialization explained at Waking_, we increase the atomic
11767 +counter for the bucket of uaddr2 before taking the lock. We need to have both
11768 +buckets locks at same time so we don't race with other futex operation. To
11769 +ensure the locks are taken in the same order for all threads (and thus avoiding
11770 +deadlocks), every requeue operation takes the "smaller" bucket first, when
11771 +comparing both addresses.
11773 +If the compare with user value succeeds, we proceed by waking ``nr_wake``
11774 +futexes, and then requeuing ``nr_requeue`` from bucket of uaddr1 to the uaddr2.
11775 +This consists in a simple list deletion/addition and replacing the old futex key
11776 +with the new one.
11778 +Futex keys
11779 +----------
11781 +There are two types of futexes: private and shared ones. The private are futexes
11782 +meant to be used by threads that share the same memory space, are easier to be
11783 +uniquely identified and thus can have some performance optimization. The
11784 +elements for identifying one are: the start address of the page where the
11785 +address is, the address offset within the page and the current->mm pointer.
11787 +Now, for uniquely identifying a shared futex:
11789 +- If the page containing the user address is an anonymous page, we can
11790 +  just use the same data used for private futexes (the start address of
11791 +  the page, the address offset within the page and the current->mm
11792 +  pointer); that will be enough for uniquely identifying such futex. We
11793 +  also set one bit at the key to differentiate if a private futex is
11794 +  used on the same address (mixing shared and private calls does not
11795 +  work).
11797 +- If the page is file-backed, current->mm maybe isn't the same one for
11798 +  every user of this futex, so we need to use other data: the
11799 +  page->index, a UUID for the struct inode and the offset within the
11800 +  page.
11802 +Note that members of futex_key don't have any particular meaning after they
11803 +are part of the struct - they are just bytes to identify a futex.  Given that,
11804 +we don't need to use a particular name or type that matches the original data,
11805 +we only need to care about the bitsize of each component and make both private
11806 +and shared fit in the same memory space.
11808 +Source code documentation
11809 +=========================
11811 +.. kernel-doc:: kernel/futex2.c
11812 +   :no-identifiers: sys_futex_wait sys_futex_wake sys_futex_waitv sys_futex_requeue
11813 diff --git a/Documentation/locking/index.rst b/Documentation/locking/index.rst
11814 index 7003bd5aeff4..9bf03c7fa1ec 100644
11815 --- a/Documentation/locking/index.rst
11816 +++ b/Documentation/locking/index.rst
11817 @@ -24,6 +24,7 @@ locking
11818      percpu-rw-semaphore
11819      robust-futexes
11820      robust-futex-ABI
11821 +    futex2
11823  .. only::  subproject and html
11825 diff --git a/Documentation/scheduler/sched-CacULE.rst b/Documentation/scheduler/sched-CacULE.rst
11826 new file mode 100644
11827 index 000000000000..82b0847c468a
11828 --- /dev/null
11829 +++ b/Documentation/scheduler/sched-CacULE.rst
11830 @@ -0,0 +1,76 @@
11831 +======================================
11832 +The CacULE Scheduler by Hamad Al Marri.
11833 +======================================
11835 +1.  Overview
11836 +=============
11838 +The CacULE CPU scheduler is based on interactivity score mechanism.
11839 +The interactivity score is inspired by the ULE scheduler (FreeBSD
11840 +scheduler).
11842 +1.1 About CacULE Scheduler
11843 +--------------------------
11845 +  - Each CPU has its own runqueue.
11847 +  - NORMAL runqueue is a linked list of sched_entities (instead of RB-Tree).
11849 +  - RT and other runqueues are just the same as the CFS's.
11851 +  - Wake up tasks preempt currently running tasks if its interactivity score value
11852 +    is higher.
11855 +1.2. Complexity
11856 +----------------
11858 +The complexity of Enqueue and Dequeue a task is O(1).
11860 +The complexity of pick the next task is in O(n), where n is the number of tasks
11861 +in a runqueue (each CPU has its own runqueue).
11863 +Note: O(n) sounds scary, but usually for a machine with 4 CPUS where it is used
11864 +for desktop or mobile jobs, the maximum number of runnable tasks might not
11865 +exceed 10 (at the pick next run time) - the idle tasks are excluded since they
11866 +are dequeued when sleeping and enqueued when they wake up.
11869 +2. The CacULE Interactivity Score
11870 +=======================================================
11872 +The interactivity score is inspired by the ULE scheduler (FreeBSD scheduler).
11873 +For more information see: https://web.cs.ucdavis.edu/~roper/ecs150/ULE.pdf
11874 +CacULE doesn't replace CFS with ULE, it only changes the CFS' pick next task
11875 +mechanism to ULE's interactivity score mechanism for picking next task to run.
11878 +2.3 sched_interactivity_factor
11879 +=================
11880 +Sets the value *m* for interactivity score calculations. See Figure 1 in
11881 +https://web.cs.ucdavis.edu/~roper/ecs150/ULE.pdf
11882 +The default value of in CacULE is 10 which means that the Maximum Interactive
11883 +Score is 20 (since m = Maximum Interactive Score / 2).
11884 +You can tune sched_interactivity_factor with sysctl command:
11886 +       sysctl kernel.sched_interactivity_factor=50
11888 +This command changes the sched_interactivity_factor from 10 to 50.
11891 +3. Scheduling policies
11892 +=======================
11894 +CacULE, like CFS, implements three scheduling policies:
11896 +  - SCHED_NORMAL (traditionally called SCHED_OTHER): The scheduling
11897 +    policy that is used for regular tasks.
11899 +  - SCHED_BATCH: Does not preempt nearly as often as regular tasks
11900 +    would, thereby allowing tasks to run longer and make better use of
11901 +    caches but at the cost of interactivity. This is well suited for
11902 +    batch jobs.
11904 +  - SCHED_IDLE: This is even weaker than nice 19, but it's not a true
11905 +    idle timer scheduler in order to avoid to get into priority
11906 +    inversion problems which would deadlock the machine.
11907 diff --git a/Documentation/userspace-api/media/v4l/subdev-formats.rst b/Documentation/userspace-api/media/v4l/subdev-formats.rst
11908 index 7f16cbe46e5c..e6a9faa81197 100644
11909 --- a/Documentation/userspace-api/media/v4l/subdev-formats.rst
11910 +++ b/Documentation/userspace-api/media/v4l/subdev-formats.rst
11911 @@ -1567,8 +1567,8 @@ The following tables list existing packed RGB formats.
11912        - MEDIA_BUS_FMT_RGB101010_1X30
11913        - 0x1018
11914        -
11915 -      - 0
11916 -      - 0
11917 +      -
11918 +      -
11919        - r\ :sub:`9`
11920        - r\ :sub:`8`
11921        - r\ :sub:`7`
11922 diff --git a/Documentation/vm/index.rst b/Documentation/vm/index.rst
11923 index eff5fbd492d0..c353b3f55924 100644
11924 --- a/Documentation/vm/index.rst
11925 +++ b/Documentation/vm/index.rst
11926 @@ -17,6 +17,7 @@ various features of the Linux memory management
11928     swap_numa
11929     zswap
11930 +   multigen_lru
11932  Kernel developers MM documentation
11933  ==================================
11934 diff --git a/Documentation/vm/multigen_lru.rst b/Documentation/vm/multigen_lru.rst
11935 new file mode 100644
11936 index 000000000000..cf772aeca317
11937 --- /dev/null
11938 +++ b/Documentation/vm/multigen_lru.rst
11939 @@ -0,0 +1,192 @@
11940 +=====================
11941 +Multigenerational LRU
11942 +=====================
11944 +Quick Start
11945 +===========
11946 +Build Options
11947 +-------------
11948 +:Required: Set ``CONFIG_LRU_GEN=y``.
11950 +:Optional: Change ``CONFIG_NR_LRU_GENS`` to a number ``X`` to support
11951 + a maximum of ``X`` generations.
11953 +:Optional: Change ``CONFIG_TIERS_PER_GEN`` to a number ``Y`` to support
11954 + a maximum of ``Y`` tiers per generation.
11956 +:Optional: Set ``CONFIG_LRU_GEN_ENABLED=y`` to turn the feature on by
11957 + default.
11959 +Runtime Options
11960 +---------------
11961 +:Required: Write ``1`` to ``/sys/kernel/mm/lru_gen/enable`` if the
11962 + feature was not turned on by default.
11964 +:Optional: Change ``/sys/kernel/mm/lru_gen/spread`` to a number ``N``
11965 + to spread pages out across ``N+1`` generations. ``N`` should be less
11966 + than ``X``. Larger values make the background aging more aggressive.
11968 +:Optional: Read ``/sys/kernel/debug/lru_gen`` to verify the feature.
11969 + This file has the following output:
11973 +  memcg  memcg_id  memcg_path
11974 +    node  node_id
11975 +      min_gen  birth_time  anon_size  file_size
11976 +      ...
11977 +      max_gen  birth_time  anon_size  file_size
11979 +Given a memcg and a node, ``min_gen`` is the oldest generation
11980 +(number) and ``max_gen`` is the youngest. Birth time is in
11981 +milliseconds. The sizes of anon and file types are in pages.
11983 +Recipes
11984 +-------
11985 +:Android on ARMv8.1+: ``X=4``, ``N=0``
11987 +:Android on pre-ARMv8.1 CPUs: Not recommended due to the lack of
11988 + ``ARM64_HW_AFDBM``
11990 +:Laptops running Chrome on x86_64: ``X=7``, ``N=2``
11992 +:Working set estimation: Write ``+ memcg_id node_id gen [swappiness]``
11993 + to ``/sys/kernel/debug/lru_gen`` to account referenced pages to
11994 + generation ``max_gen`` and create the next generation ``max_gen+1``.
11995 + ``gen`` should be equal to ``max_gen``. A swap file and a non-zero
11996 + ``swappiness`` are required to scan anon type. If swapping is not
11997 + desired, set ``vm.swappiness`` to ``0``.
11999 +:Proactive reclaim: Write ``- memcg_id node_id gen [swappiness]
12000 + [nr_to_reclaim]`` to ``/sys/kernel/debug/lru_gen`` to evict
12001 + generations less than or equal to ``gen``. ``gen`` should be less
12002 + than ``max_gen-1`` as ``max_gen`` and ``max_gen-1`` are active
12003 + generations and therefore protected from the eviction. Use
12004 + ``nr_to_reclaim`` to limit the number of pages to be evicted.
12005 + Multiple command lines are supported, so does concatenation with
12006 + delimiters ``,`` and ``;``.
12008 +Framework
12009 +=========
12010 +For each ``lruvec``, evictable pages are divided into multiple
12011 +generations. The youngest generation number is stored in ``max_seq``
12012 +for both anon and file types as they are aged on an equal footing. The
12013 +oldest generation numbers are stored in ``min_seq[2]`` separately for
12014 +anon and file types as clean file pages can be evicted regardless of
12015 +swap and write-back constraints. Generation numbers are truncated into
12016 +``order_base_2(CONFIG_NR_LRU_GENS+1)`` bits in order to fit into
12017 +``page->flags``. The sliding window technique is used to prevent
12018 +truncated generation numbers from overlapping. Each truncated
12019 +generation number is an index to an array of per-type and per-zone
12020 +lists. Evictable pages are added to the per-zone lists indexed by
12021 +``max_seq`` or ``min_seq[2]`` (modulo ``CONFIG_NR_LRU_GENS``),
12022 +depending on whether they are being faulted in.
12024 +Each generation is then divided into multiple tiers. Tiers represent
12025 +levels of usage from file descriptors only. Pages accessed N times via
12026 +file descriptors belong to tier order_base_2(N). In contrast to moving
12027 +across generations which requires the lru lock, moving across tiers
12028 +only involves an atomic operation on ``page->flags`` and therefore has
12029 +a negligible cost.
12031 +The workflow comprises two conceptually independent functions: the
12032 +aging and the eviction.
12034 +Aging
12035 +-----
12036 +The aging produces young generations. Given an ``lruvec``, the aging
12037 +scans page tables for referenced pages of this ``lruvec``. Upon
12038 +finding one, the aging updates its generation number to ``max_seq``.
12039 +After each round of scan, the aging increments ``max_seq``.
12041 +The aging maintains either a system-wide ``mm_struct`` list or
12042 +per-memcg ``mm_struct`` lists, and it only scans page tables of
12043 +processes that have been scheduled since the last scan. Since scans
12044 +are differential with respect to referenced pages, the cost is roughly
12045 +proportional to their number.
12047 +The aging is due when both of ``min_seq[2]`` reaches ``max_seq-1``,
12048 +assuming both anon and file types are reclaimable.
12050 +Eviction
12051 +--------
12052 +The eviction consumes old generations. Given an ``lruvec``, the
12053 +eviction scans the pages on the per-zone lists indexed by either of
12054 +``min_seq[2]``. It first tries to select a type based on the values of
12055 +``min_seq[2]``. When anon and file types are both available from the
12056 +same generation, it selects the one that has a lower refault rate.
12058 +During a scan, the eviction sorts pages according to their generation
12059 +numbers, if the aging has found them referenced.  It also moves pages
12060 +from the tiers that have higher refault rates than tier 0 to the next
12061 +generation.
12063 +When it finds all the per-zone lists of a selected type are empty, the
12064 +eviction increments ``min_seq[2]`` indexed by this selected type.
12066 +Rationale
12067 +=========
12068 +Limitations of Current Implementation
12069 +-------------------------------------
12070 +Notion of Active/Inactive
12071 +~~~~~~~~~~~~~~~~~~~~~~~~~
12072 +For servers equipped with hundreds of gigabytes of memory, the
12073 +granularity of the active/inactive is too coarse to be useful for job
12074 +scheduling. False active/inactive rates are relatively high, and thus
12075 +the assumed savings may not materialize.
12077 +For phones and laptops, executable pages are frequently evicted
12078 +despite the fact that there are many less recently used anon pages.
12079 +Major faults on executable pages cause ``janks`` (slow UI renderings)
12080 +and negatively impact user experience.
12082 +For ``lruvec``\s from different memcgs or nodes, comparisons are
12083 +impossible due to the lack of a common frame of reference.
12085 +Incremental Scans via ``rmap``
12086 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
12087 +Each incremental scan picks up at where the last scan left off and
12088 +stops after it has found a handful of unreferenced pages. For
12089 +workloads using a large amount of anon memory, incremental scans lose
12090 +the advantage under sustained memory pressure due to high ratios of
12091 +the number of scanned pages to the number of reclaimed pages. On top
12092 +of that, the ``rmap`` has poor memory locality due to its complex data
12093 +structures. The combined effects typically result in a high amount of
12094 +CPU usage in the reclaim path.
12096 +Benefits of Multigenerational LRU
12097 +---------------------------------
12098 +Notion of Generation Numbers
12099 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~
12100 +The notion of generation numbers introduces a quantitative approach to
12101 +memory overcommit. A larger number of pages can be spread out across
12102 +configurable generations, and thus they have relatively low false
12103 +active/inactive rates. Each generation includes all pages that have
12104 +been referenced since the last generation.
12106 +Given an ``lruvec``, scans and the selections between anon and file
12107 +types are all based on generation numbers, which are simple and yet
12108 +effective. For different ``lruvec``\s, comparisons are still possible
12109 +based on birth times of generations.
12111 +Differential Scans via Page Tables
12112 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
12113 +Each differential scan discovers all pages that have been referenced
12114 +since the last scan. Specifically, it walks the ``mm_struct`` list
12115 +associated with an ``lruvec`` to scan page tables of processes that
12116 +have been scheduled since the last scan. The cost of each differential
12117 +scan is roughly proportional to the number of referenced pages it
12118 +discovers. Unless address spaces are extremely sparse, page tables
12119 +usually have better memory locality than the ``rmap``. The end result
12120 +is generally a significant reduction in CPU usage, for workloads
12121 +using a large amount of anon memory.
12123 +To-do List
12124 +==========
12125 +KVM Optimization
12126 +----------------
12127 +Support shadow page table scanning.
12129 +NUMA Optimization
12130 +-----------------
12131 +Support NUMA policies and per-node RSS counters.
12132 diff --git a/MAINTAINERS b/MAINTAINERS
12133 index 9450e052f1b1..b7a2162d159a 100644
12134 --- a/MAINTAINERS
12135 +++ b/MAINTAINERS
12136 @@ -7377,7 +7377,7 @@ F:        Documentation/locking/*futex*
12137  F:     include/asm-generic/futex.h
12138  F:     include/linux/futex.h
12139  F:     include/uapi/linux/futex.h
12140 -F:     kernel/futex.c
12141 +F:     kernel/futex*
12142  F:     tools/perf/bench/futex*
12143  F:     tools/testing/selftests/futex/
12145 @@ -12775,6 +12775,13 @@ T:     git git://git.kernel.org/pub/scm/linux/kernel/git/aia21/ntfs.git
12146  F:     Documentation/filesystems/ntfs.rst
12147  F:     fs/ntfs/
12149 +NTFS3 FILESYSTEM
12150 +M:     Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
12151 +S:     Supported
12152 +W:     http://www.paragon-software.com/
12153 +F:     Documentation/filesystems/ntfs3.rst
12154 +F:     fs/ntfs3/
12156  NUBUS SUBSYSTEM
12157  M:     Finn Thain <fthain@telegraphics.com.au>
12158  L:     linux-m68k@lists.linux-m68k.org
12159 @@ -19912,6 +19919,18 @@ F:     Documentation/vm/zsmalloc.rst
12160  F:     include/linux/zsmalloc.h
12161  F:     mm/zsmalloc.c
12163 +ZSTD
12164 +M:     Nick Terrell <terrelln@fb.com>
12165 +S:     Maintained
12166 +B:     https://github.com/facebook/zstd/issues
12167 +T:     git git://github.com/terrelln/linux.git
12168 +F:     include/linux/zstd*
12169 +F:     lib/zstd/
12170 +F:     lib/decompress_unzstd.c
12171 +F:     crypto/zstd.c
12172 +N:     zstd
12173 +K:     zstd
12175  ZSWAP COMPRESSED SWAP CACHING
12176  M:     Seth Jennings <sjenning@redhat.com>
12177  M:     Dan Streetman <ddstreet@ieee.org>
12178 diff --git a/Makefile b/Makefile
12179 index 3a10a8e08b6d..6fa682a4cd5d 100644
12180 --- a/Makefile
12181 +++ b/Makefile
12182 @@ -1,7 +1,7 @@
12183  # SPDX-License-Identifier: GPL-2.0
12184  VERSION = 5
12185  PATCHLEVEL = 12
12186 -SUBLEVEL = 0
12187 +SUBLEVEL = 6
12188  EXTRAVERSION =
12189  NAME = Frozen Wasteland
12191 @@ -775,16 +775,16 @@ KBUILD_CFLAGS += -Wno-gnu
12192  KBUILD_CFLAGS += -mno-global-merge
12193  else
12195 -# These warnings generated too much noise in a regular build.
12196 -# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
12197 -KBUILD_CFLAGS += -Wno-unused-but-set-variable
12199  # Warn about unmarked fall-throughs in switch statement.
12200  # Disabled for clang while comment to attribute conversion happens and
12201  # https://github.com/ClangBuiltLinux/linux/issues/636 is discussed.
12202  KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough,)
12203  endif
12205 +# These warnings generated too much noise in a regular build.
12206 +# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
12207 +KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
12209  KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
12210  ifdef CONFIG_FRAME_POINTER
12211  KBUILD_CFLAGS  += -fno-omit-frame-pointer -fno-optimize-sibling-calls
12212 @@ -1066,8 +1066,8 @@ endif # INSTALL_MOD_STRIP
12213  export mod_strip_cmd
12215  # CONFIG_MODULE_COMPRESS, if defined, will cause module to be compressed
12216 -# after they are installed in agreement with CONFIG_MODULE_COMPRESS_GZIP
12217 -# or CONFIG_MODULE_COMPRESS_XZ.
12218 +# after they are installed in agreement with CONFIG_MODULE_COMPRESS_GZIP,
12219 +# CONFIG_MODULE_COMPRESS_XZ, or CONFIG_MODULE_COMPRESS_ZSTD.
12221  mod_compress_cmd = true
12222  ifdef CONFIG_MODULE_COMPRESS
12223 @@ -1077,6 +1077,9 @@ ifdef CONFIG_MODULE_COMPRESS
12224    ifdef CONFIG_MODULE_COMPRESS_XZ
12225      mod_compress_cmd = $(XZ) --lzma2=dict=2MiB -f
12226    endif # CONFIG_MODULE_COMPRESS_XZ
12227 +  ifdef CONFIG_MODULE_COMPRESS_ZSTD
12228 +    mod_compress_cmd = $(ZSTD) -T0 --rm -f -q
12229 +  endif # CONFIG_MODULE_COMPRESS_ZSTD
12230  endif # CONFIG_MODULE_COMPRESS
12231  export mod_compress_cmd
12233 @@ -1513,7 +1516,7 @@ endif # CONFIG_MODULES
12234  # make distclean Remove editor backup files, patch leftover files and the like
12236  # Directories & files removed with 'make clean'
12237 -CLEAN_FILES += include/ksym vmlinux.symvers \
12238 +CLEAN_FILES += include/ksym vmlinux.symvers modules-only.symvers \
12239                modules.builtin modules.builtin.modinfo modules.nsdeps \
12240                compile_commands.json .thinlto-cache
12242 diff --git a/arch/Kconfig b/arch/Kconfig
12243 index ecfd3520b676..cbd7f66734ee 100644
12244 --- a/arch/Kconfig
12245 +++ b/arch/Kconfig
12246 @@ -782,6 +782,15 @@ config HAVE_ARCH_TRANSPARENT_HUGEPAGE
12247  config HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
12248         bool
12250 +config HAVE_ARCH_PARENT_PMD_YOUNG
12251 +       bool
12252 +       depends on PGTABLE_LEVELS > 2
12253 +       help
12254 +         Architectures that select this are able to set the accessed bit on
12255 +         non-leaf PMD entries in addition to leaf PTE entries where pages are
12256 +         mapped. For them, page table walkers that clear the accessed bit may
12257 +         stop at non-leaf PMD entries when they do not see the accessed bit.
12259  config HAVE_ARCH_HUGE_VMAP
12260         bool
12262 diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
12263 index ad9b7fe4dba3..4a9d33372fe2 100644
12264 --- a/arch/arc/include/asm/page.h
12265 +++ b/arch/arc/include/asm/page.h
12266 @@ -7,6 +7,18 @@
12268  #include <uapi/asm/page.h>
12270 +#ifdef CONFIG_ARC_HAS_PAE40
12272 +#define MAX_POSSIBLE_PHYSMEM_BITS      40
12273 +#define PAGE_MASK_PHYS                 (0xff00000000ull | PAGE_MASK)
12275 +#else /* CONFIG_ARC_HAS_PAE40 */
12277 +#define MAX_POSSIBLE_PHYSMEM_BITS      32
12278 +#define PAGE_MASK_PHYS                 PAGE_MASK
12280 +#endif /* CONFIG_ARC_HAS_PAE40 */
12282  #ifndef __ASSEMBLY__
12284  #define clear_page(paddr)              memset((paddr), 0, PAGE_SIZE)
12285 diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
12286 index 163641726a2b..5878846f00cf 100644
12287 --- a/arch/arc/include/asm/pgtable.h
12288 +++ b/arch/arc/include/asm/pgtable.h
12289 @@ -107,8 +107,8 @@
12290  #define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
12292  /* Set of bits not changed in pte_modify */
12293 -#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
12295 +#define _PAGE_CHG_MASK (PAGE_MASK_PHYS | _PAGE_ACCESSED | _PAGE_DIRTY | \
12296 +                                                          _PAGE_SPECIAL)
12297  /* More Abbrevaited helpers */
12298  #define PAGE_U_NONE     __pgprot(___DEF)
12299  #define PAGE_U_R        __pgprot(___DEF | _PAGE_READ)
12300 @@ -132,13 +132,7 @@
12301  #define PTE_BITS_IN_PD0                (_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
12302  #define PTE_BITS_RWX           (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
12304 -#ifdef CONFIG_ARC_HAS_PAE40
12305 -#define PTE_BITS_NON_RWX_IN_PD1        (0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
12306 -#define MAX_POSSIBLE_PHYSMEM_BITS 40
12307 -#else
12308 -#define PTE_BITS_NON_RWX_IN_PD1        (PAGE_MASK | _PAGE_CACHEABLE)
12309 -#define MAX_POSSIBLE_PHYSMEM_BITS 32
12310 -#endif
12311 +#define PTE_BITS_NON_RWX_IN_PD1        (PAGE_MASK_PHYS | _PAGE_CACHEABLE)
12313  /**************************************************************************
12314   * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
12315 diff --git a/arch/arc/include/uapi/asm/page.h b/arch/arc/include/uapi/asm/page.h
12316 index 2a97e2718a21..2a4ad619abfb 100644
12317 --- a/arch/arc/include/uapi/asm/page.h
12318 +++ b/arch/arc/include/uapi/asm/page.h
12319 @@ -33,5 +33,4 @@
12321  #define PAGE_MASK      (~(PAGE_SIZE-1))
12324  #endif /* _UAPI__ASM_ARC_PAGE_H */
12325 diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
12326 index 1743506081da..2cb8dfe866b6 100644
12327 --- a/arch/arc/kernel/entry.S
12328 +++ b/arch/arc/kernel/entry.S
12329 @@ -177,7 +177,7 @@ tracesys:
12331         ; Do the Sys Call as we normally would.
12332         ; Validate the Sys Call number
12333 -       cmp     r8,  NR_syscalls
12334 +       cmp     r8,  NR_syscalls - 1
12335         mov.hi  r0, -ENOSYS
12336         bhi     tracesys_exit
12338 @@ -255,7 +255,7 @@ ENTRY(EV_Trap)
12339         ;============ Normal syscall case
12341         ; syscall num shd not exceed the total system calls avail
12342 -       cmp     r8,  NR_syscalls
12343 +       cmp     r8,  NR_syscalls - 1
12344         mov.hi  r0, -ENOSYS
12345         bhi     .Lret_from_system_call
12347 diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
12348 index ce07e697916c..1bcc6985b9a0 100644
12349 --- a/arch/arc/mm/init.c
12350 +++ b/arch/arc/mm/init.c
12351 @@ -157,7 +157,16 @@ void __init setup_arch_memory(void)
12352         min_high_pfn = PFN_DOWN(high_mem_start);
12353         max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
12355 -       max_zone_pfn[ZONE_HIGHMEM] = min_low_pfn;
12356 +       /*
12357 +        * max_high_pfn should be ok here for both HIGHMEM and HIGHMEM+PAE.
12358 +        * For HIGHMEM without PAE max_high_pfn should be less than
12359 +        * min_low_pfn to guarantee that these two regions don't overlap.
12360 +        * For PAE case highmem is greater than lowmem, so it is natural
12361 +        * to use max_high_pfn.
12362 +        *
12363 +        * In both cases, holes should be handled by pfn_valid().
12364 +        */
12365 +       max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;
12367         high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
12369 diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
12370 index fac4adc90204..95c649fbc95a 100644
12371 --- a/arch/arc/mm/ioremap.c
12372 +++ b/arch/arc/mm/ioremap.c
12373 @@ -53,9 +53,10 @@ EXPORT_SYMBOL(ioremap);
12374  void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
12375                            unsigned long flags)
12377 +       unsigned int off;
12378         unsigned long vaddr;
12379         struct vm_struct *area;
12380 -       phys_addr_t off, end;
12381 +       phys_addr_t end;
12382         pgprot_t prot = __pgprot(flags);
12384         /* Don't allow wraparound, zero size */
12385 @@ -72,7 +73,7 @@ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
12387         /* Mappings have to be page-aligned */
12388         off = paddr & ~PAGE_MASK;
12389 -       paddr &= PAGE_MASK;
12390 +       paddr &= PAGE_MASK_PHYS;
12391         size = PAGE_ALIGN(end + 1) - paddr;
12393         /*
12394 diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
12395 index 9bb3c24f3677..9c7c68247289 100644
12396 --- a/arch/arc/mm/tlb.c
12397 +++ b/arch/arc/mm/tlb.c
12398 @@ -576,7 +576,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
12399                       pte_t *ptep)
12401         unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
12402 -       phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK;
12403 +       phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
12404         struct page *page = pfn_to_page(pte_pfn(*ptep));
12406         create_tlb(vma, vaddr, ptep);
12407 diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
12408 index fd94e27ba4fa..c1f804768621 100644
12409 --- a/arch/arm/boot/compressed/Makefile
12410 +++ b/arch/arm/boot/compressed/Makefile
12411 @@ -118,8 +118,8 @@ asflags-y := -DZIMAGE
12413  # Supply kernel BSS size to the decompressor via a linker symbol.
12414  KBSS_SZ = $(shell echo $$(($$($(NM) $(obj)/../../../../vmlinux | \
12415 -               sed -n -e 's/^\([^ ]*\) [AB] __bss_start$$/-0x\1/p' \
12416 -                      -e 's/^\([^ ]*\) [AB] __bss_stop$$/+0x\1/p') )) )
12417 +               sed -n -e 's/^\([^ ]*\) [ABD] __bss_start$$/-0x\1/p' \
12418 +                      -e 's/^\([^ ]*\) [ABD] __bss_stop$$/+0x\1/p') )) )
12419  LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ)
12420  # Supply ZRELADDR to the decompressor via a linker symbol.
12421  ifneq ($(CONFIG_AUTO_ZRELADDR),y)
12422 diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
12423 index 6c9804d2f3b4..6df1ce545061 100644
12424 --- a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
12425 +++ b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
12426 @@ -713,9 +713,9 @@ &i2c7 {
12427         multi-master;
12428         status = "okay";
12430 -       si7021-a20@20 {
12431 +       si7021-a20@40 {
12432                 compatible = "silabs,si7020";
12433 -               reg = <0x20>;
12434 +               reg = <0x40>;
12435         };
12437         tmp275@48 {
12438 diff --git a/arch/arm/boot/dts/at91-sam9x60ek.dts b/arch/arm/boot/dts/at91-sam9x60ek.dts
12439 index 775ceb3acb6c..edca66c232c1 100644
12440 --- a/arch/arm/boot/dts/at91-sam9x60ek.dts
12441 +++ b/arch/arm/boot/dts/at91-sam9x60ek.dts
12442 @@ -8,6 +8,7 @@
12443   */
12444  /dts-v1/;
12445  #include "sam9x60.dtsi"
12446 +#include <dt-bindings/input/input.h>
12448  / {
12449         model = "Microchip SAM9X60-EK";
12450 @@ -84,7 +85,7 @@ gpio_keys {
12451                 sw1 {
12452                         label = "SW1";
12453                         gpios = <&pioD 18 GPIO_ACTIVE_LOW>;
12454 -                       linux,code=<0x104>;
12455 +                       linux,code=<KEY_PROG1>;
12456                         wakeup-source;
12457                 };
12458         };
12459 diff --git a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
12460 index 84e1180f3e89..a9e6fee55a2a 100644
12461 --- a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
12462 +++ b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
12463 @@ -11,6 +11,7 @@
12464  #include "at91-sama5d27_som1.dtsi"
12465  #include <dt-bindings/mfd/atmel-flexcom.h>
12466  #include <dt-bindings/gpio/gpio.h>
12467 +#include <dt-bindings/input/input.h>
12469  / {
12470         model = "Atmel SAMA5D27 SOM1 EK";
12471 @@ -466,7 +467,7 @@ gpio_keys {
12472                 pb4 {
12473                         label = "USER";
12474                         gpios = <&pioA PIN_PA29 GPIO_ACTIVE_LOW>;
12475 -                       linux,code = <0x104>;
12476 +                       linux,code = <KEY_PROG1>;
12477                         wakeup-source;
12478                 };
12479         };
12480 diff --git a/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
12481 index 180a08765cb8..ff83967fd008 100644
12482 --- a/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
12483 +++ b/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
12484 @@ -8,6 +8,7 @@
12485   */
12486  /dts-v1/;
12487  #include "at91-sama5d27_wlsom1.dtsi"
12488 +#include <dt-bindings/input/input.h>
12490  / {
12491         model = "Microchip SAMA5D27 WLSOM1 EK";
12492 @@ -35,7 +36,7 @@ gpio_keys {
12493                 sw4 {
12494                         label = "USER BUTTON";
12495                         gpios = <&pioA PIN_PB2 GPIO_ACTIVE_LOW>;
12496 -                       linux,code = <0x104>;
12497 +                       linux,code = <KEY_PROG1>;
12498                         wakeup-source;
12499                 };
12500         };
12501 diff --git a/arch/arm/boot/dts/at91-sama5d2_icp.dts b/arch/arm/boot/dts/at91-sama5d2_icp.dts
12502 index 46722a163184..bd64721fa23c 100644
12503 --- a/arch/arm/boot/dts/at91-sama5d2_icp.dts
12504 +++ b/arch/arm/boot/dts/at91-sama5d2_icp.dts
12505 @@ -12,6 +12,7 @@
12506  #include "sama5d2.dtsi"
12507  #include "sama5d2-pinfunc.h"
12508  #include <dt-bindings/gpio/gpio.h>
12509 +#include <dt-bindings/input/input.h>
12510  #include <dt-bindings/mfd/atmel-flexcom.h>
12512  / {
12513 @@ -51,7 +52,7 @@ gpio_keys {
12514                 sw4 {
12515                         label = "USER_PB1";
12516                         gpios = <&pioA PIN_PD0 GPIO_ACTIVE_LOW>;
12517 -                       linux,code = <0x104>;
12518 +                       linux,code = <KEY_PROG1>;
12519                         wakeup-source;
12520                 };
12521         };
12522 diff --git a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
12523 index 8de57d164acd..dfd150eb0fd8 100644
12524 --- a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
12525 +++ b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
12526 @@ -11,6 +11,7 @@
12527  #include "sama5d2-pinfunc.h"
12528  #include <dt-bindings/mfd/atmel-flexcom.h>
12529  #include <dt-bindings/gpio/gpio.h>
12530 +#include <dt-bindings/input/input.h>
12531  #include <dt-bindings/pinctrl/at91.h>
12533  / {
12534 @@ -402,7 +403,7 @@ gpio_keys {
12535                 bp1 {
12536                         label = "PB_USER";
12537                         gpios = <&pioA PIN_PA10 GPIO_ACTIVE_LOW>;
12538 -                       linux,code = <0x104>;
12539 +                       linux,code = <KEY_PROG1>;
12540                         wakeup-source;
12541                 };
12542         };
12543 diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
12544 index 4e7cf21f124c..509c732a0d8b 100644
12545 --- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts
12546 +++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
12547 @@ -10,6 +10,7 @@
12548  #include "sama5d2-pinfunc.h"
12549  #include <dt-bindings/mfd/atmel-flexcom.h>
12550  #include <dt-bindings/gpio/gpio.h>
12551 +#include <dt-bindings/input/input.h>
12552  #include <dt-bindings/regulator/active-semi,8945a-regulator.h>
12554  / {
12555 @@ -712,7 +713,7 @@ gpio_keys {
12556                 bp1 {
12557                         label = "PB_USER";
12558                         gpios = <&pioA PIN_PB9 GPIO_ACTIVE_LOW>;
12559 -                       linux,code = <0x104>;
12560 +                       linux,code = <KEY_PROG1>;
12561                         wakeup-source;
12562                 };
12563         };
12564 diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
12565 index 5179258f9247..9c55a921263b 100644
12566 --- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
12567 +++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
12568 @@ -7,6 +7,7 @@
12569   */
12570  /dts-v1/;
12571  #include "sama5d36.dtsi"
12572 +#include <dt-bindings/input/input.h>
12574  / {
12575         model = "SAMA5D3 Xplained";
12576 @@ -354,7 +355,7 @@ gpio_keys {
12577                 bp3 {
12578                         label = "PB_USER";
12579                         gpios = <&pioE 29 GPIO_ACTIVE_LOW>;
12580 -                       linux,code = <0x104>;
12581 +                       linux,code = <KEY_PROG1>;
12582                         wakeup-source;
12583                 };
12584         };
12585 diff --git a/arch/arm/boot/dts/at91sam9260ek.dts b/arch/arm/boot/dts/at91sam9260ek.dts
12586 index d3446e42b598..ce96345d28a3 100644
12587 --- a/arch/arm/boot/dts/at91sam9260ek.dts
12588 +++ b/arch/arm/boot/dts/at91sam9260ek.dts
12589 @@ -7,6 +7,7 @@
12590   */
12591  /dts-v1/;
12592  #include "at91sam9260.dtsi"
12593 +#include <dt-bindings/input/input.h>
12595  / {
12596         model = "Atmel at91sam9260ek";
12597 @@ -156,7 +157,7 @@ btn3 {
12598                 btn4 {
12599                         label = "Button 4";
12600                         gpios = <&pioA 31 GPIO_ACTIVE_LOW>;
12601 -                       linux,code = <0x104>;
12602 +                       linux,code = <KEY_PROG1>;
12603                         wakeup-source;
12604                 };
12605         };
12606 diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
12607 index 6e6e672c0b86..87bb39060e8b 100644
12608 --- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
12609 +++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
12610 @@ -5,6 +5,7 @@
12611   * Copyright (C) 2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
12612   */
12613  #include "at91sam9g20.dtsi"
12614 +#include <dt-bindings/input/input.h>
12616  / {
12618 @@ -234,7 +235,7 @@ btn3 {
12619                 btn4 {
12620                         label = "Button 4";
12621                         gpios = <&pioA 31 GPIO_ACTIVE_LOW>;
12622 -                       linux,code = <0x104>;
12623 +                       linux,code = <KEY_PROG1>;
12624                         wakeup-source;
12625                 };
12626         };
12627 diff --git a/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts b/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts
12628 index 6a96655d8626..8ed403767540 100644
12629 --- a/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts
12630 +++ b/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts
12631 @@ -21,8 +21,8 @@ chosen {
12633         memory@0 {
12634                 device_type = "memory";
12635 -               reg = <0x00000000 0x08000000
12636 -                      0x88000000 0x08000000>;
12637 +               reg = <0x00000000 0x08000000>,
12638 +                     <0x88000000 0x08000000>;
12639         };
12641         leds {
12642 diff --git a/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts b/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts
12643 index 3b0029e61b4c..667b118ba4ee 100644
12644 --- a/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts
12645 +++ b/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts
12646 @@ -21,8 +21,8 @@ chosen {
12648         memory@0 {
12649                 device_type = "memory";
12650 -               reg = <0x00000000 0x08000000
12651 -                      0x88000000 0x08000000>;
12652 +               reg = <0x00000000 0x08000000>,
12653 +                     <0x88000000 0x08000000>;
12654         };
12656         leds {
12657 diff --git a/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts b/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
12658 index 90f57bad6b24..ff31ce45831a 100644
12659 --- a/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
12660 +++ b/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
12661 @@ -21,8 +21,8 @@ chosen {
12663         memory@0 {
12664                 device_type = "memory";
12665 -               reg = <0x00000000 0x08000000
12666 -                      0x88000000 0x18000000>;
12667 +               reg = <0x00000000 0x08000000>,
12668 +                     <0x88000000 0x18000000>;
12669         };
12671         spi {
12672 diff --git a/arch/arm/boot/dts/bcm4708-netgear-r6250.dts b/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
12673 index fed75e6ab58c..61c7b137607e 100644
12674 --- a/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
12675 +++ b/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
12676 @@ -22,8 +22,8 @@ chosen {
12678         memory {
12679                 device_type = "memory";
12680 -               reg = <0x00000000 0x08000000
12681 -                      0x88000000 0x08000000>;
12682 +               reg = <0x00000000 0x08000000>,
12683 +                     <0x88000000 0x08000000>;
12684         };
12686         leds {
12687 diff --git a/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts b/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts
12688 index 79542e18915c..4c60eda296d9 100644
12689 --- a/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts
12690 +++ b/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts
12691 @@ -21,8 +21,8 @@ chosen {
12693         memory@0 {
12694                 device_type = "memory";
12695 -               reg = <0x00000000 0x08000000
12696 -                      0x88000000 0x08000000>;
12697 +               reg = <0x00000000 0x08000000>,
12698 +                     <0x88000000 0x08000000>;
12699         };
12701         leds {
12702 diff --git a/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts b/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
12703 index 51c64f0b2560..9ca6d1b2590d 100644
12704 --- a/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
12705 +++ b/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
12706 @@ -21,8 +21,8 @@ chosen {
12708         memory@0 {
12709                 device_type = "memory";
12710 -               reg = <0x00000000 0x08000000
12711 -                      0x88000000 0x08000000>;
12712 +               reg = <0x00000000 0x08000000>,
12713 +                     <0x88000000 0x08000000>;
12714         };
12716         leds {
12717 diff --git a/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts b/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts
12718 index c29950b43a95..0e273c598732 100644
12719 --- a/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts
12720 +++ b/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts
12721 @@ -21,8 +21,8 @@ chosen {
12723         memory@0 {
12724                 device_type = "memory";
12725 -               reg = <0x00000000 0x08000000
12726 -                      0x88000000 0x08000000>;
12727 +               reg = <0x00000000 0x08000000>,
12728 +                     <0x88000000 0x08000000>;
12729         };
12731         leds {
12732 diff --git a/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts b/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
12733 index 2f2d2b0a6893..d857751ec507 100644
12734 --- a/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
12735 +++ b/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
12736 @@ -21,8 +21,8 @@ chosen {
12738         memory@0 {
12739                 device_type = "memory";
12740 -               reg = <0x00000000 0x08000000
12741 -                      0x88000000 0x08000000>;
12742 +               reg = <0x00000000 0x08000000>,
12743 +                     <0x88000000 0x08000000>;
12744         };
12746         spi {
12747 diff --git a/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts b/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts
12748 index 0e349e39f608..8b1a05a0f1a1 100644
12749 --- a/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts
12750 +++ b/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts
12751 @@ -21,8 +21,8 @@ chosen {
12753         memory@0 {
12754                 device_type = "memory";
12755 -               reg = <0x00000000 0x08000000
12756 -                      0x88000000 0x08000000>;
12757 +               reg = <0x00000000 0x08000000>,
12758 +                     <0x88000000 0x08000000>;
12759         };
12761         spi {
12762 diff --git a/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts b/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts
12763 index 8f1e565c3db4..6c6bb7b17d27 100644
12764 --- a/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts
12765 +++ b/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts
12766 @@ -21,8 +21,8 @@ chosen {
12768         memory {
12769                 device_type = "memory";
12770 -               reg = <0x00000000 0x08000000
12771 -                      0x88000000 0x08000000>;
12772 +               reg = <0x00000000 0x08000000>,
12773 +                     <0x88000000 0x08000000>;
12774         };
12776         leds {
12777 diff --git a/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts b/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
12778 index ce888b1835d1..d29e7f80ea6a 100644
12779 --- a/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
12780 +++ b/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
12781 @@ -21,8 +21,8 @@ chosen {
12783         memory {
12784                 device_type = "memory";
12785 -               reg = <0x00000000 0x08000000
12786 -                      0x88000000 0x18000000>;
12787 +               reg = <0x00000000 0x08000000>,
12788 +                     <0x88000000 0x18000000>;
12789         };
12791         leds {
12792 diff --git a/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts b/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts
12793 index ed8619b54d69..38fbefdf2e4e 100644
12794 --- a/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts
12795 +++ b/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts
12796 @@ -18,8 +18,8 @@ chosen {
12798         memory {
12799                 device_type = "memory";
12800 -               reg = <0x00000000 0x08000000
12801 -                      0x88000000 0x08000000>;
12802 +               reg = <0x00000000 0x08000000>,
12803 +                     <0x88000000 0x08000000>;
12804         };
12806         gpio-keys {
12807 diff --git a/arch/arm/boot/dts/bcm4709-netgear-r7000.dts b/arch/arm/boot/dts/bcm4709-netgear-r7000.dts
12808 index 1f87993eae1d..7989a53597d4 100644
12809 --- a/arch/arm/boot/dts/bcm4709-netgear-r7000.dts
12810 +++ b/arch/arm/boot/dts/bcm4709-netgear-r7000.dts
12811 @@ -21,8 +21,8 @@ chosen {
12813         memory {
12814                 device_type = "memory";
12815 -               reg = <0x00000000 0x08000000
12816 -                      0x88000000 0x08000000>;
12817 +               reg = <0x00000000 0x08000000>,
12818 +                     <0x88000000 0x08000000>;
12819         };
12821         leds {
12822 diff --git a/arch/arm/boot/dts/bcm4709-netgear-r8000.dts b/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
12823 index 6c6199a53d09..87b655be674c 100644
12824 --- a/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
12825 +++ b/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
12826 @@ -32,8 +32,8 @@ chosen {
12828         memory {
12829                 device_type = "memory";
12830 -               reg = <0x00000000 0x08000000
12831 -                      0x88000000 0x08000000>;
12832 +               reg = <0x00000000 0x08000000>,
12833 +                     <0x88000000 0x08000000>;
12834         };
12836         leds {
12837 diff --git a/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts b/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
12838 index 911c65fbf251..e635a15041dd 100644
12839 --- a/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
12840 +++ b/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
12841 @@ -21,8 +21,8 @@ chosen {
12843         memory@0 {
12844                 device_type = "memory";
12845 -               reg = <0x00000000 0x08000000
12846 -                      0x88000000 0x08000000>;
12847 +               reg = <0x00000000 0x08000000>,
12848 +                     <0x88000000 0x08000000>;
12849         };
12851         nand: nand@18028000 {
12852 diff --git a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
12853 index 3725f2b0d60b..4b24b25389b5 100644
12854 --- a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
12855 +++ b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
12856 @@ -18,8 +18,8 @@ chosen {
12858         memory@0 {
12859                 device_type = "memory";
12860 -               reg = <0x00000000 0x08000000
12861 -                      0x88000000 0x08000000>;
12862 +               reg = <0x00000000 0x08000000>,
12863 +                     <0x88000000 0x08000000>;
12864         };
12866         gpio-keys {
12867 diff --git a/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts b/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts
12868 index 50f7cd08cfbb..a6dc99955e19 100644
12869 --- a/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts
12870 +++ b/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts
12871 @@ -18,8 +18,8 @@ chosen {
12873         memory@0 {
12874                 device_type = "memory";
12875 -               reg = <0x00000000 0x08000000
12876 -                      0x88000000 0x18000000>;
12877 +               reg = <0x00000000 0x08000000>,
12878 +                     <0x88000000 0x18000000>;
12879         };
12881         leds {
12882 diff --git a/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts b/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts
12883 index bcc420f85b56..ff98837bc0db 100644
12884 --- a/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts
12885 +++ b/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts
12886 @@ -18,8 +18,8 @@ chosen {
12888         memory@0 {
12889                 device_type = "memory";
12890 -               reg = <0x00000000 0x08000000
12891 -                      0x88000000 0x18000000>;
12892 +               reg = <0x00000000 0x08000000>,
12893 +                     <0x88000000 0x18000000>;
12894         };
12896         leds {
12897 diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts b/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts
12898 index 4f8d777ae18d..452b8d0ab180 100644
12899 --- a/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts
12900 +++ b/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts
12901 @@ -18,8 +18,8 @@ chosen {
12903         memory {
12904                 device_type = "memory";
12905 -               reg = <0x00000000 0x08000000
12906 -                      0x88000000 0x18000000>;
12907 +               reg = <0x00000000 0x08000000>,
12908 +                     <0x88000000 0x18000000>;
12909         };
12911         leds {
12912 diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts b/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts
12913 index e17e9a17fb00..b76bfe6efcd4 100644
12914 --- a/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts
12915 +++ b/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts
12916 @@ -18,8 +18,8 @@ chosen {
12918         memory@0 {
12919                 device_type = "memory";
12920 -               reg = <0x00000000 0x08000000
12921 -                      0x88000000 0x08000000>;
12922 +               reg = <0x00000000 0x08000000>,
12923 +                     <0x88000000 0x08000000>;
12924         };
12926         leds {
12927 diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts b/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts
12928 index 60cc87ecc7ec..32d5a50578ec 100644
12929 --- a/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts
12930 +++ b/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts
12931 @@ -18,8 +18,8 @@ chosen {
12933         memory@0 {
12934                 device_type = "memory";
12935 -               reg = <0x00000000 0x08000000
12936 -                      0x88000000 0x18000000>;
12937 +               reg = <0x00000000 0x08000000>,
12938 +                     <0x88000000 0x18000000>;
12939         };
12941         leds {
12942 diff --git a/arch/arm/boot/dts/bcm47094-netgear-r8500.dts b/arch/arm/boot/dts/bcm47094-netgear-r8500.dts
12943 index f42a1703f4ab..42097a4c2659 100644
12944 --- a/arch/arm/boot/dts/bcm47094-netgear-r8500.dts
12945 +++ b/arch/arm/boot/dts/bcm47094-netgear-r8500.dts
12946 @@ -18,8 +18,8 @@ chosen {
12948         memory@0 {
12949                 device_type = "memory";
12950 -               reg = <0x00000000 0x08000000
12951 -                      0x88000000 0x18000000>;
12952 +               reg = <0x00000000 0x08000000>,
12953 +                     <0x88000000 0x18000000>;
12954         };
12956         leds {
12957 diff --git a/arch/arm/boot/dts/bcm47094-phicomm-k3.dts b/arch/arm/boot/dts/bcm47094-phicomm-k3.dts
12958 index ac3a4483dcb3..a2566ad4619c 100644
12959 --- a/arch/arm/boot/dts/bcm47094-phicomm-k3.dts
12960 +++ b/arch/arm/boot/dts/bcm47094-phicomm-k3.dts
12961 @@ -15,8 +15,8 @@ / {
12963         memory@0 {
12964                 device_type = "memory";
12965 -               reg = <0x00000000 0x08000000
12966 -                      0x88000000 0x18000000>;
12967 +               reg = <0x00000000 0x08000000>,
12968 +                     <0x88000000 0x18000000>;
12969         };
12971         gpio-keys {
12972 diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi
12973 index 3bf90d9e3335..a294a02f2d23 100644
12974 --- a/arch/arm/boot/dts/dra7-l4.dtsi
12975 +++ b/arch/arm/boot/dts/dra7-l4.dtsi
12976 @@ -1168,7 +1168,7 @@ timer2: timer@0 {
12977                         };
12978                 };
12980 -               target-module@34000 {                   /* 0x48034000, ap 7 46.0 */
12981 +               timer3_target: target-module@34000 {    /* 0x48034000, ap 7 46.0 */
12982                         compatible = "ti,sysc-omap4-timer", "ti,sysc";
12983                         reg = <0x34000 0x4>,
12984                               <0x34010 0x4>;
12985 @@ -1195,7 +1195,7 @@ timer3: timer@0 {
12986                         };
12987                 };
12989 -               target-module@36000 {                   /* 0x48036000, ap 9 4e.0 */
12990 +               timer4_target: target-module@36000 {    /* 0x48036000, ap 9 4e.0 */
12991                         compatible = "ti,sysc-omap4-timer", "ti,sysc";
12992                         reg = <0x36000 0x4>,
12993                               <0x36010 0x4>;
12994 diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
12995 index ce1194744f84..53d68786a61f 100644
12996 --- a/arch/arm/boot/dts/dra7.dtsi
12997 +++ b/arch/arm/boot/dts/dra7.dtsi
12998 @@ -46,6 +46,7 @@ aliases {
13000         timer {
13001                 compatible = "arm,armv7-timer";
13002 +               status = "disabled";    /* See ARM architected timer wrap erratum i940 */
13003                 interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
13004                              <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
13005                              <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
13006 @@ -1241,3 +1242,22 @@ timer@0 {
13007                 assigned-clock-parents = <&sys_32k_ck>;
13008         };
13009  };
13011 +/* Local timers, see ARM architected timer wrap erratum i940 */
13012 +&timer3_target {
13013 +       ti,no-reset-on-init;
13014 +       ti,no-idle;
13015 +       timer@0 {
13016 +               assigned-clocks = <&l4per_clkctrl DRA7_L4PER_TIMER3_CLKCTRL 24>;
13017 +               assigned-clock-parents = <&timer_sys_clk_div>;
13018 +       };
13021 +&timer4_target {
13022 +       ti,no-reset-on-init;
13023 +       ti,no-idle;
13024 +       timer@0 {
13025 +               assigned-clocks = <&l4per_clkctrl DRA7_L4PER_TIMER4_CLKCTRL 24>;
13026 +               assigned-clock-parents = <&timer_sys_clk_div>;
13027 +       };
13029 diff --git a/arch/arm/boot/dts/exynos4210-i9100.dts b/arch/arm/boot/dts/exynos4210-i9100.dts
13030 index 304a8ee2364c..d98c78207aaf 100644
13031 --- a/arch/arm/boot/dts/exynos4210-i9100.dts
13032 +++ b/arch/arm/boot/dts/exynos4210-i9100.dts
13033 @@ -136,7 +136,7 @@ battery@36 {
13034                         compatible = "maxim,max17042";
13036                         interrupt-parent = <&gpx2>;
13037 -                       interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
13038 +                       interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
13040                         pinctrl-0 = <&max17042_fuel_irq>;
13041                         pinctrl-names = "default";
13042 diff --git a/arch/arm/boot/dts/exynos4412-midas.dtsi b/arch/arm/boot/dts/exynos4412-midas.dtsi
13043 index 111c32bae02c..fc77c1bfd844 100644
13044 --- a/arch/arm/boot/dts/exynos4412-midas.dtsi
13045 +++ b/arch/arm/boot/dts/exynos4412-midas.dtsi
13046 @@ -173,7 +173,7 @@ i2c_max77693: i2c-gpio-1 {
13047                 pmic@66 {
13048                         compatible = "maxim,max77693";
13049                         interrupt-parent = <&gpx1>;
13050 -                       interrupts = <5 IRQ_TYPE_EDGE_FALLING>;
13051 +                       interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
13052                         pinctrl-names = "default";
13053                         pinctrl-0 = <&max77693_irq>;
13054                         reg = <0x66>;
13055 @@ -221,7 +221,7 @@ i2c_max77693_fuel: i2c-gpio-3 {
13056                 fuel-gauge@36 {
13057                         compatible = "maxim,max17047";
13058                         interrupt-parent = <&gpx2>;
13059 -                       interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
13060 +                       interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
13061                         pinctrl-names = "default";
13062                         pinctrl-0 = <&max77693_fuel_irq>;
13063                         reg = <0x36>;
13064 @@ -665,7 +665,7 @@ &i2c_7 {
13065         max77686: pmic@9 {
13066                 compatible = "maxim,max77686";
13067                 interrupt-parent = <&gpx0>;
13068 -               interrupts = <7 IRQ_TYPE_NONE>;
13069 +               interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
13070                 pinctrl-0 = <&max77686_irq>;
13071                 pinctrl-names = "default";
13072                 reg = <0x09>;
13073 diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
13074 index 2b20d9095d9f..eebe6a3952ce 100644
13075 --- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
13076 +++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
13077 @@ -278,7 +278,7 @@ usb3503: usb-hub@8 {
13078         max77686: pmic@9 {
13079                 compatible = "maxim,max77686";
13080                 interrupt-parent = <&gpx3>;
13081 -               interrupts = <2 IRQ_TYPE_NONE>;
13082 +               interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
13083                 pinctrl-names = "default";
13084                 pinctrl-0 = <&max77686_irq>;
13085                 reg = <0x09>;
13086 diff --git a/arch/arm/boot/dts/exynos4412-p4note.dtsi b/arch/arm/boot/dts/exynos4412-p4note.dtsi
13087 index b2f9d5448a18..9e750890edb8 100644
13088 --- a/arch/arm/boot/dts/exynos4412-p4note.dtsi
13089 +++ b/arch/arm/boot/dts/exynos4412-p4note.dtsi
13090 @@ -146,7 +146,7 @@ fuel-gauge@36 {
13091                         pinctrl-0 = <&fuel_alert_irq>;
13092                         pinctrl-names = "default";
13093                         interrupt-parent = <&gpx2>;
13094 -                       interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
13095 +                       interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
13096                         maxim,rsns-microohm = <10000>;
13097                         maxim,over-heat-temp = <600>;
13098                         maxim,over-volt = <4300>;
13099 @@ -322,7 +322,7 @@ &i2c_7 {
13100         max77686: pmic@9 {
13101                 compatible = "maxim,max77686";
13102                 interrupt-parent = <&gpx0>;
13103 -               interrupts = <7 IRQ_TYPE_NONE>;
13104 +               interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
13105                 pinctrl-0 = <&max77686_irq>;
13106                 pinctrl-names = "default";
13107                 reg = <0x09>;
13108 diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts
13109 index 8b5a79a8720c..39bbe18145cf 100644
13110 --- a/arch/arm/boot/dts/exynos5250-smdk5250.dts
13111 +++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts
13112 @@ -134,7 +134,7 @@ max77686: pmic@9 {
13113                 compatible = "maxim,max77686";
13114                 reg = <0x09>;
13115                 interrupt-parent = <&gpx3>;
13116 -               interrupts = <2 IRQ_TYPE_NONE>;
13117 +               interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
13118                 pinctrl-names = "default";
13119                 pinctrl-0 = <&max77686_irq>;
13120                 #clock-cells = <1>;
13121 diff --git a/arch/arm/boot/dts/exynos5250-snow-common.dtsi b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
13122 index 6635f6184051..2335c4687349 100644
13123 --- a/arch/arm/boot/dts/exynos5250-snow-common.dtsi
13124 +++ b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
13125 @@ -292,7 +292,7 @@ &i2c_0 {
13126         max77686: pmic@9 {
13127                 compatible = "maxim,max77686";
13128                 interrupt-parent = <&gpx3>;
13129 -               interrupts = <2 IRQ_TYPE_NONE>;
13130 +               interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
13131                 pinctrl-names = "default";
13132                 pinctrl-0 = <&max77686_irq>;
13133                 wakeup-source;
13134 diff --git a/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts b/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
13135 index 0cda654371ae..56ee02ceba7d 100644
13136 --- a/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
13137 +++ b/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
13138 @@ -575,7 +575,7 @@ fuelgauge: max17048@36 {
13139                         maxim,rcomp = /bits/ 8 <0x4d>;
13141                         interrupt-parent = <&msmgpio>;
13142 -                       interrupts = <9 IRQ_TYPE_EDGE_FALLING>;
13143 +                       interrupts = <9 IRQ_TYPE_LEVEL_LOW>;
13145                         pinctrl-names = "default";
13146                         pinctrl-0 = <&fuelgauge_pin>;
13147 diff --git a/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts b/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts
13148 index a0f7f461f48c..2dadb836c5fe 100644
13149 --- a/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts
13150 +++ b/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts
13151 @@ -717,7 +717,7 @@ fuelgauge@36 {
13152                         maxim,rcomp = /bits/ 8 <0x56>;
13154                         interrupt-parent = <&pma8084_gpios>;
13155 -                       interrupts = <21 IRQ_TYPE_EDGE_FALLING>;
13156 +                       interrupts = <21 IRQ_TYPE_LEVEL_LOW>;
13158                         pinctrl-names = "default";
13159                         pinctrl-0 = <&fuelgauge_pin>;
13160 diff --git a/arch/arm/boot/dts/r8a7790-lager.dts b/arch/arm/boot/dts/r8a7790-lager.dts
13161 index 09a152b91557..1d6f0c5d02e9 100644
13162 --- a/arch/arm/boot/dts/r8a7790-lager.dts
13163 +++ b/arch/arm/boot/dts/r8a7790-lager.dts
13164 @@ -53,6 +53,9 @@ aliases {
13165                 i2c11 = &i2cexio1;
13166                 i2c12 = &i2chdmi;
13167                 i2c13 = &i2cpwr;
13168 +               mmc0 = &mmcif1;
13169 +               mmc1 = &sdhi0;
13170 +               mmc2 = &sdhi2;
13171         };
13173         chosen {
13174 diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts
13175 index f603cba5441f..6af1727b8269 100644
13176 --- a/arch/arm/boot/dts/r8a7791-koelsch.dts
13177 +++ b/arch/arm/boot/dts/r8a7791-koelsch.dts
13178 @@ -53,6 +53,9 @@ aliases {
13179                 i2c12 = &i2cexio1;
13180                 i2c13 = &i2chdmi;
13181                 i2c14 = &i2cexio4;
13182 +               mmc0 = &sdhi0;
13183 +               mmc1 = &sdhi1;
13184 +               mmc2 = &sdhi2;
13185         };
13187         chosen {
13188 diff --git a/arch/arm/boot/dts/r8a7791-porter.dts b/arch/arm/boot/dts/r8a7791-porter.dts
13189 index c6d563fb7ec7..bf51e29c793a 100644
13190 --- a/arch/arm/boot/dts/r8a7791-porter.dts
13191 +++ b/arch/arm/boot/dts/r8a7791-porter.dts
13192 @@ -28,6 +28,8 @@ aliases {
13193                 serial0 = &scif0;
13194                 i2c9 = &gpioi2c2;
13195                 i2c10 = &i2chdmi;
13196 +               mmc0 = &sdhi0;
13197 +               mmc1 = &sdhi2;
13198         };
13200         chosen {
13201 diff --git a/arch/arm/boot/dts/r8a7793-gose.dts b/arch/arm/boot/dts/r8a7793-gose.dts
13202 index abf487e8fe0f..2b59a0491350 100644
13203 --- a/arch/arm/boot/dts/r8a7793-gose.dts
13204 +++ b/arch/arm/boot/dts/r8a7793-gose.dts
13205 @@ -49,6 +49,9 @@ aliases {
13206                 i2c10 = &gpioi2c4;
13207                 i2c11 = &i2chdmi;
13208                 i2c12 = &i2cexio4;
13209 +               mmc0 = &sdhi0;
13210 +               mmc1 = &sdhi1;
13211 +               mmc2 = &sdhi2;
13212         };
13214         chosen {
13215 diff --git a/arch/arm/boot/dts/r8a7794-alt.dts b/arch/arm/boot/dts/r8a7794-alt.dts
13216 index 3f1cc5bbf329..32025986b3b9 100644
13217 --- a/arch/arm/boot/dts/r8a7794-alt.dts
13218 +++ b/arch/arm/boot/dts/r8a7794-alt.dts
13219 @@ -19,6 +19,9 @@ aliases {
13220                 i2c10 = &gpioi2c4;
13221                 i2c11 = &i2chdmi;
13222                 i2c12 = &i2cexio4;
13223 +               mmc0 = &mmcif0;
13224 +               mmc1 = &sdhi0;
13225 +               mmc2 = &sdhi1;
13226         };
13228         chosen {
13229 diff --git a/arch/arm/boot/dts/r8a7794-silk.dts b/arch/arm/boot/dts/r8a7794-silk.dts
13230 index 677596f6c9c9..af066ee5e275 100644
13231 --- a/arch/arm/boot/dts/r8a7794-silk.dts
13232 +++ b/arch/arm/boot/dts/r8a7794-silk.dts
13233 @@ -31,6 +31,8 @@ aliases {
13234                 serial0 = &scif2;
13235                 i2c9 = &gpioi2c1;
13236                 i2c10 = &i2chdmi;
13237 +               mmc0 = &mmcif0;
13238 +               mmc1 = &sdhi1;
13239         };
13241         chosen {
13242 diff --git a/arch/arm/boot/dts/s5pv210-fascinate4g.dts b/arch/arm/boot/dts/s5pv210-fascinate4g.dts
13243 index ca064359dd30..b47d8300e536 100644
13244 --- a/arch/arm/boot/dts/s5pv210-fascinate4g.dts
13245 +++ b/arch/arm/boot/dts/s5pv210-fascinate4g.dts
13246 @@ -115,7 +115,7 @@ &fg {
13247         compatible = "maxim,max77836-battery";
13249         interrupt-parent = <&gph3>;
13250 -       interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
13251 +       interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
13253         pinctrl-names = "default";
13254         pinctrl-0 = <&fg_irq>;
13255 diff --git a/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi b/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi
13256 index cb3677f0a1cb..b580397ede83 100644
13257 --- a/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi
13258 +++ b/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi
13259 @@ -8,37 +8,43 @@
13260  / {
13261         soc {
13262                 i2c@80128000 {
13263 -                       /* Marked:
13264 -                        * 129
13265 -                        * M35
13266 -                        * L3GD20
13267 -                        */
13268 -                       l3gd20@6a {
13269 -                               /* Gyroscope */
13270 -                               compatible = "st,l3gd20";
13271 -                               status = "disabled";
13272 +                       accelerometer@19 {
13273 +                               compatible = "st,lsm303dlhc-accel";
13274                                 st,drdy-int-pin = <1>;
13275 -                               drive-open-drain;
13276 -                               reg = <0x6a>; // 0x6a or 0x6b
13277 +                               reg = <0x19>;
13278                                 vdd-supply = <&ab8500_ldo_aux1_reg>;
13279                                 vddio-supply = <&db8500_vsmps2_reg>;
13280 +                               interrupt-parent = <&gpio2>;
13281 +                               interrupts = <18 IRQ_TYPE_EDGE_RISING>,
13282 +                                            <19 IRQ_TYPE_EDGE_RISING>;
13283 +                               pinctrl-names = "default";
13284 +                               pinctrl-0 = <&accel_tvk_mode>;
13285                         };
13286 -                       /*
13287 -                        * Marked:
13288 -                        * 2122
13289 -                        * C3H
13290 -                        * DQEEE
13291 -                        * LIS3DH?
13292 -                        */
13293 -                       lis3dh@18 {
13294 -                               /* Accelerometer */
13295 -                               compatible = "st,lis3dh-accel";
13296 +                       magnetometer@1e {
13297 +                               compatible = "st,lsm303dlm-magn";
13298                                 st,drdy-int-pin = <1>;
13299 -                               reg = <0x18>;
13300 +                               reg = <0x1e>;
13301                                 vdd-supply = <&ab8500_ldo_aux1_reg>;
13302                                 vddio-supply = <&db8500_vsmps2_reg>;
13303 +                               // This interrupt is not properly working with the driver
13304 +                               // interrupt-parent = <&gpio1>;
13305 +                               // interrupts = <0 IRQ_TYPE_EDGE_RISING>;
13306                                 pinctrl-names = "default";
13307 -                               pinctrl-0 = <&accel_tvk_mode>;
13308 +                               pinctrl-0 = <&magn_tvk_mode>;
13309 +                       };
13310 +                       gyroscope@68 {
13311 +                               /* Gyroscope */
13312 +                               compatible = "st,l3g4200d-gyro";
13313 +                               reg = <0x68>;
13314 +                               vdd-supply = <&ab8500_ldo_aux1_reg>;
13315 +                               vddio-supply = <&db8500_vsmps2_reg>;
13316 +                       };
13317 +                       pressure@5c {
13318 +                               /* Barometer/pressure sensor */
13319 +                               compatible = "st,lps001wp-press";
13320 +                               reg = <0x5c>;
13321 +                               vdd-supply = <&ab8500_ldo_aux1_reg>;
13322 +                               vddio-supply = <&db8500_vsmps2_reg>;
13323                         };
13324                 };
13326 @@ -54,5 +60,26 @@ panel {
13327                                 };
13328                         };
13329                 };
13331 +               pinctrl {
13332 +                       accelerometer {
13333 +                               accel_tvk_mode: accel_tvk {
13334 +                                       /* Accelerometer interrupt lines 1 & 2 */
13335 +                                       tvk_cfg {
13336 +                                               pins = "GPIO82_C1", "GPIO83_D3";
13337 +                                               ste,config = <&gpio_in_pd>;
13338 +                                       };
13339 +                               };
13340 +                       };
13341 +                       magnetometer {
13342 +                               magn_tvk_mode: magn_tvk {
13343 +                                       /* GPIO 32 used for DRDY, pull this down */
13344 +                                       tvk_cfg {
13345 +                                               pins = "GPIO32_V2";
13346 +                                               ste,config = <&gpio_in_pd>;
13347 +                                       };
13348 +                               };
13349 +                       };
13350 +               };
13351         };
13352  };
13353 diff --git a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
13354 index 7b4249ed1983..060baa8b7e9d 100644
13355 --- a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
13356 +++ b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
13357 @@ -1891,10 +1891,15 @@ pins2 {
13358         usart2_idle_pins_c: usart2-idle-2 {
13359                 pins1 {
13360                         pinmux = <STM32_PINMUX('D', 5, ANALOG)>, /* USART2_TX */
13361 -                                <STM32_PINMUX('D', 4, ANALOG)>, /* USART2_RTS */
13362                                  <STM32_PINMUX('D', 3, ANALOG)>; /* USART2_CTS_NSS */
13363                 };
13364                 pins2 {
13365 +                       pinmux = <STM32_PINMUX('D', 4, AF7)>; /* USART2_RTS */
13366 +                       bias-disable;
13367 +                       drive-push-pull;
13368 +                       slew-rate = <3>;
13369 +               };
13370 +               pins3 {
13371                         pinmux = <STM32_PINMUX('D', 6, AF7)>; /* USART2_RX */
13372                         bias-disable;
13373                 };
13374 @@ -1940,10 +1945,15 @@ pins2 {
13375         usart3_idle_pins_b: usart3-idle-1 {
13376                 pins1 {
13377                         pinmux = <STM32_PINMUX('B', 10, ANALOG)>, /* USART3_TX */
13378 -                                <STM32_PINMUX('G', 8, ANALOG)>, /* USART3_RTS */
13379                                  <STM32_PINMUX('I', 10, ANALOG)>; /* USART3_CTS_NSS */
13380                 };
13381                 pins2 {
13382 +                       pinmux = <STM32_PINMUX('G', 8, AF8)>; /* USART3_RTS */
13383 +                       bias-disable;
13384 +                       drive-push-pull;
13385 +                       slew-rate = <0>;
13386 +               };
13387 +               pins3 {
13388                         pinmux = <STM32_PINMUX('B', 12, AF8)>; /* USART3_RX */
13389                         bias-disable;
13390                 };
13391 @@ -1976,10 +1986,15 @@ pins2 {
13392         usart3_idle_pins_c: usart3-idle-2 {
13393                 pins1 {
13394                         pinmux = <STM32_PINMUX('B', 10, ANALOG)>, /* USART3_TX */
13395 -                                <STM32_PINMUX('G', 8, ANALOG)>, /* USART3_RTS */
13396                                  <STM32_PINMUX('B', 13, ANALOG)>; /* USART3_CTS_NSS */
13397                 };
13398                 pins2 {
13399 +                       pinmux = <STM32_PINMUX('G', 8, AF8)>; /* USART3_RTS */
13400 +                       bias-disable;
13401 +                       drive-push-pull;
13402 +                       slew-rate = <0>;
13403 +               };
13404 +               pins3 {
13405                         pinmux = <STM32_PINMUX('B', 12, AF8)>; /* USART3_RX */
13406                         bias-disable;
13407                 };
13408 diff --git a/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts b/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
13409 index d3b99535d755..f9c0f6884cc1 100644
13410 --- a/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
13411 +++ b/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
13412 @@ -448,7 +448,7 @@ touchscreen@4c {
13414                         reset-gpios = <&gpio TEGRA_GPIO(Q, 7) GPIO_ACTIVE_LOW>;
13416 -                       avdd-supply = <&vdd_3v3_sys>;
13417 +                       vdda-supply = <&vdd_3v3_sys>;
13418                         vdd-supply  = <&vdd_3v3_sys>;
13419                 };
13421 diff --git a/arch/arm/boot/dts/uniphier-pxs2.dtsi b/arch/arm/boot/dts/uniphier-pxs2.dtsi
13422 index b0b15c97306b..e81e5937a60a 100644
13423 --- a/arch/arm/boot/dts/uniphier-pxs2.dtsi
13424 +++ b/arch/arm/boot/dts/uniphier-pxs2.dtsi
13425 @@ -583,7 +583,7 @@ eth: ethernet@65000000 {
13426                         clocks = <&sys_clk 6>;
13427                         reset-names = "ether";
13428                         resets = <&sys_rst 6>;
13429 -                       phy-mode = "rgmii";
13430 +                       phy-mode = "rgmii-id";
13431                         local-mac-address = [00 00 00 00 00 00];
13432                         socionext,syscon-phy-mode = <&soc_glue 0>;
13434 diff --git a/arch/arm/crypto/blake2s-core.S b/arch/arm/crypto/blake2s-core.S
13435 index bed897e9a181..86345751bbf3 100644
13436 --- a/arch/arm/crypto/blake2s-core.S
13437 +++ b/arch/arm/crypto/blake2s-core.S
13438 @@ -8,6 +8,7 @@
13439   */
13441  #include <linux/linkage.h>
13442 +#include <asm/assembler.h>
13444         // Registers used to hold message words temporarily.  There aren't
13445         // enough ARM registers to hold the whole message block, so we have to
13446 @@ -38,6 +39,23 @@
13447  #endif
13448  .endm
13450 +.macro _le32_bswap     a, tmp
13451 +#ifdef __ARMEB__
13452 +       rev_l           \a, \tmp
13453 +#endif
13454 +.endm
13456 +.macro _le32_bswap_8x  a, b, c, d, e, f, g, h,  tmp
13457 +       _le32_bswap     \a, \tmp
13458 +       _le32_bswap     \b, \tmp
13459 +       _le32_bswap     \c, \tmp
13460 +       _le32_bswap     \d, \tmp
13461 +       _le32_bswap     \e, \tmp
13462 +       _le32_bswap     \f, \tmp
13463 +       _le32_bswap     \g, \tmp
13464 +       _le32_bswap     \h, \tmp
13465 +.endm
13467  // Execute a quarter-round of BLAKE2s by mixing two columns or two diagonals.
13468  // (a0, b0, c0, d0) and (a1, b1, c1, d1) give the registers containing the two
13469  // columns/diagonals.  s0-s1 are the word offsets to the message words the first
13470 @@ -180,8 +198,10 @@ ENTRY(blake2s_compress_arch)
13471         tst             r1, #3
13472         bne             .Lcopy_block_misaligned
13473         ldmia           r1!, {r2-r9}
13474 +       _le32_bswap_8x  r2, r3, r4, r5, r6, r7, r8, r9,  r14
13475         stmia           r12!, {r2-r9}
13476         ldmia           r1!, {r2-r9}
13477 +       _le32_bswap_8x  r2, r3, r4, r5, r6, r7, r8, r9,  r14
13478         stmia           r12, {r2-r9}
13479  .Lcopy_block_done:
13480         str             r1, [sp, #68]           // Update message pointer
13481 @@ -268,6 +288,7 @@ ENTRY(blake2s_compress_arch)
13482  1:
13483  #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
13484         ldr             r3, [r1], #4
13485 +       _le32_bswap     r3, r4
13486  #else
13487         ldrb            r3, [r1, #0]
13488         ldrb            r4, [r1, #1]
13489 diff --git a/arch/arm/crypto/curve25519-core.S b/arch/arm/crypto/curve25519-core.S
13490 index be18af52e7dc..b697fa5d059a 100644
13491 --- a/arch/arm/crypto/curve25519-core.S
13492 +++ b/arch/arm/crypto/curve25519-core.S
13493 @@ -10,8 +10,8 @@
13494  #include <linux/linkage.h>
13496  .text
13497 -.fpu neon
13498  .arch armv7-a
13499 +.fpu neon
13500  .align 4
13502  ENTRY(curve25519_neon)
13503 diff --git a/arch/arm/crypto/poly1305-glue.c b/arch/arm/crypto/poly1305-glue.c
13504 index 3023c1acfa19..c31bd8f7c092 100644
13505 --- a/arch/arm/crypto/poly1305-glue.c
13506 +++ b/arch/arm/crypto/poly1305-glue.c
13507 @@ -29,7 +29,7 @@ void __weak poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit)
13509  static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
13511 -void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
13512 +void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
13514         poly1305_init_arm(&dctx->h, key);
13515         dctx->s[0] = get_unaligned_le32(key + 16);
13516 diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
13517 index be8050b0c3df..70993af22d80 100644
13518 --- a/arch/arm/kernel/asm-offsets.c
13519 +++ b/arch/arm/kernel/asm-offsets.c
13520 @@ -24,6 +24,7 @@
13521  #include <asm/vdso_datapage.h>
13522  #include <asm/hardware/cache-l2x0.h>
13523  #include <linux/kbuild.h>
13524 +#include <linux/arm-smccc.h>
13525  #include "signal.h"
13527  /*
13528 @@ -148,6 +149,8 @@ int main(void)
13529    DEFINE(SLEEP_SAVE_SP_PHYS,   offsetof(struct sleep_save_sp, save_ptr_stash_phys));
13530    DEFINE(SLEEP_SAVE_SP_VIRT,   offsetof(struct sleep_save_sp, save_ptr_stash));
13531  #endif
13532 +  DEFINE(ARM_SMCCC_QUIRK_ID_OFFS,      offsetof(struct arm_smccc_quirk, id));
13533 +  DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS,   offsetof(struct arm_smccc_quirk, state));
13534    BLANK();
13535    DEFINE(DMA_BIDIRECTIONAL,    DMA_BIDIRECTIONAL);
13536    DEFINE(DMA_TO_DEVICE,                DMA_TO_DEVICE);
13537 diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
13538 index 08660ae9dcbc..b1423fb130ea 100644
13539 --- a/arch/arm/kernel/hw_breakpoint.c
13540 +++ b/arch/arm/kernel/hw_breakpoint.c
13541 @@ -886,7 +886,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
13542                         info->trigger = addr;
13543                         pr_debug("breakpoint fired: address = 0x%x\n", addr);
13544                         perf_bp_event(bp, regs);
13545 -                       if (!bp->overflow_handler)
13546 +                       if (is_default_overflow_handler(bp))
13547                                 enable_single_step(bp, addr);
13548                         goto unlock;
13549                 }
13550 diff --git a/arch/arm/kernel/smccc-call.S b/arch/arm/kernel/smccc-call.S
13551 index 00664c78faca..931df62a7831 100644
13552 --- a/arch/arm/kernel/smccc-call.S
13553 +++ b/arch/arm/kernel/smccc-call.S
13554 @@ -3,7 +3,9 @@
13555   * Copyright (c) 2015, Linaro Limited
13556   */
13557  #include <linux/linkage.h>
13558 +#include <linux/arm-smccc.h>
13560 +#include <asm/asm-offsets.h>
13561  #include <asm/opcodes-sec.h>
13562  #include <asm/opcodes-virt.h>
13563  #include <asm/unwind.h>
13564 @@ -27,7 +29,14 @@ UNWIND(      .fnstart)
13565  UNWIND(        .save   {r4-r7})
13566         ldm     r12, {r4-r7}
13567         \instr
13568 -       pop     {r4-r7}
13569 +       ldr     r4, [sp, #36]
13570 +       cmp     r4, #0
13571 +       beq     1f                      // No quirk structure
13572 +       ldr     r5, [r4, #ARM_SMCCC_QUIRK_ID_OFFS]
13573 +       cmp     r5, #ARM_SMCCC_QUIRK_QCOM_A6
13574 +       bne     1f                      // No quirk present
13575 +       str     r6, [r4, #ARM_SMCCC_QUIRK_STATE_OFFS]
13576 +1:     pop     {r4-r7}
13577         ldr     r12, [sp, #(4 * 4)]
13578         stm     r12, {r0-r3}
13579         bx      lr
13580 diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
13581 index 24bd20564be7..43f0a3ebf390 100644
13582 --- a/arch/arm/kernel/suspend.c
13583 +++ b/arch/arm/kernel/suspend.c
13584 @@ -1,4 +1,5 @@
13585  // SPDX-License-Identifier: GPL-2.0
13586 +#include <linux/ftrace.h>
13587  #include <linux/init.h>
13588  #include <linux/slab.h>
13589  #include <linux/mm_types.h>
13590 @@ -25,6 +26,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
13591         if (!idmap_pgd)
13592                 return -EINVAL;
13594 +       /*
13595 +        * Function graph tracer state gets incosistent when the kernel
13596 +        * calls functions that never return (aka suspend finishers) hence
13597 +        * disable graph tracing during their execution.
13598 +        */
13599 +       pause_graph_tracing();
13601         /*
13602          * Provide a temporary page table with an identity mapping for
13603          * the MMU-enable code, required for resuming.  On successful
13604 @@ -32,6 +40,9 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
13605          * back to the correct page tables.
13606          */
13607         ret = __cpu_suspend(arg, fn, __mpidr);
13609 +       unpause_graph_tracing();
13611         if (ret == 0) {
13612                 cpu_switch_mm(mm->pgd, mm);
13613                 local_flush_bp_all();
13614 @@ -45,7 +56,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
13615  int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
13617         u32 __mpidr = cpu_logical_map(smp_processor_id());
13618 -       return __cpu_suspend(arg, fn, __mpidr);
13619 +       int ret;
13621 +       pause_graph_tracing();
13622 +       ret = __cpu_suspend(arg, fn, __mpidr);
13623 +       unpause_graph_tracing();
13625 +       return ret;
13627  #define        idmap_pgd       NULL
13628  #endif
13629 diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
13630 index dcc1191291a2..24a700535747 100644
13631 --- a/arch/arm/tools/syscall.tbl
13632 +++ b/arch/arm/tools/syscall.tbl
13633 @@ -456,3 +456,7 @@
13634  440    common  process_madvise                 sys_process_madvise
13635  441    common  epoll_pwait2                    sys_epoll_pwait2
13636  442    common  mount_setattr                   sys_mount_setattr
13637 +443    common  futex_wait                      sys_futex_wait
13638 +444    common  futex_wake                      sys_futex_wake
13639 +445    common  futex_waitv                     sys_futex_waitv
13640 +446    common  futex_requeue                   sys_futex_requeue
13641 diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts
13642 index 6e4ad66ff536..8d5d368dbe90 100644
13643 --- a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts
13644 +++ b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts
13645 @@ -65,6 +65,7 @@ port@3 {
13646         port@7 {
13647                 label = "sw";
13648                 reg = <7>;
13649 +               phy-mode = "rgmii";
13651                 fixed-link {
13652                         speed = <1000>;
13653 diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi
13654 index 9354077f74cd..9e799328c6db 100644
13655 --- a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi
13656 +++ b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi
13657 @@ -131,7 +131,7 @@ usb@d000 {
13658                         status = "disabled";
13659                 };
13661 -               ethernet-switch@80000 {
13662 +               bus@80000 {
13663                         compatible = "simple-bus";
13664                         #size-cells = <1>;
13665                         #address-cells = <1>;
13666 diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts b/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts
13667 index 0d38327043f8..cd3c3edd48fa 100644
13668 --- a/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts
13669 +++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts
13670 @@ -28,6 +28,10 @@ &bq25895 {
13671         ti,termination-current = <144000>;  /* uA */
13672  };
13674 +&buck3_reg {
13675 +       regulator-always-on;
13678  &proximity {
13679         proximity-near-level = <25>;
13680  };
13681 diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
13682 index 7a2df148c6a3..456dcd4a7793 100644
13683 --- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
13684 +++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
13685 @@ -156,7 +156,8 @@ uart1: serial@12200 {
13686                         };
13688                         nb_periph_clk: nb-periph-clk@13000 {
13689 -                               compatible = "marvell,armada-3700-periph-clock-nb";
13690 +                               compatible = "marvell,armada-3700-periph-clock-nb",
13691 +                                            "syscon";
13692                                 reg = <0x13000 0x100>;
13693                                 clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>,
13694                                 <&tbg 3>, <&xtalclk>;
13695 diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
13696 index 6dffada2e66b..28aa634c9780 100644
13697 --- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
13698 +++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
13699 @@ -294,7 +294,7 @@ &pwm0 {
13701  &pwrap {
13702         /* Only MT8173 E1 needs USB power domain */
13703 -       power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>;
13704 +       power-domains = <&spm MT8173_POWER_DOMAIN_USB>;
13706         pmic: mt6397 {
13707                 compatible = "mediatek,mt6397";
13708 diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
13709 index 7fa870e4386a..ecb37a7e6870 100644
13710 --- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
13711 +++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
13712 @@ -1235,7 +1235,7 @@ dsi1: dsi@1401c000 {
13713                                  <&mmsys CLK_MM_DSI1_DIGITAL>,
13714                                  <&mipi_tx1>;
13715                         clock-names = "engine", "digital", "hs";
13716 -                       phy = <&mipi_tx1>;
13717 +                       phys = <&mipi_tx1>;
13718                         phy-names = "dphy";
13719                         status = "disabled";
13720                 };
13721 diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
13722 index 80519a145f13..16f4b1fc0fb9 100644
13723 --- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
13724 +++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
13725 @@ -983,6 +983,9 @@ mmsys: syscon@14000000 {
13726                         compatible = "mediatek,mt8183-mmsys", "syscon";
13727                         reg = <0 0x14000000 0 0x1000>;
13728                         #clock-cells = <1>;
13729 +                       mboxes = <&gce 0 CMDQ_THR_PRIO_HIGHEST>,
13730 +                                <&gce 1 CMDQ_THR_PRIO_HIGHEST>;
13731 +                       mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0 0x1000>;
13732                 };
13734                 ovl0: ovl@14008000 {
13735 @@ -1058,6 +1061,7 @@ ccorr0: ccorr@1400f000 {
13736                         interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_LOW>;
13737                         power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
13738                         clocks = <&mmsys CLK_MM_DISP_CCORR0>;
13739 +                       mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0xf000 0x1000>;
13740                 };
13742                 aal0: aal@14010000 {
13743 @@ -1067,6 +1071,7 @@ aal0: aal@14010000 {
13744                         interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_LOW>;
13745                         power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
13746                         clocks = <&mmsys CLK_MM_DISP_AAL0>;
13747 +                       mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0 0x1000>;
13748                 };
13750                 gamma0: gamma@14011000 {
13751 @@ -1075,6 +1080,7 @@ gamma0: gamma@14011000 {
13752                         interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_LOW>;
13753                         power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
13754                         clocks = <&mmsys CLK_MM_DISP_GAMMA0>;
13755 +                       mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0x1000 0x1000>;
13756                 };
13758                 dither0: dither@14012000 {
13759 @@ -1083,6 +1089,7 @@ dither0: dither@14012000 {
13760                         interrupts = <GIC_SPI 235 IRQ_TYPE_LEVEL_LOW>;
13761                         power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
13762                         clocks = <&mmsys CLK_MM_DISP_DITHER0>;
13763 +                       mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0x2000 0x1000>;
13764                 };
13766                 dsi0: dsi@14014000 {
13767 diff --git a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
13768 index 63fd70086bb8..9f27e7ed5e22 100644
13769 --- a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
13770 +++ b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
13771 @@ -56,7 +56,7 @@ &i2c0 {
13772         tca6416: gpio@20 {
13773                 compatible = "ti,tca6416";
13774                 reg = <0x20>;
13775 -               reset-gpios = <&pio 65 GPIO_ACTIVE_HIGH>;
13776 +               reset-gpios = <&pio 65 GPIO_ACTIVE_LOW>;
13777                 pinctrl-names = "default";
13778                 pinctrl-0 = <&tca6416_pins>;
13780 diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
13781 index 07c8b2c926c0..b8f7cf5cbdab 100644
13782 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
13783 +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
13784 @@ -22,9 +22,11 @@ charger-thermal {
13785                         thermal-sensors = <&pm6150_adc_tm 1>;
13787                         trips {
13788 -                               temperature = <125000>;
13789 -                               hysteresis = <1000>;
13790 -                               type = "critical";
13791 +                               charger-crit {
13792 +                                       temperature = <125000>;
13793 +                                       hysteresis = <1000>;
13794 +                                       type = "critical";
13795 +                               };
13796                         };
13797                 };
13798         };
13799 @@ -768,17 +770,17 @@ &sdhc_2 {
13800  };
13802  &spi0 {
13803 -       pinctrl-0 = <&qup_spi0_cs_gpio>;
13804 +       pinctrl-0 = <&qup_spi0_cs_gpio_init_high>, <&qup_spi0_cs_gpio>;
13805         cs-gpios = <&tlmm 37 GPIO_ACTIVE_LOW>;
13806  };
13808  &spi6 {
13809 -       pinctrl-0 = <&qup_spi6_cs_gpio>;
13810 +       pinctrl-0 = <&qup_spi6_cs_gpio_init_high>, <&qup_spi6_cs_gpio>;
13811         cs-gpios = <&tlmm 62 GPIO_ACTIVE_LOW>;
13812  };
13814  ap_spi_fp: &spi10 {
13815 -       pinctrl-0 = <&qup_spi10_cs_gpio>;
13816 +       pinctrl-0 = <&qup_spi10_cs_gpio_init_high>, <&qup_spi10_cs_gpio>;
13817         cs-gpios = <&tlmm 89 GPIO_ACTIVE_LOW>;
13819         cros_ec_fp: ec@0 {
13820 @@ -1339,6 +1341,27 @@ pinconf {
13821                 };
13822         };
13824 +       qup_spi0_cs_gpio_init_high: qup-spi0-cs-gpio-init-high {
13825 +               pinconf {
13826 +                       pins = "gpio37";
13827 +                       output-high;
13828 +               };
13829 +       };
13831 +       qup_spi6_cs_gpio_init_high: qup-spi6-cs-gpio-init-high {
13832 +               pinconf {
13833 +                       pins = "gpio62";
13834 +                       output-high;
13835 +               };
13836 +       };
13838 +       qup_spi10_cs_gpio_init_high: qup-spi10-cs-gpio-init-high {
13839 +               pinconf {
13840 +                       pins = "gpio89";
13841 +                       output-high;
13842 +               };
13843 +       };
13845         qup_uart3_sleep: qup-uart3-sleep {
13846                 pinmux {
13847                         pins = "gpio38", "gpio39",
13848 diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
13849 index c4ac6f5dc008..96d36b38f269 100644
13850 --- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
13851 +++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
13852 @@ -1015,7 +1015,7 @@ swm: swm@c85 {
13853                 left_spkr: wsa8810-left{
13854                         compatible = "sdw10217201000";
13855                         reg = <0 1>;
13856 -                       powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
13857 +                       powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>;
13858                         #thermal-sensor-cells = <0>;
13859                         sound-name-prefix = "SpkrLeft";
13860                         #sound-dai-cells = <0>;
13861 @@ -1023,7 +1023,7 @@ left_spkr: wsa8810-left{
13863                 right_spkr: wsa8810-right{
13864                         compatible = "sdw10217201000";
13865 -                       powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
13866 +                       powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>;
13867                         reg = <0 2>;
13868                         #thermal-sensor-cells = <0>;
13869                         sound-name-prefix = "SpkrRight";
13870 diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
13871 index 454f794af547..6a2ed02d383d 100644
13872 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
13873 +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
13874 @@ -2382,7 +2382,7 @@ tlmm: pinctrl@3400000 {
13875                         #gpio-cells = <2>;
13876                         interrupt-controller;
13877                         #interrupt-cells = <2>;
13878 -                       gpio-ranges = <&tlmm 0 0 150>;
13879 +                       gpio-ranges = <&tlmm 0 0 151>;
13880                         wakeup-parent = <&pdc_intc>;
13882                         cci0_default: cci0-default {
13883 diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
13884 index e5bb17bc2f46..778613d3410b 100644
13885 --- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
13886 +++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
13887 @@ -914,7 +914,7 @@ tlmm: pinctrl@3100000 {
13888                               <0x0 0x03D00000 0x0 0x300000>;
13889                         reg-names = "west", "east", "north", "south";
13890                         interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
13891 -                       gpio-ranges = <&tlmm 0 0 175>;
13892 +                       gpio-ranges = <&tlmm 0 0 176>;
13893                         gpio-controller;
13894                         #gpio-cells = <2>;
13895                         interrupt-controller;
13896 diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
13897 index 947e1accae3a..46a6c18cea91 100644
13898 --- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
13899 +++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
13900 @@ -279,7 +279,7 @@ mmcx_reg: mmcx-reg {
13902         pmu {
13903                 compatible = "arm,armv8-pmuv3";
13904 -               interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
13905 +               interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
13906         };
13908         psci {
13909 @@ -2327,10 +2327,9 @@ mdss: mdss@ae00000 {
13910                         reg = <0 0x0ae00000 0 0x1000>;
13911                         reg-names = "mdss";
13913 -                       interconnects = <&gem_noc MASTER_AMPSS_M0 &config_noc SLAVE_DISPLAY_CFG>,
13914 -                                       <&mmss_noc MASTER_MDP_PORT0 &mc_virt SLAVE_EBI_CH0>,
13915 +                       interconnects = <&mmss_noc MASTER_MDP_PORT0 &mc_virt SLAVE_EBI_CH0>,
13916                                         <&mmss_noc MASTER_MDP_PORT1 &mc_virt SLAVE_EBI_CH0>;
13917 -                       interconnect-names = "notused", "mdp0-mem", "mdp1-mem";
13918 +                       interconnect-names = "mdp0-mem", "mdp1-mem";
13920                         power-domains = <&dispcc MDSS_GDSC>;
13922 @@ -2580,7 +2579,7 @@ opp-358000000 {
13924                 dispcc: clock-controller@af00000 {
13925                         compatible = "qcom,sm8250-dispcc";
13926 -                       reg = <0 0x0af00000 0 0x20000>;
13927 +                       reg = <0 0x0af00000 0 0x10000>;
13928                         mmcx-supply = <&mmcx_reg>;
13929                         clocks = <&rpmhcc RPMH_CXO_CLK>,
13930                                  <&dsi0_phy 0>,
13931 @@ -2588,28 +2587,14 @@ dispcc: clock-controller@af00000 {
13932                                  <&dsi1_phy 0>,
13933                                  <&dsi1_phy 1>,
13934                                  <0>,
13935 -                                <0>,
13936 -                                <0>,
13937 -                                <0>,
13938 -                                <0>,
13939 -                                <0>,
13940 -                                <0>,
13941 -                                <0>,
13942 -                                <&sleep_clk>;
13943 +                                <0>;
13944                         clock-names = "bi_tcxo",
13945                                       "dsi0_phy_pll_out_byteclk",
13946                                       "dsi0_phy_pll_out_dsiclk",
13947                                       "dsi1_phy_pll_out_byteclk",
13948                                       "dsi1_phy_pll_out_dsiclk",
13949 -                                     "dp_link_clk_divsel_ten",
13950 -                                     "dp_vco_divided_clk_src_mux",
13951 -                                     "dptx1_phy_pll_link_clk",
13952 -                                     "dptx1_phy_pll_vco_div_clk",
13953 -                                     "dptx2_phy_pll_link_clk",
13954 -                                     "dptx2_phy_pll_vco_div_clk",
13955 -                                     "edp_phy_pll_link_clk",
13956 -                                     "edp_phy_pll_vco_div_clk",
13957 -                                     "sleep_clk";
13958 +                                     "dp_phy_pll_link_clk",
13959 +                                     "dp_phy_pll_vco_div_clk";
13960                         #clock-cells = <1>;
13961                         #reset-cells = <1>;
13962                         #power-domain-cells = <1>;
13963 @@ -2689,7 +2674,7 @@ tlmm: pinctrl@f100000 {
13964                         #gpio-cells = <2>;
13965                         interrupt-controller;
13966                         #interrupt-cells = <2>;
13967 -                       gpio-ranges = <&tlmm 0 0 180>;
13968 +                       gpio-ranges = <&tlmm 0 0 181>;
13969                         wakeup-parent = <&pdc>;
13971                         pri_mi2s_active: pri-mi2s-active {
13972 @@ -3754,7 +3739,7 @@ timer {
13973                                 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
13974                              <GIC_PPI 11
13975                                 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
13976 -                            <GIC_PPI 12
13977 +                            <GIC_PPI 10
13978                                 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
13979         };
13981 diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
13982 index 5ef460458f5c..e2fca420e518 100644
13983 --- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
13984 +++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
13985 @@ -153,7 +153,7 @@ memory@80000000 {
13987         pmu {
13988                 compatible = "arm,armv8-pmuv3";
13989 -               interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
13990 +               interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
13991         };
13993         psci {
13994 @@ -382,7 +382,7 @@ tlmm: pinctrl@f100000 {
13995                         #gpio-cells = <2>;
13996                         interrupt-controller;
13997                         #interrupt-cells = <2>;
13998 -                       gpio-ranges = <&tlmm 0 0 203>;
13999 +                       gpio-ranges = <&tlmm 0 0 204>;
14001                         qup_uart3_default_state: qup-uart3-default-state {
14002                                 rx {
14003 diff --git a/arch/arm64/boot/dts/renesas/hihope-common.dtsi b/arch/arm64/boot/dts/renesas/hihope-common.dtsi
14004 index 7a3da9b06f67..0c7e6f790590 100644
14005 --- a/arch/arm64/boot/dts/renesas/hihope-common.dtsi
14006 +++ b/arch/arm64/boot/dts/renesas/hihope-common.dtsi
14007 @@ -12,6 +12,9 @@ / {
14008         aliases {
14009                 serial0 = &scif2;
14010                 serial1 = &hscif0;
14011 +               mmc0 = &sdhi3;
14012 +               mmc1 = &sdhi0;
14013 +               mmc2 = &sdhi2;
14014         };
14016         chosen {
14017 diff --git a/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts b/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts
14018 index 501cb05da228..3cf2e076940f 100644
14019 --- a/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts
14020 +++ b/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts
14021 @@ -21,6 +21,9 @@ aliases {
14022                 serial4 = &hscif2;
14023                 serial5 = &scif5;
14024                 ethernet0 = &avb;
14025 +               mmc0 = &sdhi3;
14026 +               mmc1 = &sdhi0;
14027 +               mmc2 = &sdhi2;
14028         };
14030         chosen {
14031 diff --git a/arch/arm64/boot/dts/renesas/r8a774b1-beacon-rzg2n-kit.dts b/arch/arm64/boot/dts/renesas/r8a774b1-beacon-rzg2n-kit.dts
14032 index 71763f4402a7..3c0d59def8ee 100644
14033 --- a/arch/arm64/boot/dts/renesas/r8a774b1-beacon-rzg2n-kit.dts
14034 +++ b/arch/arm64/boot/dts/renesas/r8a774b1-beacon-rzg2n-kit.dts
14035 @@ -22,6 +22,9 @@ aliases {
14036                 serial5 = &scif5;
14037                 serial6 = &scif4;
14038                 ethernet0 = &avb;
14039 +               mmc0 = &sdhi3;
14040 +               mmc1 = &sdhi0;
14041 +               mmc2 = &sdhi2;
14042         };
14044         chosen {
14045 diff --git a/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts b/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
14046 index ea87cb5a459c..33257c6440b2 100644
14047 --- a/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
14048 +++ b/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
14049 @@ -17,6 +17,8 @@ / {
14050         aliases {
14051                 serial0 = &scif2;
14052                 serial1 = &hscif2;
14053 +               mmc0 = &sdhi0;
14054 +               mmc1 = &sdhi3;
14055         };
14057         chosen {
14058 diff --git a/arch/arm64/boot/dts/renesas/r8a774e1-beacon-rzg2h-kit.dts b/arch/arm64/boot/dts/renesas/r8a774e1-beacon-rzg2h-kit.dts
14059 index 273f062f2909..7b6649a3ded0 100644
14060 --- a/arch/arm64/boot/dts/renesas/r8a774e1-beacon-rzg2h-kit.dts
14061 +++ b/arch/arm64/boot/dts/renesas/r8a774e1-beacon-rzg2h-kit.dts
14062 @@ -22,6 +22,9 @@ aliases {
14063                 serial5 = &scif5;
14064                 serial6 = &scif4;
14065                 ethernet0 = &avb;
14066 +               mmc0 = &sdhi3;
14067 +               mmc1 = &sdhi0;
14068 +               mmc2 = &sdhi2;
14069         };
14071         chosen {
14072 diff --git a/arch/arm64/boot/dts/renesas/r8a77980.dtsi b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
14073 index ec7ca72399ec..1ffa4a995a7a 100644
14074 --- a/arch/arm64/boot/dts/renesas/r8a77980.dtsi
14075 +++ b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
14076 @@ -992,8 +992,8 @@ port@1 {
14078                                         reg = <1>;
14080 -                                       vin4csi41: endpoint@2 {
14081 -                                               reg = <2>;
14082 +                                       vin4csi41: endpoint@3 {
14083 +                                               reg = <3>;
14084                                                 remote-endpoint = <&csi41vin4>;
14085                                         };
14086                                 };
14087 @@ -1020,8 +1020,8 @@ port@1 {
14089                                         reg = <1>;
14091 -                                       vin5csi41: endpoint@2 {
14092 -                                               reg = <2>;
14093 +                                       vin5csi41: endpoint@3 {
14094 +                                               reg = <3>;
14095                                                 remote-endpoint = <&csi41vin5>;
14096                                         };
14097                                 };
14098 @@ -1048,8 +1048,8 @@ port@1 {
14100                                         reg = <1>;
14102 -                                       vin6csi41: endpoint@2 {
14103 -                                               reg = <2>;
14104 +                                       vin6csi41: endpoint@3 {
14105 +                                               reg = <3>;
14106                                                 remote-endpoint = <&csi41vin6>;
14107                                         };
14108                                 };
14109 @@ -1076,8 +1076,8 @@ port@1 {
14111                                         reg = <1>;
14113 -                                       vin7csi41: endpoint@2 {
14114 -                                               reg = <2>;
14115 +                                       vin7csi41: endpoint@3 {
14116 +                                               reg = <3>;
14117                                                 remote-endpoint = <&csi41vin7>;
14118                                         };
14119                                 };
14120 diff --git a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
14121 index f74f8b9993f1..6d6cdc4c324b 100644
14122 --- a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
14123 +++ b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
14124 @@ -16,6 +16,9 @@ / {
14125         aliases {
14126                 serial0 = &scif2;
14127                 ethernet0 = &avb;
14128 +               mmc0 = &sdhi3;
14129 +               mmc1 = &sdhi0;
14130 +               mmc2 = &sdhi1;
14131         };
14133         chosen {
14134 diff --git a/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi
14135 index fa284a7260d6..e202e8aa6941 100644
14136 --- a/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi
14137 +++ b/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi
14138 @@ -12,6 +12,14 @@ / {
14139         model = "Renesas Falcon CPU board";
14140         compatible = "renesas,falcon-cpu", "renesas,r8a779a0";
14142 +       aliases {
14143 +               serial0 = &scif0;
14144 +       };
14146 +       chosen {
14147 +               stdout-path = "serial0:115200n8";
14148 +       };
14150         memory@48000000 {
14151                 device_type = "memory";
14152                 /* first 128MB is reserved for secure area. */
14153 diff --git a/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts b/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts
14154 index 5617b81dd7dc..273857ae38f3 100644
14155 --- a/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts
14156 +++ b/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts
14157 @@ -14,11 +14,6 @@ / {
14159         aliases {
14160                 ethernet0 = &avb0;
14161 -               serial0 = &scif0;
14162 -       };
14164 -       chosen {
14165 -               stdout-path = "serial0:115200n8";
14166         };
14167  };
14169 diff --git a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
14170 index dfd6ae8b564f..86ac48e2c849 100644
14171 --- a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
14172 +++ b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
14173 @@ -60,10 +60,7 @@ extalr_clk: extalr {
14175         pmu_a76 {
14176                 compatible = "arm,cortex-a76-pmu";
14177 -               interrupts-extended = <&gic GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>,
14178 -                                     <&gic GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
14179 -                                     <&gic GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
14180 -                                     <&gic GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
14181 +               interrupts-extended = <&gic GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
14182         };
14184         /* External SCIF clock - to be overridden by boards that provide it */
14185 diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
14186 index c22bb38994e8..15bb1eeb6601 100644
14187 --- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi
14188 +++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
14189 @@ -36,6 +36,9 @@ aliases {
14190                 serial0 = &scif2;
14191                 serial1 = &hscif1;
14192                 ethernet0 = &avb;
14193 +               mmc0 = &sdhi2;
14194 +               mmc1 = &sdhi0;
14195 +               mmc2 = &sdhi3;
14196         };
14198         chosen {
14199 diff --git a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
14200 index e9ed2597f1c2..61bd4df09df0 100644
14201 --- a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
14202 +++ b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
14203 @@ -16,6 +16,7 @@ / {
14204         aliases {
14205                 serial1 = &hscif0;
14206                 serial2 = &scif1;
14207 +               mmc2 = &sdhi3;
14208         };
14210         clksndsel: clksndsel {
14211 diff --git a/arch/arm64/boot/dts/renesas/ulcb.dtsi b/arch/arm64/boot/dts/renesas/ulcb.dtsi
14212 index a04eae55dd6c..3d88e95c65a5 100644
14213 --- a/arch/arm64/boot/dts/renesas/ulcb.dtsi
14214 +++ b/arch/arm64/boot/dts/renesas/ulcb.dtsi
14215 @@ -23,6 +23,8 @@ / {
14216         aliases {
14217                 serial0 = &scif2;
14218                 ethernet0 = &avb;
14219 +               mmc0 = &sdhi2;
14220 +               mmc1 = &sdhi0;
14221         };
14223         chosen {
14224 diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
14225 index a87b8a678719..8f2c1c1e2c64 100644
14226 --- a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
14227 +++ b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
14228 @@ -734,7 +734,7 @@ eth: ethernet@65000000 {
14229                         clocks = <&sys_clk 6>;
14230                         reset-names = "ether";
14231                         resets = <&sys_rst 6>;
14232 -                       phy-mode = "rgmii";
14233 +                       phy-mode = "rgmii-id";
14234                         local-mac-address = [00 00 00 00 00 00];
14235                         socionext,syscon-phy-mode = <&soc_glue 0>;
14237 diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
14238 index 0e52dadf54b3..be97da132258 100644
14239 --- a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
14240 +++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
14241 @@ -564,7 +564,7 @@ eth0: ethernet@65000000 {
14242                         clocks = <&sys_clk 6>;
14243                         reset-names = "ether";
14244                         resets = <&sys_rst 6>;
14245 -                       phy-mode = "rgmii";
14246 +                       phy-mode = "rgmii-id";
14247                         local-mac-address = [00 00 00 00 00 00];
14248                         socionext,syscon-phy-mode = <&soc_glue 0>;
14250 @@ -585,7 +585,7 @@ eth1: ethernet@65200000 {
14251                         clocks = <&sys_clk 7>;
14252                         reset-names = "ether";
14253                         resets = <&sys_rst 7>;
14254 -                       phy-mode = "rgmii";
14255 +                       phy-mode = "rgmii-id";
14256                         local-mac-address = [00 00 00 00 00 00];
14257                         socionext,syscon-phy-mode = <&soc_glue 1>;
14259 diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
14260 index 8c84dafb7125..f1e7da3dfa27 100644
14261 --- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
14262 +++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
14263 @@ -1042,13 +1042,16 @@ main_sdhci0: mmc@4f80000 {
14264                 assigned-clocks = <&k3_clks 91 1>;
14265                 assigned-clock-parents = <&k3_clks 91 2>;
14266                 bus-width = <8>;
14267 -               mmc-hs400-1_8v;
14268 +               mmc-hs200-1_8v;
14269                 mmc-ddr-1_8v;
14270                 ti,otap-del-sel-legacy = <0xf>;
14271                 ti,otap-del-sel-mmc-hs = <0xf>;
14272                 ti,otap-del-sel-ddr52 = <0x5>;
14273                 ti,otap-del-sel-hs200 = <0x6>;
14274                 ti,otap-del-sel-hs400 = <0x0>;
14275 +               ti,itap-del-sel-legacy = <0x10>;
14276 +               ti,itap-del-sel-mmc-hs = <0xa>;
14277 +               ti,itap-del-sel-ddr52 = <0x3>;
14278                 ti,trm-icp = <0x8>;
14279                 ti,strobe-sel = <0x77>;
14280                 dma-coherent;
14281 @@ -1069,9 +1072,15 @@ main_sdhci1: mmc@4fb0000 {
14282                 ti,otap-del-sel-sdr25 = <0xf>;
14283                 ti,otap-del-sel-sdr50 = <0xc>;
14284                 ti,otap-del-sel-ddr50 = <0xc>;
14285 +               ti,itap-del-sel-legacy = <0x0>;
14286 +               ti,itap-del-sel-sd-hs = <0x0>;
14287 +               ti,itap-del-sel-sdr12 = <0x0>;
14288 +               ti,itap-del-sel-sdr25 = <0x0>;
14289 +               ti,itap-del-sel-ddr50 = <0x2>;
14290                 ti,trm-icp = <0x8>;
14291                 ti,clkbuf-sel = <0x7>;
14292                 dma-coherent;
14293 +               sdhci-caps-mask = <0x2 0x0>;
14294         };
14296         main_sdhci2: mmc@4f98000 {
14297 @@ -1089,9 +1098,15 @@ main_sdhci2: mmc@4f98000 {
14298                 ti,otap-del-sel-sdr25 = <0xf>;
14299                 ti,otap-del-sel-sdr50 = <0xc>;
14300                 ti,otap-del-sel-ddr50 = <0xc>;
14301 +               ti,itap-del-sel-legacy = <0x0>;
14302 +               ti,itap-del-sel-sd-hs = <0x0>;
14303 +               ti,itap-del-sel-sdr12 = <0x0>;
14304 +               ti,itap-del-sel-sdr25 = <0x0>;
14305 +               ti,itap-del-sel-ddr50 = <0x2>;
14306                 ti,trm-icp = <0x8>;
14307                 ti,clkbuf-sel = <0x7>;
14308                 dma-coherent;
14309 +               sdhci-caps-mask = <0x2 0x0>;
14310         };
14312         usbss0: cdns-usb@4104000 {
14313 diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
14314 index bbdb54702aa7..247011356d11 100644
14315 --- a/arch/arm64/crypto/aes-modes.S
14316 +++ b/arch/arm64/crypto/aes-modes.S
14317 @@ -359,6 +359,7 @@ ST5(        mov             v4.16b, vctr.16b                )
14318         ins             vctr.d[0], x8
14320         /* apply carry to N counter blocks for N := x12 */
14321 +       cbz             x12, 2f
14322         adr             x16, 1f
14323         sub             x16, x16, x12, lsl #3
14324         br              x16
14325 diff --git a/arch/arm64/crypto/poly1305-glue.c b/arch/arm64/crypto/poly1305-glue.c
14326 index 683de671741a..9c3d86e397bf 100644
14327 --- a/arch/arm64/crypto/poly1305-glue.c
14328 +++ b/arch/arm64/crypto/poly1305-glue.c
14329 @@ -25,7 +25,7 @@ asmlinkage void poly1305_emit(void *state, u8 *digest, const u32 *nonce);
14331  static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
14333 -void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
14334 +void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
14336         poly1305_init_arm64(&dctx->h, key);
14337         dctx->s[0] = get_unaligned_le32(key + 16);
14338 diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
14339 index 1c26d7baa67f..cfdde3a56805 100644
14340 --- a/arch/arm64/include/asm/daifflags.h
14341 +++ b/arch/arm64/include/asm/daifflags.h
14342 @@ -131,6 +131,9 @@ static inline void local_daif_inherit(struct pt_regs *regs)
14343         if (interrupts_enabled(regs))
14344                 trace_hardirqs_on();
14346 +       if (system_uses_irq_prio_masking())
14347 +               gic_write_pmr(regs->pmr_save);
14349         /*
14350          * We can't use local_daif_restore(regs->pstate) here as
14351          * system_has_prio_mask_debugging() won't restore the I bit if it can
14352 diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
14353 index 3d10e6527f7d..858c2fcfc043 100644
14354 --- a/arch/arm64/include/asm/kvm_host.h
14355 +++ b/arch/arm64/include/asm/kvm_host.h
14356 @@ -713,6 +713,7 @@ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
14357  static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
14359  void kvm_arm_init_debug(void);
14360 +void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
14361  void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
14362  void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
14363  void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
14364 diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
14365 index 949788f5ba40..727bfc3be99b 100644
14366 --- a/arch/arm64/include/asm/unistd.h
14367 +++ b/arch/arm64/include/asm/unistd.h
14368 @@ -38,7 +38,7 @@
14369  #define __ARM_NR_compat_set_tls                (__ARM_NR_COMPAT_BASE + 5)
14370  #define __ARM_NR_COMPAT_END            (__ARM_NR_COMPAT_BASE + 0x800)
14372 -#define __NR_compat_syscalls           443
14373 +#define __NR_compat_syscalls           447
14374  #endif
14376  #define __ARCH_WANT_SYS_CLONE
14377 diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
14378 index 3d874f624056..e5015a2b9c94 100644
14379 --- a/arch/arm64/include/asm/unistd32.h
14380 +++ b/arch/arm64/include/asm/unistd32.h
14381 @@ -893,6 +893,14 @@ __SYSCALL(__NR_process_madvise, sys_process_madvise)
14382  __SYSCALL(__NR_epoll_pwait2, compat_sys_epoll_pwait2)
14383  #define __NR_mount_setattr 442
14384  __SYSCALL(__NR_mount_setattr, sys_mount_setattr)
14385 +#define __NR_futex_wait 443
14386 +__SYSCALL(__NR_futex_wait, sys_futex_wait)
14387 +#define __NR_futex_wake 444
14388 +__SYSCALL(__NR_futex_wake, sys_futex_wake)
14389 +#define __NR_futex_waitv 445
14390 +__SYSCALL(__NR_futex_waitv, compat_sys_futex_waitv)
14391 +#define __NR_futex_requeue 446
14392 +__SYSCALL(__NR_futex_requeue, compat_sys_futex_requeue)
14394  /*
14395   * Please add new compat syscalls above this comment and update
14396 diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
14397 index 9d3588450473..117412bae915 100644
14398 --- a/arch/arm64/kernel/entry-common.c
14399 +++ b/arch/arm64/kernel/entry-common.c
14400 @@ -226,14 +226,6 @@ static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
14402         unsigned long far = read_sysreg(far_el1);
14404 -       /*
14405 -        * The CPU masked interrupts, and we are leaving them masked during
14406 -        * do_debug_exception(). Update PMR as if we had called
14407 -        * local_daif_mask().
14408 -        */
14409 -       if (system_uses_irq_prio_masking())
14410 -               gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
14412         arm64_enter_el1_dbg(regs);
14413         if (!cortex_a76_erratum_1463225_debug_handler(regs))
14414                 do_debug_exception(far, esr, regs);
14415 @@ -398,9 +390,6 @@ static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
14416         /* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
14417         unsigned long far = read_sysreg(far_el1);
14419 -       if (system_uses_irq_prio_masking())
14420 -               gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
14422         enter_from_user_mode();
14423         do_debug_exception(far, esr, regs);
14424         local_daif_restore(DAIF_PROCCTX_NOIRQ);
14425 @@ -408,9 +397,6 @@ static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
14427  static void noinstr el0_svc(struct pt_regs *regs)
14429 -       if (system_uses_irq_prio_masking())
14430 -               gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
14432         enter_from_user_mode();
14433         cortex_a76_erratum_1463225_svc_handler();
14434         do_el0_svc(regs);
14435 @@ -486,9 +472,6 @@ static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
14437  static void noinstr el0_svc_compat(struct pt_regs *regs)
14439 -       if (system_uses_irq_prio_masking())
14440 -               gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
14442         enter_from_user_mode();
14443         cortex_a76_erratum_1463225_svc_handler();
14444         do_el0_svc_compat(regs);
14445 diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
14446 index 6acfc5e6b5e0..e03fba3ae2a0 100644
14447 --- a/arch/arm64/kernel/entry.S
14448 +++ b/arch/arm64/kernel/entry.S
14449 @@ -263,16 +263,16 @@ alternative_else_nop_endif
14450         stp     lr, x21, [sp, #S_LR]
14452         /*
14453 -        * For exceptions from EL0, terminate the callchain here.
14454 +        * For exceptions from EL0, create a terminal frame record.
14455          * For exceptions from EL1, create a synthetic frame record so the
14456          * interrupted code shows up in the backtrace.
14457          */
14458         .if \el == 0
14459 -       mov     x29, xzr
14460 +       stp     xzr, xzr, [sp, #S_STACKFRAME]
14461         .else
14462         stp     x29, x22, [sp, #S_STACKFRAME]
14463 -       add     x29, sp, #S_STACKFRAME
14464         .endif
14465 +       add     x29, sp, #S_STACKFRAME
14467  #ifdef CONFIG_ARM64_SW_TTBR0_PAN
14468  alternative_if_not ARM64_HAS_PAN
14469 @@ -292,6 +292,8 @@ alternative_else_nop_endif
14470  alternative_if ARM64_HAS_IRQ_PRIO_MASKING
14471         mrs_s   x20, SYS_ICC_PMR_EL1
14472         str     x20, [sp, #S_PMR_SAVE]
14473 +       mov     x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
14474 +       msr_s   SYS_ICC_PMR_EL1, x20
14475  alternative_else_nop_endif
14477         /* Re-enable tag checking (TCO set on exception entry) */
14478 @@ -493,8 +495,8 @@ tsk .req    x28             // current thread_info
14479  /*
14480   * Interrupt handling.
14481   */
14482 -       .macro  irq_handler
14483 -       ldr_l   x1, handle_arch_irq
14484 +       .macro  irq_handler, handler:req
14485 +       ldr_l   x1, \handler
14486         mov     x0, sp
14487         irq_stack_entry
14488         blr     x1
14489 @@ -524,13 +526,41 @@ alternative_endif
14490  #endif
14491         .endm
14493 -       .macro  gic_prio_irq_setup, pmr:req, tmp:req
14494 -#ifdef CONFIG_ARM64_PSEUDO_NMI
14495 -       alternative_if ARM64_HAS_IRQ_PRIO_MASKING
14496 -       orr     \tmp, \pmr, #GIC_PRIO_PSR_I_SET
14497 -       msr_s   SYS_ICC_PMR_EL1, \tmp
14498 -       alternative_else_nop_endif
14499 +       .macro el1_interrupt_handler, handler:req
14500 +       enable_da_f
14502 +       mov     x0, sp
14503 +       bl      enter_el1_irq_or_nmi
14505 +       irq_handler     \handler
14507 +#ifdef CONFIG_PREEMPTION
14508 +       ldr     x24, [tsk, #TSK_TI_PREEMPT]     // get preempt count
14509 +alternative_if ARM64_HAS_IRQ_PRIO_MASKING
14510 +       /*
14511 +        * DA_F were cleared at start of handling. If anything is set in DAIF,
14512 +        * we come back from an NMI, so skip preemption
14513 +        */
14514 +       mrs     x0, daif
14515 +       orr     x24, x24, x0
14516 +alternative_else_nop_endif
14517 +       cbnz    x24, 1f                         // preempt count != 0 || NMI return path
14518 +       bl      arm64_preempt_schedule_irq      // irq en/disable is done inside
14520  #endif
14522 +       mov     x0, sp
14523 +       bl      exit_el1_irq_or_nmi
14524 +       .endm
14526 +       .macro el0_interrupt_handler, handler:req
14527 +       user_exit_irqoff
14528 +       enable_da_f
14530 +       tbz     x22, #55, 1f
14531 +       bl      do_el0_irq_bp_hardening
14533 +       irq_handler     \handler
14534         .endm
14536         .text
14537 @@ -662,32 +692,7 @@ SYM_CODE_END(el1_sync)
14538         .align  6
14539  SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
14540         kernel_entry 1
14541 -       gic_prio_irq_setup pmr=x20, tmp=x1
14542 -       enable_da_f
14544 -       mov     x0, sp
14545 -       bl      enter_el1_irq_or_nmi
14547 -       irq_handler
14549 -#ifdef CONFIG_PREEMPTION
14550 -       ldr     x24, [tsk, #TSK_TI_PREEMPT]     // get preempt count
14551 -alternative_if ARM64_HAS_IRQ_PRIO_MASKING
14552 -       /*
14553 -        * DA_F were cleared at start of handling. If anything is set in DAIF,
14554 -        * we come back from an NMI, so skip preemption
14555 -        */
14556 -       mrs     x0, daif
14557 -       orr     x24, x24, x0
14558 -alternative_else_nop_endif
14559 -       cbnz    x24, 1f                         // preempt count != 0 || NMI return path
14560 -       bl      arm64_preempt_schedule_irq      // irq en/disable is done inside
14562 -#endif
14564 -       mov     x0, sp
14565 -       bl      exit_el1_irq_or_nmi
14567 +       el1_interrupt_handler handle_arch_irq
14568         kernel_exit 1
14569  SYM_CODE_END(el1_irq)
14571 @@ -727,22 +732,13 @@ SYM_CODE_END(el0_error_compat)
14572  SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
14573         kernel_entry 0
14574  el0_irq_naked:
14575 -       gic_prio_irq_setup pmr=x20, tmp=x0
14576 -       user_exit_irqoff
14577 -       enable_da_f
14579 -       tbz     x22, #55, 1f
14580 -       bl      do_el0_irq_bp_hardening
14582 -       irq_handler
14584 +       el0_interrupt_handler handle_arch_irq
14585         b       ret_to_user
14586  SYM_CODE_END(el0_irq)
14588  SYM_CODE_START_LOCAL(el1_error)
14589         kernel_entry 1
14590         mrs     x1, esr_el1
14591 -       gic_prio_kentry_setup tmp=x2
14592         enable_dbg
14593         mov     x0, sp
14594         bl      do_serror
14595 @@ -753,7 +749,6 @@ SYM_CODE_START_LOCAL(el0_error)
14596         kernel_entry 0
14597  el0_error_naked:
14598         mrs     x25, esr_el1
14599 -       gic_prio_kentry_setup tmp=x2
14600         user_exit_irqoff
14601         enable_dbg
14602         mov     x0, sp
14603 diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
14604 index d55bdfb7789c..7032a5f9e624 100644
14605 --- a/arch/arm64/kernel/stacktrace.c
14606 +++ b/arch/arm64/kernel/stacktrace.c
14607 @@ -44,10 +44,6 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
14608         unsigned long fp = frame->fp;
14609         struct stack_info info;
14611 -       /* Terminal record; nothing to unwind */
14612 -       if (!fp)
14613 -               return -ENOENT;
14615         if (fp & 0xf)
14616                 return -EINVAL;
14618 @@ -108,6 +104,12 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
14620         frame->pc = ptrauth_strip_insn_pac(frame->pc);
14622 +       /*
14623 +        * This is a terminal record, so we have finished unwinding.
14624 +        */
14625 +       if (!frame->fp && !frame->pc)
14626 +               return -ENOENT;
14628         return 0;
14630  NOKPROBE_SYMBOL(unwind_frame);
14631 diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S
14632 index 61dbb4c838ef..a5e61e09ea92 100644
14633 --- a/arch/arm64/kernel/vdso/vdso.lds.S
14634 +++ b/arch/arm64/kernel/vdso/vdso.lds.S
14635 @@ -31,6 +31,13 @@ SECTIONS
14636         .gnu.version_d  : { *(.gnu.version_d) }
14637         .gnu.version_r  : { *(.gnu.version_r) }
14639 +       /*
14640 +        * Discard .note.gnu.property sections which are unused and have
14641 +        * different alignment requirement from vDSO note sections.
14642 +        */
14643 +       /DISCARD/       : {
14644 +               *(.note.GNU-stack .note.gnu.property)
14645 +       }
14646         .note           : { *(.note.*) }                :text   :note
14648         . = ALIGN(16);
14649 @@ -48,7 +55,6 @@ SECTIONS
14650         PROVIDE(end = .);
14652         /DISCARD/       : {
14653 -               *(.note.GNU-stack)
14654                 *(.data .data.* .gnu.linkonce.d.* .sdata*)
14655                 *(.bss .sbss .dynbss .dynsbss)
14656                 *(.eh_frame .eh_frame_hdr)
14657 diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
14658 index 7f06ba76698d..84b5f79c9eab 100644
14659 --- a/arch/arm64/kvm/arm.c
14660 +++ b/arch/arm64/kvm/arm.c
14661 @@ -580,6 +580,8 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
14663         vcpu->arch.has_run_once = true;
14665 +       kvm_arm_vcpu_init_debug(vcpu);
14667         if (likely(irqchip_in_kernel(kvm))) {
14668                 /*
14669                  * Map the VGIC hardware resources before running a vcpu the
14670 @@ -1808,8 +1810,10 @@ static int init_hyp_mode(void)
14671         if (is_protected_kvm_enabled()) {
14672                 init_cpu_logical_map();
14674 -               if (!init_psci_relay())
14675 +               if (!init_psci_relay()) {
14676 +                       err = -ENODEV;
14677                         goto out_err;
14678 +               }
14679         }
14681         return 0;
14682 diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
14683 index dbc890511631..2484b2cca74b 100644
14684 --- a/arch/arm64/kvm/debug.c
14685 +++ b/arch/arm64/kvm/debug.c
14686 @@ -68,6 +68,64 @@ void kvm_arm_init_debug(void)
14687         __this_cpu_write(mdcr_el2, kvm_call_hyp_ret(__kvm_get_mdcr_el2));
14690 +/**
14691 + * kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value
14692 + *
14693 + * @vcpu:      the vcpu pointer
14694 + *
14695 + * This ensures we will trap access to:
14696 + *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
14697 + *  - Debug ROM Address (MDCR_EL2_TDRA)
14698 + *  - OS related registers (MDCR_EL2_TDOSA)
14699 + *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
14700 + *  - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
14701 + */
14702 +static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
14704 +       /*
14705 +        * This also clears MDCR_EL2_E2PB_MASK to disable guest access
14706 +        * to the profiling buffer.
14707 +        */
14708 +       vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
14709 +       vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
14710 +                               MDCR_EL2_TPMS |
14711 +                               MDCR_EL2_TTRF |
14712 +                               MDCR_EL2_TPMCR |
14713 +                               MDCR_EL2_TDRA |
14714 +                               MDCR_EL2_TDOSA);
14716 +       /* Is the VM being debugged by userspace? */
14717 +       if (vcpu->guest_debug)
14718 +               /* Route all software debug exceptions to EL2 */
14719 +               vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;
14721 +       /*
14722 +        * Trap debug register access when one of the following is true:
14723 +        *  - Userspace is using the hardware to debug the guest
14724 +        *  (KVM_GUESTDBG_USE_HW is set).
14725 +        *  - The guest is not using debug (KVM_ARM64_DEBUG_DIRTY is clear).
14726 +        */
14727 +       if ((vcpu->guest_debug & KVM_GUESTDBG_USE_HW) ||
14728 +           !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
14729 +               vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
14731 +       trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
14734 +/**
14735 + * kvm_arm_vcpu_init_debug - setup vcpu debug traps
14736 + *
14737 + * @vcpu:      the vcpu pointer
14738 + *
14739 + * Set vcpu initial mdcr_el2 value.
14740 + */
14741 +void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu)
14743 +       preempt_disable();
14744 +       kvm_arm_setup_mdcr_el2(vcpu);
14745 +       preempt_enable();
14748  /**
14749   * kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
14750   */
14751 @@ -83,13 +141,7 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
14752   * @vcpu:      the vcpu pointer
14753   *
14754   * This is called before each entry into the hypervisor to setup any
14755 - * debug related registers. Currently this just ensures we will trap
14756 - * access to:
14757 - *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
14758 - *  - Debug ROM Address (MDCR_EL2_TDRA)
14759 - *  - OS related registers (MDCR_EL2_TDOSA)
14760 - *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
14761 - *  - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
14762 + * debug related registers.
14763   *
14764   * Additionally, KVM only traps guest accesses to the debug registers if
14765   * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
14766 @@ -101,28 +153,14 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
14768  void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
14770 -       bool trap_debug = !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY);
14771         unsigned long mdscr, orig_mdcr_el2 = vcpu->arch.mdcr_el2;
14773         trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);
14775 -       /*
14776 -        * This also clears MDCR_EL2_E2PB_MASK to disable guest access
14777 -        * to the profiling buffer.
14778 -        */
14779 -       vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
14780 -       vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
14781 -                               MDCR_EL2_TPMS |
14782 -                               MDCR_EL2_TTRF |
14783 -                               MDCR_EL2_TPMCR |
14784 -                               MDCR_EL2_TDRA |
14785 -                               MDCR_EL2_TDOSA);
14786 +       kvm_arm_setup_mdcr_el2(vcpu);
14788         /* Is Guest debugging in effect? */
14789         if (vcpu->guest_debug) {
14790 -               /* Route all software debug exceptions to EL2 */
14791 -               vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;
14793                 /* Save guest debug state */
14794                 save_guest_debug_regs(vcpu);
14796 @@ -176,7 +214,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
14798                         vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
14799                         vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
14800 -                       trap_debug = true;
14802                         trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
14803                                                 &vcpu->arch.debug_ptr->dbg_bcr[0],
14804 @@ -191,10 +228,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
14805         BUG_ON(!vcpu->guest_debug &&
14806                 vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state);
14808 -       /* Trap debug register access */
14809 -       if (trap_debug)
14810 -               vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
14812         /* If KDE or MDE are set, perform a full save/restore cycle. */
14813         if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
14814                 vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
14815 @@ -203,7 +236,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
14816         if (has_vhe() && orig_mdcr_el2 != vcpu->arch.mdcr_el2)
14817                 write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
14819 -       trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
14820         trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1));
14823 diff --git a/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c b/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c
14824 index ead02c6a7628..6bc88a756cb7 100644
14825 --- a/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c
14826 +++ b/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c
14827 @@ -50,6 +50,18 @@
14828  #ifndef R_AARCH64_ABS64
14829  #define R_AARCH64_ABS64                        257
14830  #endif
14831 +#ifndef R_AARCH64_PREL64
14832 +#define R_AARCH64_PREL64               260
14833 +#endif
14834 +#ifndef R_AARCH64_PREL32
14835 +#define R_AARCH64_PREL32               261
14836 +#endif
14837 +#ifndef R_AARCH64_PREL16
14838 +#define R_AARCH64_PREL16               262
14839 +#endif
14840 +#ifndef R_AARCH64_PLT32
14841 +#define R_AARCH64_PLT32                        314
14842 +#endif
14843  #ifndef R_AARCH64_LD_PREL_LO19
14844  #define R_AARCH64_LD_PREL_LO19         273
14845  #endif
14846 @@ -371,6 +383,12 @@ static void emit_rela_section(Elf64_Shdr *sh_rela)
14847                 case R_AARCH64_ABS64:
14848                         emit_rela_abs64(rela, sh_orig_name);
14849                         break;
14850 +               /* Allow position-relative data relocations. */
14851 +               case R_AARCH64_PREL64:
14852 +               case R_AARCH64_PREL32:
14853 +               case R_AARCH64_PREL16:
14854 +               case R_AARCH64_PLT32:
14855 +                       break;
14856                 /* Allow relocations to generate PC-relative addressing. */
14857                 case R_AARCH64_LD_PREL_LO19:
14858                 case R_AARCH64_ADR_PREL_LO21:
14859 diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
14860 index bd354cd45d28..4b5acd84b8c8 100644
14861 --- a/arch/arm64/kvm/reset.c
14862 +++ b/arch/arm64/kvm/reset.c
14863 @@ -242,6 +242,11 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
14865         /* Reset core registers */
14866         memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
14867 +       memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));
14868 +       vcpu->arch.ctxt.spsr_abt = 0;
14869 +       vcpu->arch.ctxt.spsr_und = 0;
14870 +       vcpu->arch.ctxt.spsr_irq = 0;
14871 +       vcpu->arch.ctxt.spsr_fiq = 0;
14872         vcpu_gp_regs(vcpu)->pstate = pstate;
14874         /* Reset system registers */
14875 diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c
14876 index 44419679f91a..7740995de982 100644
14877 --- a/arch/arm64/kvm/vgic/vgic-kvm-device.c
14878 +++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c
14879 @@ -87,8 +87,8 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
14880                         r = vgic_v3_set_redist_base(kvm, 0, *addr, 0);
14881                         goto out;
14882                 }
14883 -               rdreg = list_first_entry(&vgic->rd_regions,
14884 -                                        struct vgic_redist_region, list);
14885 +               rdreg = list_first_entry_or_null(&vgic->rd_regions,
14886 +                                                struct vgic_redist_region, list);
14887                 if (!rdreg)
14888                         addr_ptr = &undef_value;
14889                 else
14890 @@ -226,6 +226,9 @@ static int vgic_get_common_attr(struct kvm_device *dev,
14891                 u64 addr;
14892                 unsigned long type = (unsigned long)attr->attr;
14894 +               if (copy_from_user(&addr, uaddr, sizeof(addr)))
14895 +                       return -EFAULT;
14897                 r = kvm_vgic_addr(dev->kvm, type, &addr, false);
14898                 if (r)
14899                         return (r == -ENODEV) ? -ENXIO : r;
14900 diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
14901 index ac485163a4a7..6d44c028d1c9 100644
14902 --- a/arch/arm64/mm/flush.c
14903 +++ b/arch/arm64/mm/flush.c
14904 @@ -55,8 +55,10 @@ void __sync_icache_dcache(pte_t pte)
14906         struct page *page = pte_page(pte);
14908 -       if (!test_and_set_bit(PG_dcache_clean, &page->flags))
14909 +       if (!test_bit(PG_dcache_clean, &page->flags)) {
14910                 sync_icache_aliases(page_address(page), page_size(page));
14911 +               set_bit(PG_dcache_clean, &page->flags);
14912 +       }
14914  EXPORT_SYMBOL_GPL(__sync_icache_dcache);
14916 diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
14917 index c967bfd30d2b..b183216a591c 100644
14918 --- a/arch/arm64/mm/proc.S
14919 +++ b/arch/arm64/mm/proc.S
14920 @@ -444,6 +444,18 @@ SYM_FUNC_START(__cpu_setup)
14921         mov     x10, #(SYS_GCR_EL1_RRND | SYS_GCR_EL1_EXCL_MASK)
14922         msr_s   SYS_GCR_EL1, x10
14924 +       /*
14925 +        * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
14926 +        * RGSR_EL1.SEED must be non-zero for IRG to produce
14927 +        * pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
14928 +        * must initialize it.
14929 +        */
14930 +       mrs     x10, CNTVCT_EL0
14931 +       ands    x10, x10, #SYS_RGSR_EL1_SEED_MASK
14932 +       csinc   x10, x10, xzr, ne
14933 +       lsl     x10, x10, #SYS_RGSR_EL1_SEED_SHIFT
14934 +       msr_s   SYS_RGSR_EL1, x10
14936         /* clear any pending tag check faults in TFSR*_EL1 */
14937         msr_s   SYS_TFSR_EL1, xzr
14938         msr_s   SYS_TFSRE0_EL1, xzr
14939 diff --git a/arch/ia64/include/asm/module.h b/arch/ia64/include/asm/module.h
14940 index 5a29652e6def..7271b9c5fc76 100644
14941 --- a/arch/ia64/include/asm/module.h
14942 +++ b/arch/ia64/include/asm/module.h
14943 @@ -14,16 +14,20 @@
14944  struct elf64_shdr;                     /* forward declration */
14946  struct mod_arch_specific {
14947 +       /* Used only at module load time. */
14948         struct elf64_shdr *core_plt;    /* core PLT section */
14949         struct elf64_shdr *init_plt;    /* init PLT section */
14950         struct elf64_shdr *got;         /* global offset table */
14951         struct elf64_shdr *opd;         /* official procedure descriptors */
14952         struct elf64_shdr *unwind;      /* unwind-table section */
14953         unsigned long gp;               /* global-pointer for module */
14954 +       unsigned int next_got_entry;    /* index of next available got entry */
14956 +       /* Used at module run and cleanup time. */
14957         void *core_unw_table;           /* core unwind-table cookie returned by unwinder */
14958         void *init_unw_table;           /* init unwind-table cookie returned by unwinder */
14959 -       unsigned int next_got_entry;    /* index of next available got entry */
14960 +       void *opd_addr;                 /* symbolize uses .opd to get to actual function */
14961 +       unsigned long opd_size;
14962  };
14964  #define ARCH_SHF_SMALL SHF_IA_64_SHORT
14965 diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
14966 index a5636524af76..e2af6b172200 100644
14967 --- a/arch/ia64/kernel/acpi.c
14968 +++ b/arch/ia64/kernel/acpi.c
14969 @@ -446,7 +446,8 @@ void __init acpi_numa_fixup(void)
14970         if (srat_num_cpus == 0) {
14971                 node_set_online(0);
14972                 node_cpuid[0].phys_id = hard_smp_processor_id();
14973 -               return;
14974 +               slit_distance(0, 0) = LOCAL_DISTANCE;
14975 +               goto out;
14976         }
14978         /*
14979 @@ -489,7 +490,7 @@ void __init acpi_numa_fixup(void)
14980                         for (j = 0; j < MAX_NUMNODES; j++)
14981                                 slit_distance(i, j) = i == j ?
14982                                         LOCAL_DISTANCE : REMOTE_DISTANCE;
14983 -               return;
14984 +               goto out;
14985         }
14987         memset(numa_slit, -1, sizeof(numa_slit));
14988 @@ -514,6 +515,8 @@ void __init acpi_numa_fixup(void)
14989                 printk("\n");
14990         }
14991  #endif
14992 +out:
14993 +       node_possible_map = node_online_map;
14995  #endif                         /* CONFIG_ACPI_NUMA */
14997 diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
14998 index c5fe21de46a8..31149e41f9be 100644
14999 --- a/arch/ia64/kernel/efi.c
15000 +++ b/arch/ia64/kernel/efi.c
15001 @@ -415,10 +415,10 @@ efi_get_pal_addr (void)
15002                 mask  = ~((1 << IA64_GRANULE_SHIFT) - 1);
15004                 printk(KERN_INFO "CPU %d: mapping PAL code "
15005 -                       "[0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
15006 -                       smp_processor_id(), md->phys_addr,
15007 -                       md->phys_addr + efi_md_size(md),
15008 -                       vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
15009 +                       "[0x%llx-0x%llx) into [0x%llx-0x%llx)\n",
15010 +                       smp_processor_id(), md->phys_addr,
15011 +                       md->phys_addr + efi_md_size(md),
15012 +                       vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
15013  #endif
15014                 return __va(md->phys_addr);
15015         }
15016 @@ -560,6 +560,7 @@ efi_init (void)
15017         {
15018                 efi_memory_desc_t *md;
15019                 void *p;
15020 +               unsigned int i;
15022                 for (i = 0, p = efi_map_start; p < efi_map_end;
15023                      ++i, p += efi_desc_size)
15024 @@ -586,7 +587,7 @@ efi_init (void)
15025                         }
15027                         printk("mem%02d: %s "
15028 -                              "range=[0x%016lx-0x%016lx) (%4lu%s)\n",
15029 +                              "range=[0x%016llx-0x%016llx) (%4lu%s)\n",
15030                                i, efi_md_typeattr_format(buf, sizeof(buf), md),
15031                                md->phys_addr,
15032                                md->phys_addr + efi_md_size(md), size, unit);
15033 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
15034 index 00a496cb346f..2cba53c1da82 100644
15035 --- a/arch/ia64/kernel/module.c
15036 +++ b/arch/ia64/kernel/module.c
15037 @@ -905,9 +905,31 @@ register_unwind_table (struct module *mod)
15038  int
15039  module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod)
15041 +       struct mod_arch_specific *mas = &mod->arch;
15043         DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
15044 -       if (mod->arch.unwind)
15045 +       if (mas->unwind)
15046                 register_unwind_table(mod);
15048 +       /*
15049 +        * ".opd" was already relocated to the final destination. Store
15050 +        * it's address for use in symbolizer.
15051 +        */
15052 +       mas->opd_addr = (void *)mas->opd->sh_addr;
15053 +       mas->opd_size = mas->opd->sh_size;
15055 +       /*
15056 +        * Module relocation was already done at this point. Section
15057 +        * headers are about to be deleted. Wipe out load-time context.
15058 +        */
15059 +       mas->core_plt = NULL;
15060 +       mas->init_plt = NULL;
15061 +       mas->got = NULL;
15062 +       mas->opd = NULL;
15063 +       mas->unwind = NULL;
15064 +       mas->gp = 0;
15065 +       mas->next_got_entry = 0;
15067         return 0;
15070 @@ -926,10 +948,9 @@ module_arch_cleanup (struct module *mod)
15072  void *dereference_module_function_descriptor(struct module *mod, void *ptr)
15074 -       Elf64_Shdr *opd = mod->arch.opd;
15075 +       struct mod_arch_specific *mas = &mod->arch;
15077 -       if (ptr < (void *)opd->sh_addr ||
15078 -                       ptr >= (void *)(opd->sh_addr + opd->sh_size))
15079 +       if (ptr < mas->opd_addr || ptr >= mas->opd_addr + mas->opd_size)
15080                 return ptr;
15082         return dereference_function_descriptor(ptr);
15083 diff --git a/arch/m68k/include/asm/mvme147hw.h b/arch/m68k/include/asm/mvme147hw.h
15084 index 257b29184af9..e28eb1c0e0bf 100644
15085 --- a/arch/m68k/include/asm/mvme147hw.h
15086 +++ b/arch/m68k/include/asm/mvme147hw.h
15087 @@ -66,6 +66,9 @@ struct pcc_regs {
15088  #define PCC_INT_ENAB           0x08
15090  #define PCC_TIMER_INT_CLR      0x80
15092 +#define PCC_TIMER_TIC_EN       0x01
15093 +#define PCC_TIMER_COC_EN       0x02
15094  #define PCC_TIMER_CLR_OVF      0x04
15096  #define PCC_LEVEL_ABORT                0x07
15097 diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
15098 index 1c235d8f53f3..f55bdcb8e4f1 100644
15099 --- a/arch/m68k/kernel/sys_m68k.c
15100 +++ b/arch/m68k/kernel/sys_m68k.c
15101 @@ -388,6 +388,8 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
15102                 ret = -EPERM;
15103                 if (!capable(CAP_SYS_ADMIN))
15104                         goto out;
15106 +               mmap_read_lock(current->mm);
15107         } else {
15108                 struct vm_area_struct *vma;
15110 diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
15111 index cfdc7f912e14..e1e90c49a496 100644
15112 --- a/arch/m68k/mvme147/config.c
15113 +++ b/arch/m68k/mvme147/config.c
15114 @@ -114,8 +114,10 @@ static irqreturn_t mvme147_timer_int (int irq, void *dev_id)
15115         unsigned long flags;
15117         local_irq_save(flags);
15118 -       m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR;
15119 -       m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF;
15120 +       m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF | PCC_TIMER_COC_EN |
15121 +                            PCC_TIMER_TIC_EN;
15122 +       m147_pcc->t1_int_cntrl = PCC_INT_ENAB | PCC_TIMER_INT_CLR |
15123 +                                PCC_LEVEL_TIMER1;
15124         clk_total += PCC_TIMER_CYCLES;
15125         legacy_timer_tick(1);
15126         local_irq_restore(flags);
15127 @@ -133,10 +135,10 @@ void mvme147_sched_init (void)
15128         /* Init the clock with a value */
15129         /* The clock counter increments until 0xFFFF then reloads */
15130         m147_pcc->t1_preload = PCC_TIMER_PRELOAD;
15131 -       m147_pcc->t1_cntrl = 0x0;       /* clear timer */
15132 -       m147_pcc->t1_cntrl = 0x3;       /* start timer */
15133 -       m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR;  /* clear pending ints */
15134 -       m147_pcc->t1_int_cntrl = PCC_INT_ENAB|PCC_LEVEL_TIMER1;
15135 +       m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF | PCC_TIMER_COC_EN |
15136 +                            PCC_TIMER_TIC_EN;
15137 +       m147_pcc->t1_int_cntrl = PCC_INT_ENAB | PCC_TIMER_INT_CLR |
15138 +                                PCC_LEVEL_TIMER1;
15140         clocksource_register_hz(&mvme147_clk, PCC_TIMER_CLOCK_FREQ);
15142 diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
15143 index 30357fe4ba6c..b59593c7cfb9 100644
15144 --- a/arch/m68k/mvme16x/config.c
15145 +++ b/arch/m68k/mvme16x/config.c
15146 @@ -366,6 +366,7 @@ static u32 clk_total;
15147  #define PCCTOVR1_COC_EN      0x02
15148  #define PCCTOVR1_OVR_CLR     0x04
15150 +#define PCCTIC1_INT_LEVEL    6
15151  #define PCCTIC1_INT_CLR      0x08
15152  #define PCCTIC1_INT_EN       0x10
15154 @@ -374,8 +375,8 @@ static irqreturn_t mvme16x_timer_int (int irq, void *dev_id)
15155         unsigned long flags;
15157         local_irq_save(flags);
15158 -       out_8(PCCTIC1, in_8(PCCTIC1) | PCCTIC1_INT_CLR);
15159 -       out_8(PCCTOVR1, PCCTOVR1_OVR_CLR);
15160 +       out_8(PCCTOVR1, PCCTOVR1_OVR_CLR | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN);
15161 +       out_8(PCCTIC1, PCCTIC1_INT_EN | PCCTIC1_INT_CLR | PCCTIC1_INT_LEVEL);
15162         clk_total += PCC_TIMER_CYCLES;
15163         legacy_timer_tick(1);
15164         local_irq_restore(flags);
15165 @@ -389,14 +390,15 @@ void mvme16x_sched_init(void)
15166      int irq;
15168      /* Using PCCchip2 or MC2 chip tick timer 1 */
15169 -    out_be32(PCCTCNT1, 0);
15170 -    out_be32(PCCTCMP1, PCC_TIMER_CYCLES);
15171 -    out_8(PCCTOVR1, in_8(PCCTOVR1) | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN);
15172 -    out_8(PCCTIC1, PCCTIC1_INT_EN | 6);
15173      if (request_irq(MVME16x_IRQ_TIMER, mvme16x_timer_int, IRQF_TIMER, "timer",
15174                      NULL))
15175         panic ("Couldn't register timer int");
15177 +    out_be32(PCCTCNT1, 0);
15178 +    out_be32(PCCTCMP1, PCC_TIMER_CYCLES);
15179 +    out_8(PCCTOVR1, PCCTOVR1_OVR_CLR | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN);
15180 +    out_8(PCCTIC1, PCCTIC1_INT_EN | PCCTIC1_INT_CLR | PCCTIC1_INT_LEVEL);
15182      clocksource_register_hz(&mvme16x_clk, PCC_TIMER_CLOCK_FREQ);
15184      if (brdno == 0x0162 || brdno == 0x172)
15185 diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
15186 index d89efba3d8a4..e89d63cd92d1 100644
15187 --- a/arch/mips/Kconfig
15188 +++ b/arch/mips/Kconfig
15189 @@ -6,6 +6,7 @@ config MIPS
15190         select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT
15191         select ARCH_HAS_FORTIFY_SOURCE
15192         select ARCH_HAS_KCOV
15193 +       select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE if !EVA
15194         select ARCH_HAS_PTE_SPECIAL if !(32BIT && CPU_HAS_RIXI)
15195         select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
15196         select ARCH_HAS_UBSAN_SANITIZE_ALL
15197 diff --git a/arch/mips/boot/dts/brcm/bcm3368.dtsi b/arch/mips/boot/dts/brcm/bcm3368.dtsi
15198 index 69cbef472377..d4b2b430dad0 100644
15199 --- a/arch/mips/boot/dts/brcm/bcm3368.dtsi
15200 +++ b/arch/mips/boot/dts/brcm/bcm3368.dtsi
15201 @@ -59,7 +59,7 @@ clkctl: clock-controller@fff8c004 {
15203                 periph_cntl: syscon@fff8c008 {
15204                         compatible = "syscon";
15205 -                       reg = <0xfff8c000 0x4>;
15206 +                       reg = <0xfff8c008 0x4>;
15207                         native-endian;
15208                 };
15210 diff --git a/arch/mips/boot/dts/brcm/bcm63268.dtsi b/arch/mips/boot/dts/brcm/bcm63268.dtsi
15211 index e0021ff9f144..940594436872 100644
15212 --- a/arch/mips/boot/dts/brcm/bcm63268.dtsi
15213 +++ b/arch/mips/boot/dts/brcm/bcm63268.dtsi
15214 @@ -59,7 +59,7 @@ clkctl: clock-controller@10000004 {
15216                 periph_cntl: syscon@10000008 {
15217                         compatible = "syscon";
15218 -                       reg = <0x10000000 0xc>;
15219 +                       reg = <0x10000008 0x4>;
15220                         native-endian;
15221                 };
15223 diff --git a/arch/mips/boot/dts/brcm/bcm6358.dtsi b/arch/mips/boot/dts/brcm/bcm6358.dtsi
15224 index 9d93e7f5e6fc..d79c88c2fc9c 100644
15225 --- a/arch/mips/boot/dts/brcm/bcm6358.dtsi
15226 +++ b/arch/mips/boot/dts/brcm/bcm6358.dtsi
15227 @@ -59,7 +59,7 @@ clkctl: clock-controller@fffe0004 {
15229                 periph_cntl: syscon@fffe0008 {
15230                         compatible = "syscon";
15231 -                       reg = <0xfffe0000 0x4>;
15232 +                       reg = <0xfffe0008 0x4>;
15233                         native-endian;
15234                 };
15236 diff --git a/arch/mips/boot/dts/brcm/bcm6362.dtsi b/arch/mips/boot/dts/brcm/bcm6362.dtsi
15237 index eb10341b75ba..8a21cb761ffd 100644
15238 --- a/arch/mips/boot/dts/brcm/bcm6362.dtsi
15239 +++ b/arch/mips/boot/dts/brcm/bcm6362.dtsi
15240 @@ -59,7 +59,7 @@ clkctl: clock-controller@10000004 {
15242                 periph_cntl: syscon@10000008 {
15243                         compatible = "syscon";
15244 -                       reg = <0x10000000 0xc>;
15245 +                       reg = <0x10000008 0x4>;
15246                         native-endian;
15247                 };
15249 diff --git a/arch/mips/boot/dts/brcm/bcm6368.dtsi b/arch/mips/boot/dts/brcm/bcm6368.dtsi
15250 index 52c19f40b9cc..8e87867ebc04 100644
15251 --- a/arch/mips/boot/dts/brcm/bcm6368.dtsi
15252 +++ b/arch/mips/boot/dts/brcm/bcm6368.dtsi
15253 @@ -59,7 +59,7 @@ clkctl: clock-controller@10000004 {
15255                 periph_cntl: syscon@100000008 {
15256                         compatible = "syscon";
15257 -                       reg = <0x10000000 0xc>;
15258 +                       reg = <0x10000008 0x4>;
15259                         native-endian;
15260                 };
15262 diff --git a/arch/mips/crypto/poly1305-glue.c b/arch/mips/crypto/poly1305-glue.c
15263 index fc881b46d911..bc6110fb98e0 100644
15264 --- a/arch/mips/crypto/poly1305-glue.c
15265 +++ b/arch/mips/crypto/poly1305-glue.c
15266 @@ -17,7 +17,7 @@ asmlinkage void poly1305_init_mips(void *state, const u8 *key);
15267  asmlinkage void poly1305_blocks_mips(void *state, const u8 *src, u32 len, u32 hibit);
15268  asmlinkage void poly1305_emit_mips(void *state, u8 *digest, const u32 *nonce);
15270 -void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
15271 +void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
15273         poly1305_init_mips(&dctx->h, key);
15274         dctx->s[0] = get_unaligned_le32(key + 16);
15275 diff --git a/arch/mips/generic/board-boston.its.S b/arch/mips/generic/board-boston.its.S
15276 index a7f51f97b910..c45ad2759421 100644
15277 --- a/arch/mips/generic/board-boston.its.S
15278 +++ b/arch/mips/generic/board-boston.its.S
15279 @@ -1,22 +1,22 @@
15280  / {
15281         images {
15282 -               fdt@boston {
15283 +               fdt-boston {
15284                         description = "img,boston Device Tree";
15285                         data = /incbin/("boot/dts/img/boston.dtb");
15286                         type = "flat_dt";
15287                         arch = "mips";
15288                         compression = "none";
15289 -                       hash@0 {
15290 +                       hash {
15291                                 algo = "sha1";
15292                         };
15293                 };
15294         };
15296         configurations {
15297 -               conf@boston {
15298 +               conf-boston {
15299                         description = "Boston Linux kernel";
15300 -                       kernel = "kernel@0";
15301 -                       fdt = "fdt@boston";
15302 +                       kernel = "kernel";
15303 +                       fdt = "fdt-boston";
15304                 };
15305         };
15306  };
15307 diff --git a/arch/mips/generic/board-jaguar2.its.S b/arch/mips/generic/board-jaguar2.its.S
15308 index fb0e589eeff7..c2b8d479b26c 100644
15309 --- a/arch/mips/generic/board-jaguar2.its.S
15310 +++ b/arch/mips/generic/board-jaguar2.its.S
15311 @@ -1,23 +1,23 @@
15312  /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
15313  / {
15314         images {
15315 -               fdt@jaguar2_pcb110 {
15316 +               fdt-jaguar2_pcb110 {
15317                         description = "MSCC Jaguar2 PCB110 Device Tree";
15318                         data = /incbin/("boot/dts/mscc/jaguar2_pcb110.dtb");
15319                         type = "flat_dt";
15320                         arch = "mips";
15321                         compression = "none";
15322 -                       hash@0 {
15323 +                       hash {
15324                                 algo = "sha1";
15325                         };
15326                 };
15327 -               fdt@jaguar2_pcb111 {
15328 +               fdt-jaguar2_pcb111 {
15329                         description = "MSCC Jaguar2 PCB111 Device Tree";
15330                         data = /incbin/("boot/dts/mscc/jaguar2_pcb111.dtb");
15331                         type = "flat_dt";
15332                         arch = "mips";
15333                         compression = "none";
15334 -                       hash@0 {
15335 +                       hash {
15336                                 algo = "sha1";
15337                         };
15338                 };
15339 @@ -26,14 +26,14 @@
15340         configurations {
15341                 pcb110 {
15342                         description = "Jaguar2 Linux kernel";
15343 -                       kernel = "kernel@0";
15344 -                       fdt = "fdt@jaguar2_pcb110";
15345 +                       kernel = "kernel";
15346 +                       fdt = "fdt-jaguar2_pcb110";
15347                         ramdisk = "ramdisk";
15348                 };
15349                 pcb111 {
15350                         description = "Jaguar2 Linux kernel";
15351 -                       kernel = "kernel@0";
15352 -                       fdt = "fdt@jaguar2_pcb111";
15353 +                       kernel = "kernel";
15354 +                       fdt = "fdt-jaguar2_pcb111";
15355                         ramdisk = "ramdisk";
15356                 };
15357         };
15358 diff --git a/arch/mips/generic/board-luton.its.S b/arch/mips/generic/board-luton.its.S
15359 index 39a543f62f25..bd9837c9af97 100644
15360 --- a/arch/mips/generic/board-luton.its.S
15361 +++ b/arch/mips/generic/board-luton.its.S
15362 @@ -1,13 +1,13 @@
15363  /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
15364  / {
15365         images {
15366 -               fdt@luton_pcb091 {
15367 +               fdt-luton_pcb091 {
15368                         description = "MSCC Luton PCB091 Device Tree";
15369                         data = /incbin/("boot/dts/mscc/luton_pcb091.dtb");
15370                         type = "flat_dt";
15371                         arch = "mips";
15372                         compression = "none";
15373 -                       hash@0 {
15374 +                       hash {
15375                                 algo = "sha1";
15376                         };
15377                 };
15378 @@ -16,8 +16,8 @@
15379         configurations {
15380                 pcb091 {
15381                         description = "Luton Linux kernel";
15382 -                       kernel = "kernel@0";
15383 -                       fdt = "fdt@luton_pcb091";
15384 +                       kernel = "kernel";
15385 +                       fdt = "fdt-luton_pcb091";
15386                 };
15387         };
15388  };
15389 diff --git a/arch/mips/generic/board-ni169445.its.S b/arch/mips/generic/board-ni169445.its.S
15390 index e4cb4f95a8cc..0a2e8f7a8526 100644
15391 --- a/arch/mips/generic/board-ni169445.its.S
15392 +++ b/arch/mips/generic/board-ni169445.its.S
15393 @@ -1,22 +1,22 @@
15394  / {
15395         images {
15396 -               fdt@ni169445 {
15397 +               fdt-ni169445 {
15398                         description = "NI 169445 device tree";
15399                         data = /incbin/("boot/dts/ni/169445.dtb");
15400                         type = "flat_dt";
15401                         arch = "mips";
15402                         compression = "none";
15403 -                       hash@0 {
15404 +                       hash {
15405                                 algo = "sha1";
15406                         };
15407                 };
15408         };
15410         configurations {
15411 -               conf@ni169445 {
15412 +               conf-ni169445 {
15413                         description = "NI 169445 Linux Kernel";
15414 -                       kernel = "kernel@0";
15415 -                       fdt = "fdt@ni169445";
15416 +                       kernel = "kernel";
15417 +                       fdt = "fdt-ni169445";
15418                 };
15419         };
15420  };
15421 diff --git a/arch/mips/generic/board-ocelot.its.S b/arch/mips/generic/board-ocelot.its.S
15422 index 3da23988149a..8c7e3a1b68d3 100644
15423 --- a/arch/mips/generic/board-ocelot.its.S
15424 +++ b/arch/mips/generic/board-ocelot.its.S
15425 @@ -1,40 +1,40 @@
15426  /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
15427  / {
15428         images {
15429 -               fdt@ocelot_pcb123 {
15430 +               fdt-ocelot_pcb123 {
15431                         description = "MSCC Ocelot PCB123 Device Tree";
15432                         data = /incbin/("boot/dts/mscc/ocelot_pcb123.dtb");
15433                         type = "flat_dt";
15434                         arch = "mips";
15435                         compression = "none";
15436 -                       hash@0 {
15437 +                       hash {
15438                                 algo = "sha1";
15439                         };
15440                 };
15442 -               fdt@ocelot_pcb120 {
15443 +               fdt-ocelot_pcb120 {
15444                         description = "MSCC Ocelot PCB120 Device Tree";
15445                         data = /incbin/("boot/dts/mscc/ocelot_pcb120.dtb");
15446                         type = "flat_dt";
15447                         arch = "mips";
15448                         compression = "none";
15449 -                       hash@0 {
15450 +                       hash {
15451                                 algo = "sha1";
15452                         };
15453                 };
15454         };
15456         configurations {
15457 -               conf@ocelot_pcb123 {
15458 +               conf-ocelot_pcb123 {
15459                         description = "Ocelot Linux kernel";
15460 -                       kernel = "kernel@0";
15461 -                       fdt = "fdt@ocelot_pcb123";
15462 +                       kernel = "kernel";
15463 +                       fdt = "fdt-ocelot_pcb123";
15464                 };
15466 -               conf@ocelot_pcb120 {
15467 +               conf-ocelot_pcb120 {
15468                         description = "Ocelot Linux kernel";
15469 -                       kernel = "kernel@0";
15470 -                       fdt = "fdt@ocelot_pcb120";
15471 +                       kernel = "kernel";
15472 +                       fdt = "fdt-ocelot_pcb120";
15473                 };
15474         };
15475  };
15476 diff --git a/arch/mips/generic/board-serval.its.S b/arch/mips/generic/board-serval.its.S
15477 index 4ea4fc9d757f..dde833efe980 100644
15478 --- a/arch/mips/generic/board-serval.its.S
15479 +++ b/arch/mips/generic/board-serval.its.S
15480 @@ -1,13 +1,13 @@
15481  /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
15482  / {
15483         images {
15484 -               fdt@serval_pcb105 {
15485 +               fdt-serval_pcb105 {
15486                         description = "MSCC Serval PCB105 Device Tree";
15487                         data = /incbin/("boot/dts/mscc/serval_pcb105.dtb");
15488                         type = "flat_dt";
15489                         arch = "mips";
15490                         compression = "none";
15491 -                       hash@0 {
15492 +                       hash {
15493                                 algo = "sha1";
15494                         };
15495                 };
15496 @@ -16,8 +16,8 @@
15497         configurations {
15498                 pcb105 {
15499                         description = "Serval Linux kernel";
15500 -                       kernel = "kernel@0";
15501 -                       fdt = "fdt@serval_pcb105";
15502 +                       kernel = "kernel";
15503 +                       fdt = "fdt-serval_pcb105";
15504                         ramdisk = "ramdisk";
15505                 };
15506         };
15507 diff --git a/arch/mips/generic/board-xilfpga.its.S b/arch/mips/generic/board-xilfpga.its.S
15508 index a2e773d3f14f..08c1e900eb4e 100644
15509 --- a/arch/mips/generic/board-xilfpga.its.S
15510 +++ b/arch/mips/generic/board-xilfpga.its.S
15511 @@ -1,22 +1,22 @@
15512  / {
15513         images {
15514 -               fdt@xilfpga {
15515 +               fdt-xilfpga {
15516                         description = "MIPSfpga (xilfpga) Device Tree";
15517                         data = /incbin/("boot/dts/xilfpga/nexys4ddr.dtb");
15518                         type = "flat_dt";
15519                         arch = "mips";
15520                         compression = "none";
15521 -                       hash@0 {
15522 +                       hash {
15523                                 algo = "sha1";
15524                         };
15525                 };
15526         };
15528         configurations {
15529 -               conf@xilfpga {
15530 +               conf-xilfpga {
15531                         description = "MIPSfpga Linux kernel";
15532 -                       kernel = "kernel@0";
15533 -                       fdt = "fdt@xilfpga";
15534 +                       kernel = "kernel";
15535 +                       fdt = "fdt-xilfpga";
15536                 };
15537         };
15538  };
15539 diff --git a/arch/mips/generic/vmlinux.its.S b/arch/mips/generic/vmlinux.its.S
15540 index 1a08438fd893..3e254676540f 100644
15541 --- a/arch/mips/generic/vmlinux.its.S
15542 +++ b/arch/mips/generic/vmlinux.its.S
15543 @@ -6,7 +6,7 @@
15544         #address-cells = <ADDR_CELLS>;
15546         images {
15547 -               kernel@0 {
15548 +               kernel {
15549                         description = KERNEL_NAME;
15550                         data = /incbin/(VMLINUX_BINARY);
15551                         type = "kernel";
15552 @@ -15,18 +15,18 @@
15553                         compression = VMLINUX_COMPRESSION;
15554                         load = /bits/ ADDR_BITS <VMLINUX_LOAD_ADDRESS>;
15555                         entry = /bits/ ADDR_BITS <VMLINUX_ENTRY_ADDRESS>;
15556 -                       hash@0 {
15557 +                       hash {
15558                                 algo = "sha1";
15559                         };
15560                 };
15561         };
15563         configurations {
15564 -               default = "conf@default";
15565 +               default = "conf-default";
15567 -               conf@default {
15568 +               conf-default {
15569                         description = "Generic Linux kernel";
15570 -                       kernel = "kernel@0";
15571 +                       kernel = "kernel";
15572                 };
15573         };
15574  };
15575 diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
15576 index 86f2323ebe6b..ca83ada7015f 100644
15577 --- a/arch/mips/include/asm/asmmacro.h
15578 +++ b/arch/mips/include/asm/asmmacro.h
15579 @@ -44,8 +44,7 @@
15580         .endm
15581  #endif
15583 -#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \
15584 -    defined(CONFIG_CPU_MIPSR6)
15585 +#ifdef CONFIG_CPU_HAS_DIEI
15586         .macro  local_irq_enable reg=t0
15587         ei
15588         irq_enable_hazard
15589 diff --git a/arch/mips/include/asm/div64.h b/arch/mips/include/asm/div64.h
15590 index dc5ea5736440..ceece76fc971 100644
15591 --- a/arch/mips/include/asm/div64.h
15592 +++ b/arch/mips/include/asm/div64.h
15593 @@ -1,5 +1,5 @@
15594  /*
15595 - * Copyright (C) 2000, 2004  Maciej W. Rozycki
15596 + * Copyright (C) 2000, 2004, 2021  Maciej W. Rozycki
15597   * Copyright (C) 2003, 07 Ralf Baechle (ralf@linux-mips.org)
15598   *
15599   * This file is subject to the terms and conditions of the GNU General Public
15600 @@ -9,25 +9,18 @@
15601  #ifndef __ASM_DIV64_H
15602  #define __ASM_DIV64_H
15604 -#include <asm-generic/div64.h>
15606 -#if BITS_PER_LONG == 64
15607 +#include <asm/bitsperlong.h>
15609 -#include <linux/types.h>
15610 +#if BITS_PER_LONG == 32
15612  /*
15613   * No traps on overflows for any of these...
15614   */
15616 -#define __div64_32(n, base)                                            \
15617 -({                                                                     \
15618 +#define do_div64_32(res, high, low, base) ({                           \
15619         unsigned long __cf, __tmp, __tmp2, __i;                         \
15620         unsigned long __quot32, __mod32;                                \
15621 -       unsigned long __high, __low;                                    \
15622 -       unsigned long long __n;                                         \
15623                                                                         \
15624 -       __high = *__n >> 32;                                            \
15625 -       __low = __n;                                                    \
15626         __asm__(                                                        \
15627         "       .set    push                                    \n"     \
15628         "       .set    noat                                    \n"     \
15629 @@ -51,18 +44,48 @@
15630         "       subu    %0, %0, %z6                             \n"     \
15631         "       addiu   %2, %2, 1                               \n"     \
15632         "3:                                                     \n"     \
15633 -       "       bnez    %4, 0b\n\t"                                     \
15634 -       "        srl    %5, %1, 0x1f\n\t"                               \
15635 +       "       bnez    %4, 0b                                  \n"     \
15636 +       "        srl    %5, %1, 0x1f                            \n"     \
15637         "       .set    pop"                                            \
15638         : "=&r" (__mod32), "=&r" (__tmp),                               \
15639           "=&r" (__quot32), "=&r" (__cf),                               \
15640           "=&r" (__i), "=&r" (__tmp2)                                   \
15641 -       : "Jr" (base), "0" (__high), "1" (__low));                      \
15642 +       : "Jr" (base), "0" (high), "1" (low));                          \
15643                                                                         \
15644 -       (__n) = __quot32;                                               \
15645 +       (res) = __quot32;                                               \
15646         __mod32;                                                        \
15647  })
15649 -#endif /* BITS_PER_LONG == 64 */
15650 +#define __div64_32(n, base) ({                                         \
15651 +       unsigned long __upper, __low, __high, __radix;                  \
15652 +       unsigned long long __quot;                                      \
15653 +       unsigned long long __div;                                       \
15654 +       unsigned long __mod;                                            \
15655 +                                                                       \
15656 +       __div = (*n);                                                   \
15657 +       __radix = (base);                                               \
15658 +                                                                       \
15659 +       __high = __div >> 32;                                           \
15660 +       __low = __div;                                                  \
15661 +                                                                       \
15662 +       if (__high < __radix) {                                         \
15663 +               __upper = __high;                                       \
15664 +               __high = 0;                                             \
15665 +       } else {                                                        \
15666 +               __upper = __high % __radix;                             \
15667 +               __high /= __radix;                                      \
15668 +       }                                                               \
15669 +                                                                       \
15670 +       __mod = do_div64_32(__low, __upper, __low, __radix);            \
15671 +                                                                       \
15672 +       __quot = __high;                                                \
15673 +       __quot = __quot << 32 | __low;                                  \
15674 +       (*n) = __quot;                                                  \
15675 +       __mod;                                                          \
15678 +#endif /* BITS_PER_LONG == 32 */
15680 +#include <asm-generic/div64.h>
15682  #endif /* __ASM_DIV64_H */
15683 diff --git a/arch/mips/include/asm/vdso/gettimeofday.h b/arch/mips/include/asm/vdso/gettimeofday.h
15684 index 2203e2d0ae2a..44a45f3fa4b0 100644
15685 --- a/arch/mips/include/asm/vdso/gettimeofday.h
15686 +++ b/arch/mips/include/asm/vdso/gettimeofday.h
15687 @@ -20,6 +20,12 @@
15689  #define VDSO_HAS_CLOCK_GETRES          1
15691 +#if MIPS_ISA_REV < 6
15692 +#define VDSO_SYSCALL_CLOBBERS "hi", "lo",
15693 +#else
15694 +#define VDSO_SYSCALL_CLOBBERS
15695 +#endif
15697  static __always_inline long gettimeofday_fallback(
15698                                 struct __kernel_old_timeval *_tv,
15699                                 struct timezone *_tz)
15700 @@ -35,7 +41,9 @@ static __always_inline long gettimeofday_fallback(
15701         : "=r" (ret), "=r" (error)
15702         : "r" (tv), "r" (tz), "r" (nr)
15703         : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
15704 -         "$14", "$15", "$24", "$25", "hi", "lo", "memory");
15705 +         "$14", "$15", "$24", "$25",
15706 +         VDSO_SYSCALL_CLOBBERS
15707 +         "memory");
15709         return error ? -ret : ret;
15711 @@ -59,7 +67,9 @@ static __always_inline long clock_gettime_fallback(
15712         : "=r" (ret), "=r" (error)
15713         : "r" (clkid), "r" (ts), "r" (nr)
15714         : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
15715 -         "$14", "$15", "$24", "$25", "hi", "lo", "memory");
15716 +         "$14", "$15", "$24", "$25",
15717 +         VDSO_SYSCALL_CLOBBERS
15718 +         "memory");
15720         return error ? -ret : ret;
15722 @@ -83,7 +93,9 @@ static __always_inline int clock_getres_fallback(
15723         : "=r" (ret), "=r" (error)
15724         : "r" (clkid), "r" (ts), "r" (nr)
15725         : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
15726 -         "$14", "$15", "$24", "$25", "hi", "lo", "memory");
15727 +         "$14", "$15", "$24", "$25",
15728 +         VDSO_SYSCALL_CLOBBERS
15729 +         "memory");
15731         return error ? -ret : ret;
15733 @@ -105,7 +117,9 @@ static __always_inline long clock_gettime32_fallback(
15734         : "=r" (ret), "=r" (error)
15735         : "r" (clkid), "r" (ts), "r" (nr)
15736         : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
15737 -         "$14", "$15", "$24", "$25", "hi", "lo", "memory");
15738 +         "$14", "$15", "$24", "$25",
15739 +         VDSO_SYSCALL_CLOBBERS
15740 +         "memory");
15742         return error ? -ret : ret;
15744 @@ -125,7 +139,9 @@ static __always_inline int clock_getres32_fallback(
15745         : "=r" (ret), "=r" (error)
15746         : "r" (clkid), "r" (ts), "r" (nr)
15747         : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
15748 -         "$14", "$15", "$24", "$25", "hi", "lo", "memory");
15749 +         "$14", "$15", "$24", "$25",
15750 +         VDSO_SYSCALL_CLOBBERS
15751 +         "memory");
15753         return error ? -ret : ret;
15755 diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
15756 index b71892064f27..0ef240adefb5 100644
15757 --- a/arch/mips/kernel/cpu-probe.c
15758 +++ b/arch/mips/kernel/cpu-probe.c
15759 @@ -1752,7 +1752,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
15760                         set_isa(c, MIPS_CPU_ISA_M64R2);
15761                         break;
15762                 }
15763 -               c->writecombine = _CACHE_UNCACHED_ACCELERATED;
15764                 c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_EXT |
15765                                 MIPS_ASE_LOONGSON_EXT2);
15766                 break;
15767 @@ -1782,7 +1781,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
15768                  * register, we correct it here.
15769                  */
15770                 c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
15771 -               c->writecombine = _CACHE_UNCACHED_ACCELERATED;
15772                 c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
15773                         MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2);
15774                 c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */
15775 @@ -1793,7 +1791,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
15776                 set_elf_platform(cpu, "loongson3a");
15777                 set_isa(c, MIPS_CPU_ISA_M64R2);
15778                 decode_cpucfg(c);
15779 -               c->writecombine = _CACHE_UNCACHED_ACCELERATED;
15780                 break;
15781         default:
15782                 panic("Unknown Loongson Processor ID!");
15783 diff --git a/arch/mips/loongson64/init.c b/arch/mips/loongson64/init.c
15784 index cfa788bca871..1c664b23c0f9 100644
15785 --- a/arch/mips/loongson64/init.c
15786 +++ b/arch/mips/loongson64/init.c
15787 @@ -126,7 +126,7 @@ static int __init add_legacy_isa_io(struct fwnode_handle *fwnode, resource_size_
15788                 return -ENOMEM;
15790         range->fwnode = fwnode;
15791 -       range->size = size;
15792 +       range->size = size = round_up(size, PAGE_SIZE);
15793         range->hw_start = hw_start;
15794         range->flags = LOGIC_PIO_CPU_MMIO;
15796 diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c
15797 index 39052de915f3..3a909194284a 100644
15798 --- a/arch/mips/pci/pci-legacy.c
15799 +++ b/arch/mips/pci/pci-legacy.c
15800 @@ -166,8 +166,13 @@ void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node)
15801                         res = hose->mem_resource;
15802                         break;
15803                 }
15804 -               if (res != NULL)
15805 -                       of_pci_range_to_resource(&range, node, res);
15806 +               if (res != NULL) {
15807 +                       res->name = node->full_name;
15808 +                       res->flags = range.flags;
15809 +                       res->start = range.cpu_addr;
15810 +                       res->end = range.cpu_addr + range.size - 1;
15811 +                       res->parent = res->child = res->sibling = NULL;
15812 +               }
15813         }
15816 diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c
15817 index d36061603752..e032932348d6 100644
15818 --- a/arch/mips/pci/pci-mt7620.c
15819 +++ b/arch/mips/pci/pci-mt7620.c
15820 @@ -30,6 +30,7 @@
15821  #define RALINK_GPIOMODE                        0x60
15823  #define PPLL_CFG1                      0x9c
15824 +#define PPLL_LD                                BIT(23)
15826  #define PPLL_DRV                       0xa0
15827  #define PDRV_SW_SET                    BIT(31)
15828 @@ -239,8 +240,8 @@ static int mt7620_pci_hw_init(struct platform_device *pdev)
15829         rt_sysc_m32(0, RALINK_PCIE0_CLK_EN, RALINK_CLKCFG1);
15830         mdelay(100);
15832 -       if (!(rt_sysc_r32(PPLL_CFG1) & PDRV_SW_SET)) {
15833 -               dev_err(&pdev->dev, "MT7620 PPLL unlock\n");
15834 +       if (!(rt_sysc_r32(PPLL_CFG1) & PPLL_LD)) {
15835 +               dev_err(&pdev->dev, "pcie PLL not locked, aborting init\n");
15836                 reset_control_assert(rstpcie0);
15837                 rt_sysc_m32(RALINK_PCIE0_CLK_EN, 0, RALINK_CLKCFG1);
15838                 return -1;
15839 diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c
15840 index e1f12e398136..f1538d2be89e 100644
15841 --- a/arch/mips/pci/pci-rt2880.c
15842 +++ b/arch/mips/pci/pci-rt2880.c
15843 @@ -180,7 +180,6 @@ static inline void rt2880_pci_write_u32(unsigned long reg, u32 val)
15845  int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
15847 -       u16 cmd;
15848         int irq = -1;
15850         if (dev->bus->number != 0)
15851 @@ -188,8 +187,6 @@ int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
15853         switch (PCI_SLOT(dev->devfn)) {
15854         case 0x00:
15855 -               rt2880_pci_write_u32(PCI_BASE_ADDRESS_0, 0x08000000);
15856 -               (void) rt2880_pci_read_u32(PCI_BASE_ADDRESS_0);
15857                 break;
15858         case 0x11:
15859                 irq = RT288X_CPU_IRQ_PCI;
15860 @@ -201,16 +198,6 @@ int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
15861                 break;
15862         }
15864 -       pci_write_config_byte((struct pci_dev *) dev,
15865 -               PCI_CACHE_LINE_SIZE, 0x14);
15866 -       pci_write_config_byte((struct pci_dev *) dev, PCI_LATENCY_TIMER, 0xFF);
15867 -       pci_read_config_word((struct pci_dev *) dev, PCI_COMMAND, &cmd);
15868 -       cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
15869 -               PCI_COMMAND_INVALIDATE | PCI_COMMAND_FAST_BACK |
15870 -               PCI_COMMAND_SERR | PCI_COMMAND_WAIT | PCI_COMMAND_PARITY;
15871 -       pci_write_config_word((struct pci_dev *) dev, PCI_COMMAND, cmd);
15872 -       pci_write_config_byte((struct pci_dev *) dev, PCI_INTERRUPT_LINE,
15873 -                             dev->irq);
15874         return irq;
15877 @@ -251,6 +238,30 @@ static int rt288x_pci_probe(struct platform_device *pdev)
15879  int pcibios_plat_dev_init(struct pci_dev *dev)
15881 +       static bool slot0_init;
15883 +       /*
15884 +        * Nobody seems to initialize slot 0, but this platform requires it, so
15885 +        * do it once when some other slot is being enabled. The PCI subsystem
15886 +        * should configure other slots properly, so no need to do anything
15887 +        * special for those.
15888 +        */
15889 +       if (!slot0_init && dev->bus->number == 0) {
15890 +               u16 cmd;
15891 +               u32 bar0;
15893 +               slot0_init = true;
15895 +               pci_bus_write_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0,
15896 +                                          0x08000000);
15897 +               pci_bus_read_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0,
15898 +                                         &bar0);
15900 +               pci_bus_read_config_word(dev->bus, 0, PCI_COMMAND, &cmd);
15901 +               cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
15902 +               pci_bus_write_config_word(dev->bus, 0, PCI_COMMAND, cmd);
15903 +       }
15905         return 0;
15908 diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
15909 index 386ae12d8523..57c0ab71d51e 100644
15910 --- a/arch/powerpc/Kconfig
15911 +++ b/arch/powerpc/Kconfig
15912 @@ -224,7 +224,7 @@ config PPC
15913         select HAVE_LIVEPATCH                   if HAVE_DYNAMIC_FTRACE_WITH_REGS
15914         select HAVE_MOD_ARCH_SPECIFIC
15915         select HAVE_NMI                         if PERF_EVENTS || (PPC64 && PPC_BOOK3S)
15916 -       select HAVE_HARDLOCKUP_DETECTOR_ARCH    if (PPC64 && PPC_BOOK3S)
15917 +       select HAVE_HARDLOCKUP_DETECTOR_ARCH    if PPC64 && PPC_BOOK3S && SMP
15918         select HAVE_OPTPROBES                   if PPC64
15919         select HAVE_PERF_EVENTS
15920         select HAVE_PERF_EVENTS_NMI             if PPC64
15921 diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
15922 index ae084357994e..6342f9da4545 100644
15923 --- a/arch/powerpc/Kconfig.debug
15924 +++ b/arch/powerpc/Kconfig.debug
15925 @@ -353,6 +353,7 @@ config PPC_EARLY_DEBUG_CPM_ADDR
15926  config FAIL_IOMMU
15927         bool "Fault-injection capability for IOMMU"
15928         depends on FAULT_INJECTION
15929 +       depends on PCI || IBMVIO
15930         help
15931           Provide fault-injection capability for IOMMU. Each device can
15932           be selectively enabled via the fail_iommu property.
15933 diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
15934 index 058601efbc8a..b703330459b8 100644
15935 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h
15936 +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
15937 @@ -7,6 +7,7 @@
15938  #ifndef __ASSEMBLY__
15939  #include <linux/mmdebug.h>
15940  #include <linux/bug.h>
15941 +#include <linux/sizes.h>
15942  #endif
15944  /*
15945 @@ -323,7 +324,8 @@ extern unsigned long pci_io_base;
15946  #define  PHB_IO_END    (KERN_IO_START + FULL_IO_SIZE)
15947  #define IOREMAP_BASE   (PHB_IO_END)
15948  #define IOREMAP_START  (ioremap_bot)
15949 -#define IOREMAP_END    (KERN_IO_END)
15950 +#define IOREMAP_END    (KERN_IO_END - FIXADDR_SIZE)
15951 +#define FIXADDR_SIZE   SZ_32M
15953  /* Advertise special mapping type for AGP */
15954  #define HAVE_PAGE_AGP
15955 diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
15956 index c7813dc628fc..59cab558e2f0 100644
15957 --- a/arch/powerpc/include/asm/book3s/64/radix.h
15958 +++ b/arch/powerpc/include/asm/book3s/64/radix.h
15959 @@ -222,8 +222,10 @@ static inline void radix__set_pte_at(struct mm_struct *mm, unsigned long addr,
15960          * from ptesync, it should probably go into update_mmu_cache, rather
15961          * than set_pte_at (which is used to set ptes unrelated to faults).
15962          *
15963 -        * Spurious faults to vmalloc region are not tolerated, so there is
15964 -        * a ptesync in flush_cache_vmap.
15965 +        * Spurious faults from the kernel memory are not tolerated, so there
15966 +        * is a ptesync in flush_cache_vmap, and __map_kernel_page() follows
15967 +        * the pte update sequence from ISA Book III 6.10 Translation Table
15968 +        * Update Synchronization Requirements.
15969          */
15972 diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
15973 index 8d03c16a3663..947b5b9c4424 100644
15974 --- a/arch/powerpc/include/asm/fixmap.h
15975 +++ b/arch/powerpc/include/asm/fixmap.h
15976 @@ -23,12 +23,17 @@
15977  #include <asm/kmap_size.h>
15978  #endif
15980 +#ifdef CONFIG_PPC64
15981 +#define FIXADDR_TOP    (IOREMAP_END + FIXADDR_SIZE)
15982 +#else
15983 +#define FIXADDR_SIZE   0
15984  #ifdef CONFIG_KASAN
15985  #include <asm/kasan.h>
15986  #define FIXADDR_TOP    (KASAN_SHADOW_START - PAGE_SIZE)
15987  #else
15988  #define FIXADDR_TOP    ((unsigned long)(-PAGE_SIZE))
15989  #endif
15990 +#endif
15992  /*
15993   * Here we define all the compile-time 'special' virtual
15994 @@ -50,6 +55,7 @@
15995   */
15996  enum fixed_addresses {
15997         FIX_HOLE,
15998 +#ifdef CONFIG_PPC32
15999         /* reserve the top 128K for early debugging purposes */
16000         FIX_EARLY_DEBUG_TOP = FIX_HOLE,
16001         FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128K, PAGE_SIZE)/PAGE_SIZE)-1,
16002 @@ -72,6 +78,7 @@ enum fixed_addresses {
16003                        FIX_IMMR_SIZE,
16004  #endif
16005         /* FIX_PCIE_MCFG, */
16006 +#endif /* CONFIG_PPC32 */
16007         __end_of_permanent_fixed_addresses,
16009  #define NR_FIX_BTMAPS          (SZ_256K / PAGE_SIZE)
16010 @@ -98,6 +105,8 @@ enum fixed_addresses {
16011  static inline void __set_fixmap(enum fixed_addresses idx,
16012                                 phys_addr_t phys, pgprot_t flags)
16014 +       BUILD_BUG_ON(IS_ENABLED(CONFIG_PPC64) && __FIXADDR_SIZE > FIXADDR_SIZE);
16016         if (__builtin_constant_p(idx))
16017                 BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
16018         else if (WARN_ON(idx >= __end_of_fixed_addresses))
16019 diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
16020 index e8d09a841373..31ed5356590a 100644
16021 --- a/arch/powerpc/include/asm/interrupt.h
16022 +++ b/arch/powerpc/include/asm/interrupt.h
16023 @@ -138,6 +138,13 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
16024         local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
16025         local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
16027 +       if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !(regs->msr & MSR_PR) &&
16028 +                               regs->nip < (unsigned long)__end_interrupts) {
16029 +               // Kernel code running below __end_interrupts is
16030 +               // implicitly soft-masked.
16031 +               regs->softe = IRQS_ALL_DISABLED;
16032 +       }
16034         /* Don't do any per-CPU operations until interrupt state is fixed */
16035  #endif
16036         /* Allow DEC and PMI to be traced when they are soft-NMI */
16037 diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
16038 index 652ce85f9410..4bc45d3ed8b0 100644
16039 --- a/arch/powerpc/include/asm/mmu_context.h
16040 +++ b/arch/powerpc/include/asm/mmu_context.h
16041 @@ -263,7 +263,7 @@ extern void arch_exit_mmap(struct mm_struct *mm);
16042  static inline void arch_unmap(struct mm_struct *mm,
16043                               unsigned long start, unsigned long end)
16045 -       unsigned long vdso_base = (unsigned long)mm->context.vdso - PAGE_SIZE;
16046 +       unsigned long vdso_base = (unsigned long)mm->context.vdso;
16048         if (start <= vdso_base && vdso_base < end)
16049                 mm->context.vdso = NULL;
16050 diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
16051 index 6cb8aa357191..57cd3892bfe0 100644
16052 --- a/arch/powerpc/include/asm/nohash/64/pgtable.h
16053 +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
16054 @@ -6,6 +6,8 @@
16055   * the ppc64 non-hashed page table.
16056   */
16058 +#include <linux/sizes.h>
16060  #include <asm/nohash/64/pgtable-4k.h>
16061  #include <asm/barrier.h>
16062  #include <asm/asm-const.h>
16063 @@ -54,7 +56,8 @@
16064  #define  PHB_IO_END    (KERN_IO_START + FULL_IO_SIZE)
16065  #define IOREMAP_BASE   (PHB_IO_END)
16066  #define IOREMAP_START  (ioremap_bot)
16067 -#define IOREMAP_END    (KERN_VIRT_START + KERN_VIRT_SIZE)
16068 +#define IOREMAP_END    (KERN_VIRT_START + KERN_VIRT_SIZE - FIXADDR_SIZE)
16069 +#define FIXADDR_SIZE   SZ_32M
16072  /*
16073 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
16074 index da103e92c112..37d0b8c76a59 100644
16075 --- a/arch/powerpc/include/asm/reg.h
16076 +++ b/arch/powerpc/include/asm/reg.h
16077 @@ -441,6 +441,7 @@
16078  #define   LPCR_VRMA_LP1                ASM_CONST(0x0000800000000000)
16079  #define   LPCR_RMLS            0x1C000000      /* Implementation dependent RMO limit sel */
16080  #define   LPCR_RMLS_SH         26
16081 +#define   LPCR_HAIL            ASM_CONST(0x0000000004000000)   /* HV AIL (ISAv3.1) */
16082  #define   LPCR_ILE             ASM_CONST(0x0000000002000000)   /* !HV irqs set MSR:LE */
16083  #define   LPCR_AIL             ASM_CONST(0x0000000001800000)   /* Alternate interrupt location */
16084  #define   LPCR_AIL_0           ASM_CONST(0x0000000000000000)   /* MMU off exception offset 0x0 */
16085 diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
16086 index 7a13bc20f0a0..47081a9e13ca 100644
16087 --- a/arch/powerpc/include/asm/smp.h
16088 +++ b/arch/powerpc/include/asm/smp.h
16089 @@ -121,6 +121,11 @@ static inline struct cpumask *cpu_sibling_mask(int cpu)
16090         return per_cpu(cpu_sibling_map, cpu);
16093 +static inline struct cpumask *cpu_core_mask(int cpu)
16095 +       return per_cpu(cpu_core_map, cpu);
16098  static inline struct cpumask *cpu_l2_cache_mask(int cpu)
16100         return per_cpu(cpu_l2_cache_map, cpu);
16101 diff --git a/arch/powerpc/include/uapi/asm/errno.h b/arch/powerpc/include/uapi/asm/errno.h
16102 index cc79856896a1..4ba87de32be0 100644
16103 --- a/arch/powerpc/include/uapi/asm/errno.h
16104 +++ b/arch/powerpc/include/uapi/asm/errno.h
16105 @@ -2,6 +2,7 @@
16106  #ifndef _ASM_POWERPC_ERRNO_H
16107  #define _ASM_POWERPC_ERRNO_H
16109 +#undef EDEADLOCK
16110  #include <asm-generic/errno.h>
16112  #undef EDEADLOCK
16113 diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
16114 index cd60bc1c8701..7040e430a124 100644
16115 --- a/arch/powerpc/kernel/eeh.c
16116 +++ b/arch/powerpc/kernel/eeh.c
16117 @@ -362,14 +362,11 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
16118         pa = pte_pfn(*ptep);
16120         /* On radix we can do hugepage mappings for io, so handle that */
16121 -       if (hugepage_shift) {
16122 -               pa <<= hugepage_shift;
16123 -               pa |= token & ((1ul << hugepage_shift) - 1);
16124 -       } else {
16125 -               pa <<= PAGE_SHIFT;
16126 -               pa |= token & (PAGE_SIZE - 1);
16127 -       }
16128 +       if (!hugepage_shift)
16129 +               hugepage_shift = PAGE_SHIFT;
16131 +       pa <<= PAGE_SHIFT;
16132 +       pa |= token & ((1ul << hugepage_shift) - 1);
16133         return pa;
16136 diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
16137 index 8482739d42f3..eddf362caedc 100644
16138 --- a/arch/powerpc/kernel/fadump.c
16139 +++ b/arch/powerpc/kernel/fadump.c
16140 @@ -292,7 +292,7 @@ static void fadump_show_config(void)
16141   * that is required for a kernel to boot successfully.
16142   *
16143   */
16144 -static inline u64 fadump_calculate_reserve_size(void)
16145 +static __init u64 fadump_calculate_reserve_size(void)
16147         u64 base, size, bootmem_min;
16148         int ret;
16149 diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h
16150 index 5d4706c14572..cf8ca08295bf 100644
16151 --- a/arch/powerpc/kernel/head_32.h
16152 +++ b/arch/powerpc/kernel/head_32.h
16153 @@ -261,11 +261,7 @@
16154         lis     r1, emergency_ctx@ha
16155  #endif
16156         lwz     r1, emergency_ctx@l(r1)
16157 -       cmpwi   cr1, r1, 0
16158 -       bne     cr1, 1f
16159 -       lis     r1, init_thread_union@ha
16160 -       addi    r1, r1, init_thread_union@l
16161 -1:     addi    r1, r1, THREAD_SIZE - INT_FRAME_SIZE
16162 +       addi    r1, r1, THREAD_SIZE - INT_FRAME_SIZE
16163         EXCEPTION_PROLOG_2
16164         SAVE_NVGPRS(r11)
16165         addi    r3, r1, STACK_FRAME_OVERHEAD
16166 diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
16167 index c475a229a42a..352346e14a08 100644
16168 --- a/arch/powerpc/kernel/interrupt.c
16169 +++ b/arch/powerpc/kernel/interrupt.c
16170 @@ -34,11 +34,11 @@ notrace long system_call_exception(long r3, long r4, long r5,
16171         if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
16172                 BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
16174 +       trace_hardirqs_off(); /* finish reconciling */
16176         CT_WARN_ON(ct_state() == CONTEXT_KERNEL);
16177         user_exit_irqoff();
16179 -       trace_hardirqs_off(); /* finish reconciling */
16181         if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x))
16182                 BUG_ON(!(regs->msr & MSR_RI));
16183         BUG_ON(!(regs->msr & MSR_PR));
16184 diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
16185 index c00214a4355c..4023f91defa6 100644
16186 --- a/arch/powerpc/kernel/iommu.c
16187 +++ b/arch/powerpc/kernel/iommu.c
16188 @@ -1096,7 +1096,7 @@ int iommu_take_ownership(struct iommu_table *tbl)
16190         spin_lock_irqsave(&tbl->large_pool.lock, flags);
16191         for (i = 0; i < tbl->nr_pools; i++)
16192 -               spin_lock(&tbl->pools[i].lock);
16193 +               spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
16195         iommu_table_release_pages(tbl);
16197 @@ -1124,7 +1124,7 @@ void iommu_release_ownership(struct iommu_table *tbl)
16199         spin_lock_irqsave(&tbl->large_pool.lock, flags);
16200         for (i = 0; i < tbl->nr_pools; i++)
16201 -               spin_lock(&tbl->pools[i].lock);
16202 +               spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
16204         memset(tbl->it_map, 0, sz);
16206 diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
16207 index 9a4797d1d40d..a8b2d6bfc1ca 100644
16208 --- a/arch/powerpc/kernel/prom.c
16209 +++ b/arch/powerpc/kernel/prom.c
16210 @@ -267,7 +267,7 @@ static struct feature_property {
16211  };
16213  #if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU)
16214 -static inline void identical_pvr_fixup(unsigned long node)
16215 +static __init void identical_pvr_fixup(unsigned long node)
16217         unsigned int pvr;
16218         const char *model = of_get_flat_dt_prop(node, "model", NULL);
16219 diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
16220 index 8ba49a6bf515..d7c1f92152af 100644
16221 --- a/arch/powerpc/kernel/setup_32.c
16222 +++ b/arch/powerpc/kernel/setup_32.c
16223 @@ -164,7 +164,7 @@ void __init irqstack_early_init(void)
16226  #ifdef CONFIG_VMAP_STACK
16227 -void *emergency_ctx[NR_CPUS] __ro_after_init;
16228 +void *emergency_ctx[NR_CPUS] __ro_after_init = {[0] = &init_stack};
16230  void __init emergency_stack_init(void)
16232 diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
16233 index 560ed8b975e7..830fee91b2d9 100644
16234 --- a/arch/powerpc/kernel/setup_64.c
16235 +++ b/arch/powerpc/kernel/setup_64.c
16236 @@ -232,10 +232,23 @@ static void cpu_ready_for_interrupts(void)
16237          * If we are not in hypervisor mode the job is done once for
16238          * the whole partition in configure_exceptions().
16239          */
16240 -       if (cpu_has_feature(CPU_FTR_HVMODE) &&
16241 -           cpu_has_feature(CPU_FTR_ARCH_207S)) {
16242 +       if (cpu_has_feature(CPU_FTR_HVMODE)) {
16243                 unsigned long lpcr = mfspr(SPRN_LPCR);
16244 -               mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
16245 +               unsigned long new_lpcr = lpcr;
16247 +               if (cpu_has_feature(CPU_FTR_ARCH_31)) {
16248 +                       /* P10 DD1 does not have HAIL */
16249 +                       if (pvr_version_is(PVR_POWER10) &&
16250 +                                       (mfspr(SPRN_PVR) & 0xf00) == 0x100)
16251 +                               new_lpcr |= LPCR_AIL_3;
16252 +                       else
16253 +                               new_lpcr |= LPCR_HAIL;
16254 +               } else if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
16255 +                       new_lpcr |= LPCR_AIL_3;
16256 +               }
16258 +               if (new_lpcr != lpcr)
16259 +                       mtspr(SPRN_LPCR, new_lpcr);
16260         }
16262         /*
16263 diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
16264 index 5a4d59a1070d..c2473e20f5f5 100644
16265 --- a/arch/powerpc/kernel/smp.c
16266 +++ b/arch/powerpc/kernel/smp.c
16267 @@ -1057,17 +1057,12 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
16268                                 local_memory_node(numa_cpu_lookup_table[cpu]));
16269                 }
16270  #endif
16271 -               /*
16272 -                * cpu_core_map is now more updated and exists only since
16273 -                * its been exported for long. It only will have a snapshot
16274 -                * of cpu_cpu_mask.
16275 -                */
16276 -               cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu));
16277         }
16279         /* Init the cpumasks so the boot CPU is related to itself */
16280         cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
16281         cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
16282 +       cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
16284         if (has_coregroup_support())
16285                 cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid));
16286 @@ -1408,6 +1403,9 @@ static void remove_cpu_from_masks(int cpu)
16287                         set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
16288         }
16290 +       for_each_cpu(i, cpu_core_mask(cpu))
16291 +               set_cpus_unrelated(cpu, i, cpu_core_mask);
16293         if (has_coregroup_support()) {
16294                 for_each_cpu(i, cpu_coregroup_mask(cpu))
16295                         set_cpus_unrelated(cpu, i, cpu_coregroup_mask);
16296 @@ -1468,8 +1466,11 @@ static void update_coregroup_mask(int cpu, cpumask_var_t *mask)
16298  static void add_cpu_to_masks(int cpu)
16300 +       struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
16301         int first_thread = cpu_first_thread_sibling(cpu);
16302 +       int chip_id = cpu_to_chip_id(cpu);
16303         cpumask_var_t mask;
16304 +       bool ret;
16305         int i;
16307         /*
16308 @@ -1485,12 +1486,36 @@ static void add_cpu_to_masks(int cpu)
16309         add_cpu_to_smallcore_masks(cpu);
16311         /* In CPU-hotplug path, hence use GFP_ATOMIC */
16312 -       alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
16313 +       ret = alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
16314         update_mask_by_l2(cpu, &mask);
16316         if (has_coregroup_support())
16317                 update_coregroup_mask(cpu, &mask);
16319 +       if (chip_id == -1 || !ret) {
16320 +               cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu));
16321 +               goto out;
16322 +       }
16324 +       if (shared_caches)
16325 +               submask_fn = cpu_l2_cache_mask;
16327 +       /* Update core_mask with all the CPUs that are part of submask */
16328 +       or_cpumasks_related(cpu, cpu, submask_fn, cpu_core_mask);
16330 +       /* Skip all CPUs already part of current CPU core mask */
16331 +       cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu));
16333 +       for_each_cpu(i, mask) {
16334 +               if (chip_id == cpu_to_chip_id(i)) {
16335 +                       or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask);
16336 +                       cpumask_andnot(mask, mask, submask_fn(i));
16337 +               } else {
16338 +                       cpumask_andnot(mask, mask, cpu_core_mask(i));
16339 +               }
16340 +       }
16342 +out:
16343         free_cpumask_var(mask);
16346 @@ -1521,6 +1546,9 @@ void start_secondary(void *unused)
16348         vdso_getcpu_init();
16349  #endif
16350 +       set_numa_node(numa_cpu_lookup_table[cpu]);
16351 +       set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
16353         /* Update topology CPU masks */
16354         add_cpu_to_masks(cpu);
16356 @@ -1539,9 +1567,6 @@ void start_secondary(void *unused)
16357                         shared_caches = true;
16358         }
16360 -       set_numa_node(numa_cpu_lookup_table[cpu]);
16361 -       set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
16363         smp_wmb();
16364         notify_cpu_starting(cpu);
16365         set_cpu_online(cpu, true);
16366 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
16367 index e839a906fdf2..b14907209822 100644
16368 --- a/arch/powerpc/kernel/vdso.c
16369 +++ b/arch/powerpc/kernel/vdso.c
16370 @@ -55,10 +55,10 @@ static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struc
16372         unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
16374 -       if (new_size != text_size + PAGE_SIZE)
16375 +       if (new_size != text_size)
16376                 return -EINVAL;
16378 -       current->mm->context.vdso = (void __user *)new_vma->vm_start + PAGE_SIZE;
16379 +       current->mm->context.vdso = (void __user *)new_vma->vm_start;
16381         return 0;
16383 @@ -73,6 +73,10 @@ static int vdso64_mremap(const struct vm_special_mapping *sm, struct vm_area_str
16384         return vdso_mremap(sm, new_vma, &vdso64_end - &vdso64_start);
16387 +static struct vm_special_mapping vvar_spec __ro_after_init = {
16388 +       .name = "[vvar]",
16391  static struct vm_special_mapping vdso32_spec __ro_after_init = {
16392         .name = "[vdso]",
16393         .mremap = vdso32_mremap,
16394 @@ -89,11 +93,11 @@ static struct vm_special_mapping vdso64_spec __ro_after_init = {
16395   */
16396  static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
16398 -       struct mm_struct *mm = current->mm;
16399 +       unsigned long vdso_size, vdso_base, mappings_size;
16400         struct vm_special_mapping *vdso_spec;
16401 +       unsigned long vvar_size = PAGE_SIZE;
16402 +       struct mm_struct *mm = current->mm;
16403         struct vm_area_struct *vma;
16404 -       unsigned long vdso_size;
16405 -       unsigned long vdso_base;
16407         if (is_32bit_task()) {
16408                 vdso_spec = &vdso32_spec;
16409 @@ -110,8 +114,8 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
16410                 vdso_base = 0;
16411         }
16413 -       /* Add a page to the vdso size for the data page */
16414 -       vdso_size += PAGE_SIZE;
16415 +       mappings_size = vdso_size + vvar_size;
16416 +       mappings_size += (VDSO_ALIGNMENT - 1) & PAGE_MASK;
16418         /*
16419          * pick a base address for the vDSO in process space. We try to put it
16420 @@ -119,9 +123,7 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
16421          * and end up putting it elsewhere.
16422          * Add enough to the size so that the result can be aligned.
16423          */
16424 -       vdso_base = get_unmapped_area(NULL, vdso_base,
16425 -                                     vdso_size + ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
16426 -                                     0, 0);
16427 +       vdso_base = get_unmapped_area(NULL, vdso_base, mappings_size, 0, 0);
16428         if (IS_ERR_VALUE(vdso_base))
16429                 return vdso_base;
16431 @@ -133,7 +135,13 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
16432          * install_special_mapping or the perf counter mmap tracking code
16433          * will fail to recognise it as a vDSO.
16434          */
16435 -       mm->context.vdso = (void __user *)vdso_base + PAGE_SIZE;
16436 +       mm->context.vdso = (void __user *)vdso_base + vvar_size;
16438 +       vma = _install_special_mapping(mm, vdso_base, vvar_size,
16439 +                                      VM_READ | VM_MAYREAD | VM_IO |
16440 +                                      VM_DONTDUMP | VM_PFNMAP, &vvar_spec);
16441 +       if (IS_ERR(vma))
16442 +               return PTR_ERR(vma);
16444         /*
16445          * our vma flags don't have VM_WRITE so by default, the process isn't
16446 @@ -145,9 +153,12 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
16447          * It's fine to use that for setting breakpoints in the vDSO code
16448          * pages though.
16449          */
16450 -       vma = _install_special_mapping(mm, vdso_base, vdso_size,
16451 +       vma = _install_special_mapping(mm, vdso_base + vvar_size, vdso_size,
16452                                        VM_READ | VM_EXEC | VM_MAYREAD |
16453                                        VM_MAYWRITE | VM_MAYEXEC, vdso_spec);
16454 +       if (IS_ERR(vma))
16455 +               do_munmap(mm, vdso_base, vvar_size, NULL);
16457         return PTR_ERR_OR_ZERO(vma);
16460 @@ -249,11 +260,22 @@ static struct page ** __init vdso_setup_pages(void *start, void *end)
16461         if (!pagelist)
16462                 panic("%s: Cannot allocate page list for VDSO", __func__);
16464 -       pagelist[0] = virt_to_page(vdso_data);
16466         for (i = 0; i < pages; i++)
16467 -               pagelist[i + 1] = virt_to_page(start + i * PAGE_SIZE);
16468 +               pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
16470 +       return pagelist;
16473 +static struct page ** __init vvar_setup_pages(void)
16475 +       struct page **pagelist;
16477 +       /* .pages is NULL-terminated */
16478 +       pagelist = kcalloc(2, sizeof(struct page *), GFP_KERNEL);
16479 +       if (!pagelist)
16480 +               panic("%s: Cannot allocate page list for VVAR", __func__);
16482 +       pagelist[0] = virt_to_page(vdso_data);
16483         return pagelist;
16486 @@ -295,6 +317,8 @@ static int __init vdso_init(void)
16487         if (IS_ENABLED(CONFIG_PPC64))
16488                 vdso64_spec.pages = vdso_setup_pages(&vdso64_start, &vdso64_end);
16490 +       vvar_spec.pages = vvar_setup_pages();
16492         smp_wmb();
16494         return 0;
16495 diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c
16496 index 02b9e4d0dc40..a8a7cb71086b 100644
16497 --- a/arch/powerpc/kexec/file_load_64.c
16498 +++ b/arch/powerpc/kexec/file_load_64.c
16499 @@ -960,6 +960,93 @@ unsigned int kexec_fdt_totalsize_ppc64(struct kimage *image)
16500         return fdt_size;
16503 +/**
16504 + * add_node_props - Reads node properties from device node structure and add
16505 + *                  them to fdt.
16506 + * @fdt:            Flattened device tree of the kernel
16507 + * @node_offset:    offset of the node to add a property at
16508 + * @dn:             device node pointer
16509 + *
16510 + * Returns 0 on success, negative errno on error.
16511 + */
16512 +static int add_node_props(void *fdt, int node_offset, const struct device_node *dn)
16514 +       int ret = 0;
16515 +       struct property *pp;
16517 +       if (!dn)
16518 +               return -EINVAL;
16520 +       for_each_property_of_node(dn, pp) {
16521 +               ret = fdt_setprop(fdt, node_offset, pp->name, pp->value, pp->length);
16522 +               if (ret < 0) {
16523 +                       pr_err("Unable to add %s property: %s\n", pp->name, fdt_strerror(ret));
16524 +                       return ret;
16525 +               }
16526 +       }
16527 +       return ret;
16530 +/**
16531 + * update_cpus_node - Update cpus node of flattened device tree using of_root
16532 + *                    device node.
16533 + * @fdt:              Flattened device tree of the kernel.
16534 + *
16535 + * Returns 0 on success, negative errno on error.
16536 + */
16537 +static int update_cpus_node(void *fdt)
16539 +       struct device_node *cpus_node, *dn;
16540 +       int cpus_offset, cpus_subnode_offset, ret = 0;
16542 +       cpus_offset = fdt_path_offset(fdt, "/cpus");
16543 +       if (cpus_offset < 0 && cpus_offset != -FDT_ERR_NOTFOUND) {
16544 +               pr_err("Malformed device tree: error reading /cpus node: %s\n",
16545 +                      fdt_strerror(cpus_offset));
16546 +               return cpus_offset;
16547 +       }
16549 +       if (cpus_offset > 0) {
16550 +               ret = fdt_del_node(fdt, cpus_offset);
16551 +               if (ret < 0) {
16552 +                       pr_err("Error deleting /cpus node: %s\n", fdt_strerror(ret));
16553 +                       return -EINVAL;
16554 +               }
16555 +       }
16557 +       /* Add cpus node to fdt */
16558 +       cpus_offset = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"), "cpus");
16559 +       if (cpus_offset < 0) {
16560 +               pr_err("Error creating /cpus node: %s\n", fdt_strerror(cpus_offset));
16561 +               return -EINVAL;
16562 +       }
16564 +       /* Add cpus node properties */
16565 +       cpus_node = of_find_node_by_path("/cpus");
16566 +       ret = add_node_props(fdt, cpus_offset, cpus_node);
16567 +       of_node_put(cpus_node);
16568 +       if (ret < 0)
16569 +               return ret;
16571 +       /* Loop through all subnodes of cpus and add them to fdt */
16572 +       for_each_node_by_type(dn, "cpu") {
16573 +               cpus_subnode_offset = fdt_add_subnode(fdt, cpus_offset, dn->full_name);
16574 +               if (cpus_subnode_offset < 0) {
16575 +                       pr_err("Unable to add %s subnode: %s\n", dn->full_name,
16576 +                              fdt_strerror(cpus_subnode_offset));
16577 +                       ret = cpus_subnode_offset;
16578 +                       goto out;
16579 +               }
16581 +               ret = add_node_props(fdt, cpus_subnode_offset, dn);
16582 +               if (ret < 0)
16583 +                       goto out;
16584 +       }
16585 +out:
16586 +       of_node_put(dn);
16587 +       return ret;
16590  /**
16591   * setup_new_fdt_ppc64 - Update the flattend device-tree of the kernel
16592   *                       being loaded.
16593 @@ -1020,6 +1107,11 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
16594                 }
16595         }
16597 +       /* Update cpus nodes information to account hotplug CPUs. */
16598 +       ret =  update_cpus_node(fdt);
16599 +       if (ret < 0)
16600 +               goto out;
16602         /* Update memory reserve map */
16603         ret = get_reserved_memory_ranges(&rmem);
16604         if (ret)
16605 diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
16606 index e452158a18d7..c3e31fef0be1 100644
16607 --- a/arch/powerpc/kvm/book3s_64_mmu_host.c
16608 +++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
16609 @@ -8,6 +8,7 @@
16610   */
16612  #include <linux/kvm_host.h>
16613 +#include <linux/pkeys.h>
16615  #include <asm/kvm_ppc.h>
16616  #include <asm/kvm_book3s.h>
16617 @@ -133,6 +134,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
16618         else
16619                 kvmppc_mmu_flush_icache(pfn);
16621 +       rflags |= pte_to_hpte_pkey_bits(0, HPTE_USE_KERNEL_KEY);
16622         rflags = (rflags & ~HPTE_R_WIMG) | orig_pte->wimg;
16624         /*
16625 diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
16626 index 13bad6bf4c95..208a053c9adf 100644
16627 --- a/arch/powerpc/kvm/book3s_hv.c
16628 +++ b/arch/powerpc/kvm/book3s_hv.c
16629 @@ -3728,7 +3728,10 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
16630         vcpu->arch.dec_expires = dec + tb;
16631         vcpu->cpu = -1;
16632         vcpu->arch.thread_cpu = -1;
16633 +       /* Save guest CTRL register, set runlatch to 1 */
16634         vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
16635 +       if (!(vcpu->arch.ctrl & 1))
16636 +               mtspr(SPRN_CTRLT, vcpu->arch.ctrl | 1);
16638         vcpu->arch.iamr = mfspr(SPRN_IAMR);
16639         vcpu->arch.pspb = mfspr(SPRN_PSPB);
16640 diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
16641 index d4efc182662a..248f7c9e36fc 100644
16642 --- a/arch/powerpc/lib/Makefile
16643 +++ b/arch/powerpc/lib/Makefile
16644 @@ -5,6 +5,9 @@
16646  ccflags-$(CONFIG_PPC64)        := $(NO_MINIMAL_TOC)
16648 +CFLAGS_code-patching.o += -fno-stack-protector
16649 +CFLAGS_feature-fixups.o += -fno-stack-protector
16651  CFLAGS_REMOVE_code-patching.o = $(CC_FLAGS_FTRACE)
16652  CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE)
16654 diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
16655 index 1fd31b4b0e13..0aefa6a4a259 100644
16656 --- a/arch/powerpc/lib/feature-fixups.c
16657 +++ b/arch/powerpc/lib/feature-fixups.c
16658 @@ -14,6 +14,7 @@
16659  #include <linux/string.h>
16660  #include <linux/init.h>
16661  #include <linux/sched/mm.h>
16662 +#include <linux/stop_machine.h>
16663  #include <asm/cputable.h>
16664  #include <asm/code-patching.h>
16665  #include <asm/page.h>
16666 @@ -227,11 +228,25 @@ static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
16667                                                            : "unknown");
16670 +static int __do_stf_barrier_fixups(void *data)
16672 +       enum stf_barrier_type *types = data;
16674 +       do_stf_entry_barrier_fixups(*types);
16675 +       do_stf_exit_barrier_fixups(*types);
16677 +       return 0;
16680  void do_stf_barrier_fixups(enum stf_barrier_type types)
16682 -       do_stf_entry_barrier_fixups(types);
16683 -       do_stf_exit_barrier_fixups(types);
16684 +       /*
16685 +        * The call to the fallback entry flush, and the fallback/sync-ori exit
16686 +        * flush can not be safely patched in/out while other CPUs are executing
16687 +        * them. So call __do_stf_barrier_fixups() on one CPU while all other CPUs
16688 +        * spin in the stop machine core with interrupts hard disabled.
16689 +        */
16690 +       stop_machine(__do_stf_barrier_fixups, &types, NULL);
16693  void do_uaccess_flush_fixups(enum l1d_flush_type types)
16694 @@ -284,8 +299,9 @@ void do_uaccess_flush_fixups(enum l1d_flush_type types)
16695                                                 : "unknown");
16698 -void do_entry_flush_fixups(enum l1d_flush_type types)
16699 +static int __do_entry_flush_fixups(void *data)
16701 +       enum l1d_flush_type types = *(enum l1d_flush_type *)data;
16702         unsigned int instrs[3], *dest;
16703         long *start, *end;
16704         int i;
16705 @@ -354,6 +370,19 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
16706                                                         : "ori type" :
16707                 (types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
16708                                                 : "unknown");
16710 +       return 0;
16713 +void do_entry_flush_fixups(enum l1d_flush_type types)
16715 +       /*
16716 +        * The call to the fallback flush can not be safely patched in/out while
16717 +        * other CPUs are executing it. So call __do_entry_flush_fixups() on one
16718 +        * CPU while all other CPUs spin in the stop machine core with interrupts
16719 +        * hard disabled.
16720 +        */
16721 +       stop_machine(__do_entry_flush_fixups, &types, NULL);
16724  void do_rfi_flush_fixups(enum l1d_flush_type types)
16725 diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c
16726 index 567e0c6b3978..03819c259f0a 100644
16727 --- a/arch/powerpc/mm/book3s64/hash_pgtable.c
16728 +++ b/arch/powerpc/mm/book3s64/hash_pgtable.c
16729 @@ -428,12 +428,14 @@ static bool hash__change_memory_range(unsigned long start, unsigned long end,
16731  void hash__mark_rodata_ro(void)
16733 -       unsigned long start, end;
16734 +       unsigned long start, end, pp;
16736         start = (unsigned long)_stext;
16737         end = (unsigned long)__init_begin;
16739 -       WARN_ON(!hash__change_memory_range(start, end, PP_RXXX));
16740 +       pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL_ROX), HPTE_USE_KERNEL_KEY);
16742 +       WARN_ON(!hash__change_memory_range(start, end, pp));
16745  void hash__mark_initmem_nx(void)
16746 diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
16747 index 581b20a2feaf..12de1906e97b 100644
16748 --- a/arch/powerpc/mm/book3s64/hash_utils.c
16749 +++ b/arch/powerpc/mm/book3s64/hash_utils.c
16750 @@ -338,7 +338,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
16751  int htab_remove_mapping(unsigned long vstart, unsigned long vend,
16752                       int psize, int ssize)
16754 -       unsigned long vaddr;
16755 +       unsigned long vaddr, time_limit;
16756         unsigned int step, shift;
16757         int rc;
16758         int ret = 0;
16759 @@ -351,8 +351,19 @@ int htab_remove_mapping(unsigned long vstart, unsigned long vend,
16761         /* Unmap the full range specificied */
16762         vaddr = ALIGN_DOWN(vstart, step);
16763 +       time_limit = jiffies + HZ;
16765         for (;vaddr < vend; vaddr += step) {
16766                 rc = mmu_hash_ops.hpte_removebolted(vaddr, psize, ssize);
16768 +               /*
16769 +                * For large number of mappings introduce a cond_resched()
16770 +                * to prevent softlockup warnings.
16771 +                */
16772 +               if (time_after(jiffies, time_limit)) {
16773 +                       cond_resched();
16774 +                       time_limit = jiffies + HZ;
16775 +               }
16776                 if (rc == -ENOENT) {
16777                         ret = -ENOENT;
16778                         continue;
16779 @@ -1545,10 +1556,10 @@ DEFINE_INTERRUPT_HANDLER_RET(__do_hash_fault)
16780         if (user_mode(regs) || (region_id == USER_REGION_ID))
16781                 access &= ~_PAGE_PRIVILEGED;
16783 -       if (regs->trap == 0x400)
16784 +       if (TRAP(regs) == 0x400)
16785                 access |= _PAGE_EXEC;
16787 -       err = hash_page_mm(mm, ea, access, regs->trap, flags);
16788 +       err = hash_page_mm(mm, ea, access, TRAP(regs), flags);
16789         if (unlikely(err < 0)) {
16790                 // failed to instert a hash PTE due to an hypervisor error
16791                 if (user_mode(regs)) {
16792 diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
16793 index 98f0b243c1ab..39d488a212a0 100644
16794 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c
16795 +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
16796 @@ -108,7 +108,7 @@ static int early_map_kernel_page(unsigned long ea, unsigned long pa,
16798  set_the_pte:
16799         set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
16800 -       smp_wmb();
16801 +       asm volatile("ptesync": : :"memory");
16802         return 0;
16805 @@ -168,7 +168,7 @@ static int __map_kernel_page(unsigned long ea, unsigned long pa,
16807  set_the_pte:
16808         set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
16809 -       smp_wmb();
16810 +       asm volatile("ptesync": : :"memory");
16811         return 0;
16814 diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
16815 index 4e8ce6d85232..7a59a5c9aa5d 100644
16816 --- a/arch/powerpc/mm/mem.c
16817 +++ b/arch/powerpc/mm/mem.c
16818 @@ -54,7 +54,6 @@
16820  #include <mm/mmu_decl.h>
16822 -static DEFINE_MUTEX(linear_mapping_mutex);
16823  unsigned long long memory_limit;
16824  bool init_mem_is_free;
16826 @@ -72,6 +71,7 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
16827  EXPORT_SYMBOL(phys_mem_access_prot);
16829  #ifdef CONFIG_MEMORY_HOTPLUG
16830 +static DEFINE_MUTEX(linear_mapping_mutex);
16832  #ifdef CONFIG_NUMA
16833  int memory_add_physaddr_to_nid(u64 start)
16834 diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
16835 index e4f577da33d8..8b5eeb6fb2fb 100644
16836 --- a/arch/powerpc/perf/isa207-common.c
16837 +++ b/arch/powerpc/perf/isa207-common.c
16838 @@ -447,8 +447,8 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp,
16839          * EBB events are pinned & exclusive, so this should never actually
16840          * hit, but we leave it as a fallback in case.
16841          */
16842 -       mask  |= CNST_EBB_VAL(ebb);
16843 -       value |= CNST_EBB_MASK;
16844 +       mask  |= CNST_EBB_MASK;
16845 +       value |= CNST_EBB_VAL(ebb);
16847         *maskp = mask;
16848         *valp = value;
16849 diff --git a/arch/powerpc/perf/power10-events-list.h b/arch/powerpc/perf/power10-events-list.h
16850 index e45dafe818ed..93be7197d250 100644
16851 --- a/arch/powerpc/perf/power10-events-list.h
16852 +++ b/arch/powerpc/perf/power10-events-list.h
16853 @@ -75,5 +75,5 @@ EVENT(PM_RUN_INST_CMPL_ALT,                   0x00002);
16854   *     thresh end (TE)
16855   */
16857 -EVENT(MEM_LOADS,                               0x34340401e0);
16858 -EVENT(MEM_STORES,                              0x343c0401e0);
16859 +EVENT(MEM_LOADS,                               0x35340401e0);
16860 +EVENT(MEM_STORES,                              0x353c0401e0);
16861 diff --git a/arch/powerpc/platforms/52xx/lite5200_sleep.S b/arch/powerpc/platforms/52xx/lite5200_sleep.S
16862 index 11475c58ea43..afee8b1515a8 100644
16863 --- a/arch/powerpc/platforms/52xx/lite5200_sleep.S
16864 +++ b/arch/powerpc/platforms/52xx/lite5200_sleep.S
16865 @@ -181,7 +181,7 @@ sram_code:
16866    udelay: /* r11 - tb_ticks_per_usec, r12 - usecs, overwrites r13 */
16867         mullw   r12, r12, r11
16868         mftb    r13     /* start */
16869 -       addi    r12, r13, r12 /* end */
16870 +       add     r12, r13, r12 /* end */
16871      1:
16872         mftb    r13     /* current */
16873         cmp     cr0, r13, r12
16874 diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
16875 index 019669eb21d2..4ab7c3ef5826 100644
16876 --- a/arch/powerpc/platforms/powernv/memtrace.c
16877 +++ b/arch/powerpc/platforms/powernv/memtrace.c
16878 @@ -88,8 +88,8 @@ static void memtrace_clear_range(unsigned long start_pfn,
16879          * Before we go ahead and use this range as cache inhibited range
16880          * flush the cache.
16881          */
16882 -       flush_dcache_range_chunked(PFN_PHYS(start_pfn),
16883 -                                  PFN_PHYS(start_pfn + nr_pages),
16884 +       flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn),
16885 +                                  (unsigned long)pfn_to_kaddr(start_pfn + nr_pages),
16886                                    FLUSH_CHUNK_SIZE);
16889 diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
16890 index 12cbffd3c2e3..325f3b220f36 100644
16891 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
16892 +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
16893 @@ -47,9 +47,6 @@ static void rtas_stop_self(void)
16895         BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);
16897 -       printk("cpu %u (hwid %u) Ready to die...\n",
16898 -              smp_processor_id(), hard_smp_processor_id());
16900         rtas_call_unlocked(&args, rtas_stop_self_token, 0, 1, NULL);
16902         panic("Alas, I survived.\n");
16903 diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
16904 index 9fc5217f0c8e..836cbbe0ecc5 100644
16905 --- a/arch/powerpc/platforms/pseries/iommu.c
16906 +++ b/arch/powerpc/platforms/pseries/iommu.c
16907 @@ -1229,7 +1229,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
16908         if (pmem_present) {
16909                 if (query.largest_available_block >=
16910                     (1ULL << (MAX_PHYSMEM_BITS - page_shift)))
16911 -                       len = MAX_PHYSMEM_BITS - page_shift;
16912 +                       len = MAX_PHYSMEM_BITS;
16913                 else
16914                         dev_info(&dev->dev, "Skipping ibm,pmemory");
16915         }
16916 diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
16917 index 3805519a6469..cd38bd421f38 100644
16918 --- a/arch/powerpc/platforms/pseries/lpar.c
16919 +++ b/arch/powerpc/platforms/pseries/lpar.c
16920 @@ -977,11 +977,13 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
16921         slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
16922         BUG_ON(slot == -1);
16924 -       flags = newpp & 7;
16925 +       flags = newpp & (HPTE_R_PP | HPTE_R_N);
16926         if (mmu_has_feature(MMU_FTR_KERNEL_RO))
16927                 /* Move pp0 into bit 8 (IBM 55) */
16928                 flags |= (newpp & HPTE_R_PP0) >> 55;
16930 +       flags |= ((newpp & HPTE_R_KEY_HI) >> 48) | (newpp & HPTE_R_KEY_LO);
16932         lpar_rc = plpar_pte_protect(flags, slot, 0);
16934         BUG_ON(lpar_rc != H_SUCCESS);
16935 diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
16936 index f9ae17e8a0f4..a8f9140a24fa 100644
16937 --- a/arch/powerpc/platforms/pseries/pci_dlpar.c
16938 +++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
16939 @@ -50,6 +50,7 @@ EXPORT_SYMBOL_GPL(init_phb_dynamic);
16940  int remove_phb_dynamic(struct pci_controller *phb)
16942         struct pci_bus *b = phb->bus;
16943 +       struct pci_host_bridge *host_bridge = to_pci_host_bridge(b->bridge);
16944         struct resource *res;
16945         int rc, i;
16947 @@ -76,7 +77,8 @@ int remove_phb_dynamic(struct pci_controller *phb)
16948         /* Remove the PCI bus and unregister the bridge device from sysfs */
16949         phb->bus = NULL;
16950         pci_remove_bus(b);
16951 -       device_unregister(b->bridge);
16952 +       host_bridge->bus = NULL;
16953 +       device_unregister(&host_bridge->dev);
16955         /* Now release the IO resource */
16956         if (res->flags & IORESOURCE_IO)
16957 diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
16958 index 9cb4fc839fd5..429053d0402a 100644
16959 --- a/arch/powerpc/platforms/pseries/vio.c
16960 +++ b/arch/powerpc/platforms/pseries/vio.c
16961 @@ -1285,6 +1285,10 @@ static int vio_bus_remove(struct device *dev)
16962  int __vio_register_driver(struct vio_driver *viodrv, struct module *owner,
16963                           const char *mod_name)
16965 +       // vio_bus_type is only initialised for pseries
16966 +       if (!machine_is(pseries))
16967 +               return -ENODEV;
16969         pr_debug("%s: driver %s registering\n", __func__, viodrv->name);
16971         /* fill in 'struct driver' fields */
16972 diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
16973 index 595310e056f4..31b657c37735 100644
16974 --- a/arch/powerpc/sysdev/xive/common.c
16975 +++ b/arch/powerpc/sysdev/xive/common.c
16976 @@ -253,17 +253,20 @@ notrace void xmon_xive_do_dump(int cpu)
16977         xmon_printf("\n");
16980 +static struct irq_data *xive_get_irq_data(u32 hw_irq)
16982 +       unsigned int irq = irq_find_mapping(xive_irq_domain, hw_irq);
16984 +       return irq ? irq_get_irq_data(irq) : NULL;
16987  int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
16989 -       struct irq_chip *chip = irq_data_get_irq_chip(d);
16990         int rc;
16991         u32 target;
16992         u8 prio;
16993         u32 lirq;
16995 -       if (!is_xive_irq(chip))
16996 -               return -EINVAL;
16998         rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
16999         if (rc) {
17000                 xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
17001 @@ -273,6 +276,9 @@ int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
17002         xmon_printf("IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
17003                     hw_irq, target, prio, lirq);
17005 +       if (!d)
17006 +               d = xive_get_irq_data(hw_irq);
17008         if (d) {
17009                 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
17010                 u64 val = xive_esb_read(xd, XIVE_ESB_GET);
17011 @@ -1335,17 +1341,14 @@ static int xive_prepare_cpu(unsigned int cpu)
17013         xc = per_cpu(xive_cpu, cpu);
17014         if (!xc) {
17015 -               struct device_node *np;
17017                 xc = kzalloc_node(sizeof(struct xive_cpu),
17018                                   GFP_KERNEL, cpu_to_node(cpu));
17019                 if (!xc)
17020                         return -ENOMEM;
17021 -               np = of_get_cpu_node(cpu, NULL);
17022 -               if (np)
17023 -                       xc->chip_id = of_get_ibm_chip_id(np);
17024 -               of_node_put(np);
17025                 xc->hw_ipi = XIVE_BAD_IRQ;
17026 +               xc->chip_id = XIVE_INVALID_CHIP_ID;
17027 +               if (xive_ops->prepare_cpu)
17028 +                       xive_ops->prepare_cpu(cpu, xc);
17030                 per_cpu(xive_cpu, cpu) = xc;
17031         }
17032 @@ -1599,6 +1602,8 @@ static void xive_debug_show_irq(struct seq_file *m, u32 hw_irq, struct irq_data
17033         u32 target;
17034         u8 prio;
17035         u32 lirq;
17036 +       struct xive_irq_data *xd;
17037 +       u64 val;
17039         if (!is_xive_irq(chip))
17040                 return;
17041 @@ -1612,17 +1617,14 @@ static void xive_debug_show_irq(struct seq_file *m, u32 hw_irq, struct irq_data
17042         seq_printf(m, "IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
17043                    hw_irq, target, prio, lirq);
17045 -       if (d) {
17046 -               struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
17047 -               u64 val = xive_esb_read(xd, XIVE_ESB_GET);
17049 -               seq_printf(m, "flags=%c%c%c PQ=%c%c",
17050 -                          xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
17051 -                          xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
17052 -                          xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
17053 -                          val & XIVE_ESB_VAL_P ? 'P' : '-',
17054 -                          val & XIVE_ESB_VAL_Q ? 'Q' : '-');
17055 -       }
17056 +       xd = irq_data_get_irq_handler_data(d);
17057 +       val = xive_esb_read(xd, XIVE_ESB_GET);
17058 +       seq_printf(m, "flags=%c%c%c PQ=%c%c",
17059 +                  xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
17060 +                  xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
17061 +                  xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
17062 +                  val & XIVE_ESB_VAL_P ? 'P' : '-',
17063 +                  val & XIVE_ESB_VAL_Q ? 'Q' : '-');
17064         seq_puts(m, "\n");
17067 diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
17068 index 05a800a3104e..57e3f1540435 100644
17069 --- a/arch/powerpc/sysdev/xive/native.c
17070 +++ b/arch/powerpc/sysdev/xive/native.c
17071 @@ -380,6 +380,11 @@ static void xive_native_update_pending(struct xive_cpu *xc)
17072         }
17075 +static void xive_native_prepare_cpu(unsigned int cpu, struct xive_cpu *xc)
17077 +       xc->chip_id = cpu_to_chip_id(cpu);
17080  static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
17082         s64 rc;
17083 @@ -462,6 +467,7 @@ static const struct xive_ops xive_native_ops = {
17084         .match                  = xive_native_match,
17085         .shutdown               = xive_native_shutdown,
17086         .update_pending         = xive_native_update_pending,
17087 +       .prepare_cpu            = xive_native_prepare_cpu,
17088         .setup_cpu              = xive_native_setup_cpu,
17089         .teardown_cpu           = xive_native_teardown_cpu,
17090         .sync_source            = xive_native_sync_source,
17091 diff --git a/arch/powerpc/sysdev/xive/xive-internal.h b/arch/powerpc/sysdev/xive/xive-internal.h
17092 index 9cf57c722faa..6478be19b4d3 100644
17093 --- a/arch/powerpc/sysdev/xive/xive-internal.h
17094 +++ b/arch/powerpc/sysdev/xive/xive-internal.h
17095 @@ -46,6 +46,7 @@ struct xive_ops {
17096                                   u32 *sw_irq);
17097         int     (*setup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
17098         void    (*cleanup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
17099 +       void    (*prepare_cpu)(unsigned int cpu, struct xive_cpu *xc);
17100         void    (*setup_cpu)(unsigned int cpu, struct xive_cpu *xc);
17101         void    (*teardown_cpu)(unsigned int cpu, struct xive_cpu *xc);
17102         bool    (*match)(struct device_node *np);
17103 diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
17104 index 4515a10c5d22..d9522fc35ca5 100644
17105 --- a/arch/riscv/Kconfig
17106 +++ b/arch/riscv/Kconfig
17107 @@ -227,7 +227,7 @@ config ARCH_RV64I
17108         bool "RV64I"
17109         select 64BIT
17110         select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000
17111 -       select HAVE_DYNAMIC_FTRACE if MMU
17112 +       select HAVE_DYNAMIC_FTRACE if MMU && $(cc-option,-fpatchable-function-entry=8)
17113         select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
17114         select HAVE_FTRACE_MCOUNT_RECORD
17115         select HAVE_FUNCTION_GRAPH_TRACER
17116 diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
17117 index 845002cc2e57..04dad3380041 100644
17118 --- a/arch/riscv/include/asm/ftrace.h
17119 +++ b/arch/riscv/include/asm/ftrace.h
17120 @@ -13,9 +13,19 @@
17121  #endif
17122  #define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
17125 + * Clang prior to 13 had "mcount" instead of "_mcount":
17126 + * https://reviews.llvm.org/D98881
17127 + */
17128 +#if defined(CONFIG_CC_IS_GCC) || CONFIG_CLANG_VERSION >= 130000
17129 +#define MCOUNT_NAME _mcount
17130 +#else
17131 +#define MCOUNT_NAME mcount
17132 +#endif
17134  #define ARCH_SUPPORTS_FTRACE_OPS 1
17135  #ifndef __ASSEMBLY__
17136 -void _mcount(void);
17137 +void MCOUNT_NAME(void);
17138  static inline unsigned long ftrace_call_adjust(unsigned long addr)
17140         return addr;
17141 @@ -36,7 +46,7 @@ struct dyn_arch_ftrace {
17142   * both auipc and jalr at the same time.
17143   */
17145 -#define MCOUNT_ADDR            ((unsigned long)_mcount)
17146 +#define MCOUNT_ADDR            ((unsigned long)MCOUNT_NAME)
17147  #define JALR_SIGN_MASK         (0x00000800)
17148  #define JALR_OFFSET_MASK       (0x00000fff)
17149  #define AUIPC_OFFSET_MASK      (0xfffff000)
17150 diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S
17151 index 8a5593ff9ff3..6d462681c9c0 100644
17152 --- a/arch/riscv/kernel/mcount.S
17153 +++ b/arch/riscv/kernel/mcount.S
17154 @@ -47,8 +47,8 @@
17156  ENTRY(ftrace_stub)
17157  #ifdef CONFIG_DYNAMIC_FTRACE
17158 -       .global _mcount
17159 -       .set    _mcount, ftrace_stub
17160 +       .global MCOUNT_NAME
17161 +       .set    MCOUNT_NAME, ftrace_stub
17162  #endif
17163         ret
17164  ENDPROC(ftrace_stub)
17165 @@ -78,7 +78,7 @@ ENDPROC(return_to_handler)
17166  #endif
17168  #ifndef CONFIG_DYNAMIC_FTRACE
17169 -ENTRY(_mcount)
17170 +ENTRY(MCOUNT_NAME)
17171         la      t4, ftrace_stub
17172  #ifdef CONFIG_FUNCTION_GRAPH_TRACER
17173         la      t0, ftrace_graph_return
17174 @@ -124,6 +124,6 @@ do_trace:
17175         jalr    t5
17176         RESTORE_ABI_STATE
17177         ret
17178 -ENDPROC(_mcount)
17179 +ENDPROC(MCOUNT_NAME)
17180  #endif
17181 -EXPORT_SYMBOL(_mcount)
17182 +EXPORT_SYMBOL(MCOUNT_NAME)
17183 diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c
17184 index 7e2c78e2ca6b..d71f7c49a721 100644
17185 --- a/arch/riscv/kernel/probes/kprobes.c
17186 +++ b/arch/riscv/kernel/probes/kprobes.c
17187 @@ -260,8 +260,10 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
17189                 if (kcb->kprobe_status == KPROBE_REENTER)
17190                         restore_previous_kprobe(kcb);
17191 -               else
17192 +               else {
17193 +                       kprobes_restore_local_irqflag(kcb, regs);
17194                         reset_current_kprobe();
17195 +               }
17197                 break;
17198         case KPROBE_HIT_ACTIVE:
17199 diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
17200 index ea028d9e0d24..d44567490d91 100644
17201 --- a/arch/riscv/kernel/smp.c
17202 +++ b/arch/riscv/kernel/smp.c
17203 @@ -54,7 +54,7 @@ int riscv_hartid_to_cpuid(int hartid)
17204                         return i;
17206         pr_err("Couldn't find cpu id for hartid [%d]\n", hartid);
17207 -       return i;
17208 +       return -ENOENT;
17211  void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
17212 diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
17213 index 71a315e73cbe..ca2b40dfd24b 100644
17214 --- a/arch/riscv/kernel/vdso/Makefile
17215 +++ b/arch/riscv/kernel/vdso/Makefile
17216 @@ -41,11 +41,10 @@ KASAN_SANITIZE := n
17217  $(obj)/vdso.o: $(obj)/vdso.so
17219  # link rule for the .so file, .lds has to be first
17220 -SYSCFLAGS_vdso.so.dbg = $(c_flags)
17221  $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
17222         $(call if_changed,vdsold)
17223 -SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
17224 -       -Wl,--build-id=sha1 -Wl,--hash-style=both
17225 +LDFLAGS_vdso.so.dbg = -shared -s -soname=linux-vdso.so.1 \
17226 +       --build-id=sha1 --hash-style=both --eh-frame-hdr
17228  # We also create a special relocatable object that should mirror the symbol
17229  # table and layout of the linked DSO. With ld --just-symbols we can then
17230 @@ -60,13 +59,10 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
17232  # actual build commands
17233  # The DSO images are built using a special linker script
17234 -# Add -lgcc so rv32 gets static muldi3 and lshrdi3 definitions.
17235  # Make sure only to export the intended __vdso_xxx symbol offsets.
17236  quiet_cmd_vdsold = VDSOLD  $@
17237 -      cmd_vdsold = $(CC) $(KBUILD_CFLAGS) $(call cc-option, -no-pie) -nostdlib -nostartfiles $(SYSCFLAGS_$(@F)) \
17238 -                           -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp && \
17239 -                   $(CROSS_COMPILE)objcopy \
17240 -                           $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \
17241 +      cmd_vdsold = $(LD) $(ld_flags) -T $(filter-out FORCE,$^) -o $@.tmp && \
17242 +                   $(OBJCOPY) $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \
17243                     rm $@.tmp
17245  # Extracts symbol offsets from the VDSO, converting them into an assembly file
17246 diff --git a/arch/s390/crypto/arch_random.c b/arch/s390/crypto/arch_random.c
17247 index 7b947728d57e..56007c763902 100644
17248 --- a/arch/s390/crypto/arch_random.c
17249 +++ b/arch/s390/crypto/arch_random.c
17250 @@ -54,6 +54,10 @@ static DECLARE_DELAYED_WORK(arch_rng_work, arch_rng_refill_buffer);
17252  bool s390_arch_random_generate(u8 *buf, unsigned int nbytes)
17254 +       /* max hunk is ARCH_RNG_BUF_SIZE */
17255 +       if (nbytes > ARCH_RNG_BUF_SIZE)
17256 +               return false;
17258         /* lock rng buffer */
17259         if (!spin_trylock(&arch_rng_lock))
17260                 return false;
17261 diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
17262 index d9215c7106f0..8fc52679543d 100644
17263 --- a/arch/s390/include/asm/qdio.h
17264 +++ b/arch/s390/include/asm/qdio.h
17265 @@ -246,21 +246,8 @@ struct slsb {
17266         u8 val[QDIO_MAX_BUFFERS_PER_Q];
17267  } __attribute__ ((packed, aligned(256)));
17269 -/**
17270 - * struct qdio_outbuf_state - SBAL related asynchronous operation information
17271 - *   (for communication with upper layer programs)
17272 - *   (only required for use with completion queues)
17273 - * @user: pointer to upper layer program's state information related to SBAL
17274 - *        (stored in user1 data of QAOB)
17275 - */
17276 -struct qdio_outbuf_state {
17277 -       void *user;
17280 -#define CHSC_AC1_INITIATE_INPUTQ       0x80
17283  /* qdio adapter-characteristics-1 flag */
17284 +#define CHSC_AC1_INITIATE_INPUTQ       0x80
17285  #define AC1_SIGA_INPUT_NEEDED          0x40    /* process input queues */
17286  #define AC1_SIGA_OUTPUT_NEEDED         0x20    /* process output queues */
17287  #define AC1_SIGA_SYNC_NEEDED           0x10    /* ask hypervisor to sync */
17288 @@ -338,7 +325,6 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
17289   * @int_parm: interruption parameter
17290   * @input_sbal_addr_array:  per-queue array, each element points to 128 SBALs
17291   * @output_sbal_addr_array: per-queue array, each element points to 128 SBALs
17292 - * @output_sbal_state_array: no_output_qs * 128 state info (for CQ or NULL)
17293   */
17294  struct qdio_initialize {
17295         unsigned char q_format;
17296 @@ -357,7 +343,6 @@ struct qdio_initialize {
17297         unsigned long int_parm;
17298         struct qdio_buffer ***input_sbal_addr_array;
17299         struct qdio_buffer ***output_sbal_addr_array;
17300 -       struct qdio_outbuf_state *output_sbal_state_array;
17301  };
17303  #define QDIO_STATE_INACTIVE            0x00000002 /* after qdio_cleanup */
17304 @@ -378,9 +363,10 @@ extern int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
17305  extern int qdio_establish(struct ccw_device *cdev,
17306                           struct qdio_initialize *init_data);
17307  extern int qdio_activate(struct ccw_device *);
17308 +extern struct qaob *qdio_allocate_aob(void);
17309  extern void qdio_release_aob(struct qaob *);
17310 -extern int do_QDIO(struct ccw_device *, unsigned int, int, unsigned int,
17311 -                  unsigned int);
17312 +extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags, int q_nr,
17313 +                  unsigned int bufnr, unsigned int count, struct qaob *aob);
17314  extern int qdio_start_irq(struct ccw_device *cdev);
17315  extern int qdio_stop_irq(struct ccw_device *cdev);
17316  extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *);
17317 diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
17318 index a7eab7be4db0..5412efe328f8 100644
17319 --- a/arch/s390/kernel/dis.c
17320 +++ b/arch/s390/kernel/dis.c
17321 @@ -563,7 +563,7 @@ void show_code(struct pt_regs *regs)
17323  void print_fn_code(unsigned char *code, unsigned long len)
17325 -       char buffer[64], *ptr;
17326 +       char buffer[128], *ptr;
17327         int opsize, i;
17329         while (len) {
17330 diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
17331 index 72134f9f6ff5..5aab59ad5688 100644
17332 --- a/arch/s390/kernel/setup.c
17333 +++ b/arch/s390/kernel/setup.c
17334 @@ -937,9 +937,9 @@ static int __init setup_hwcaps(void)
17335         if (MACHINE_HAS_VX) {
17336                 elf_hwcap |= HWCAP_S390_VXRS;
17337                 if (test_facility(134))
17338 -                       elf_hwcap |= HWCAP_S390_VXRS_EXT;
17339 -               if (test_facility(135))
17340                         elf_hwcap |= HWCAP_S390_VXRS_BCD;
17341 +               if (test_facility(135))
17342 +                       elf_hwcap |= HWCAP_S390_VXRS_EXT;
17343                 if (test_facility(148))
17344                         elf_hwcap |= HWCAP_S390_VXRS_EXT2;
17345                 if (test_facility(152))
17346 diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
17347 index 6d6b57059493..b9f85b2dc053 100644
17348 --- a/arch/s390/kvm/gaccess.c
17349 +++ b/arch/s390/kvm/gaccess.c
17350 @@ -976,7 +976,9 @@ int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
17351   * kvm_s390_shadow_tables - walk the guest page table and create shadow tables
17352   * @sg: pointer to the shadow guest address space structure
17353   * @saddr: faulting address in the shadow gmap
17354 - * @pgt: pointer to the page table address result
17355 + * @pgt: pointer to the beginning of the page table for the given address if
17356 + *      successful (return value 0), or to the first invalid DAT entry in
17357 + *      case of exceptions (return value > 0)
17358   * @fake: pgt references contiguous guest memory block, not a pgtable
17359   */
17360  static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
17361 @@ -1034,6 +1036,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
17362                         rfte.val = ptr;
17363                         goto shadow_r2t;
17364                 }
17365 +               *pgt = ptr + vaddr.rfx * 8;
17366                 rc = gmap_read_table(parent, ptr + vaddr.rfx * 8, &rfte.val);
17367                 if (rc)
17368                         return rc;
17369 @@ -1060,6 +1063,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
17370                         rste.val = ptr;
17371                         goto shadow_r3t;
17372                 }
17373 +               *pgt = ptr + vaddr.rsx * 8;
17374                 rc = gmap_read_table(parent, ptr + vaddr.rsx * 8, &rste.val);
17375                 if (rc)
17376                         return rc;
17377 @@ -1087,6 +1091,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
17378                         rtte.val = ptr;
17379                         goto shadow_sgt;
17380                 }
17381 +               *pgt = ptr + vaddr.rtx * 8;
17382                 rc = gmap_read_table(parent, ptr + vaddr.rtx * 8, &rtte.val);
17383                 if (rc)
17384                         return rc;
17385 @@ -1123,6 +1128,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
17386                         ste.val = ptr;
17387                         goto shadow_pgt;
17388                 }
17389 +               *pgt = ptr + vaddr.sx * 8;
17390                 rc = gmap_read_table(parent, ptr + vaddr.sx * 8, &ste.val);
17391                 if (rc)
17392                         return rc;
17393 @@ -1157,6 +1163,8 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
17394   * @vcpu: virtual cpu
17395   * @sg: pointer to the shadow guest address space structure
17396   * @saddr: faulting address in the shadow gmap
17397 + * @datptr: will contain the address of the faulting DAT table entry, or of
17398 + *         the valid leaf, plus some flags
17399   *
17400   * Returns: - 0 if the shadow fault was successfully resolved
17401   *         - > 0 (pgm exception code) on exceptions while faulting
17402 @@ -1165,11 +1173,11 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
17403   *         - -ENOMEM if out of memory
17404   */
17405  int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
17406 -                         unsigned long saddr)
17407 +                         unsigned long saddr, unsigned long *datptr)
17409         union vaddress vaddr;
17410         union page_table_entry pte;
17411 -       unsigned long pgt;
17412 +       unsigned long pgt = 0;
17413         int dat_protection, fake;
17414         int rc;
17416 @@ -1191,8 +1199,20 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
17417                 pte.val = pgt + vaddr.px * PAGE_SIZE;
17418                 goto shadow_page;
17419         }
17420 -       if (!rc)
17421 -               rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val);
17423 +       switch (rc) {
17424 +       case PGM_SEGMENT_TRANSLATION:
17425 +       case PGM_REGION_THIRD_TRANS:
17426 +       case PGM_REGION_SECOND_TRANS:
17427 +       case PGM_REGION_FIRST_TRANS:
17428 +               pgt |= PEI_NOT_PTE;
17429 +               break;
17430 +       case 0:
17431 +               pgt += vaddr.px * 8;
17432 +               rc = gmap_read_table(sg->parent, pgt, &pte.val);
17433 +       }
17434 +       if (datptr)
17435 +               *datptr = pgt | dat_protection * PEI_DAT_PROT;
17436         if (!rc && pte.i)
17437                 rc = PGM_PAGE_TRANSLATION;
17438         if (!rc && pte.z)
17439 diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
17440 index f4c51756c462..7c72a5e3449f 100644
17441 --- a/arch/s390/kvm/gaccess.h
17442 +++ b/arch/s390/kvm/gaccess.h
17443 @@ -18,17 +18,14 @@
17445  /**
17446   * kvm_s390_real_to_abs - convert guest real address to guest absolute address
17447 - * @vcpu - guest virtual cpu
17448 + * @prefix - guest prefix
17449   * @gra - guest real address
17450   *
17451   * Returns the guest absolute address that corresponds to the passed guest real
17452 - * address @gra of a virtual guest cpu by applying its prefix.
17453 + * address @gra of by applying the given prefix.
17454   */
17455 -static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
17456 -                                                unsigned long gra)
17457 +static inline unsigned long _kvm_s390_real_to_abs(u32 prefix, unsigned long gra)
17459 -       unsigned long prefix  = kvm_s390_get_prefix(vcpu);
17461         if (gra < 2 * PAGE_SIZE)
17462                 gra += prefix;
17463         else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE)
17464 @@ -36,6 +33,43 @@ static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
17465         return gra;
17468 +/**
17469 + * kvm_s390_real_to_abs - convert guest real address to guest absolute address
17470 + * @vcpu - guest virtual cpu
17471 + * @gra - guest real address
17472 + *
17473 + * Returns the guest absolute address that corresponds to the passed guest real
17474 + * address @gra of a virtual guest cpu by applying its prefix.
17475 + */
17476 +static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
17477 +                                                unsigned long gra)
17479 +       return _kvm_s390_real_to_abs(kvm_s390_get_prefix(vcpu), gra);
17482 +/**
17483 + * _kvm_s390_logical_to_effective - convert guest logical to effective address
17484 + * @psw: psw of the guest
17485 + * @ga: guest logical address
17486 + *
17487 + * Convert a guest logical address to an effective address by applying the
17488 + * rules of the addressing mode defined by bits 31 and 32 of the given PSW
17489 + * (extendended/basic addressing mode).
17490 + *
17491 + * Depending on the addressing mode, the upper 40 bits (24 bit addressing
17492 + * mode), 33 bits (31 bit addressing mode) or no bits (64 bit addressing
17493 + * mode) of @ga will be zeroed and the remaining bits will be returned.
17494 + */
17495 +static inline unsigned long _kvm_s390_logical_to_effective(psw_t *psw,
17496 +                                                          unsigned long ga)
17498 +       if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT)
17499 +               return ga;
17500 +       if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT)
17501 +               return ga & ((1UL << 31) - 1);
17502 +       return ga & ((1UL << 24) - 1);
17505  /**
17506   * kvm_s390_logical_to_effective - convert guest logical to effective address
17507   * @vcpu: guest virtual cpu
17508 @@ -52,13 +86,7 @@ static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
17509  static inline unsigned long kvm_s390_logical_to_effective(struct kvm_vcpu *vcpu,
17510                                                           unsigned long ga)
17512 -       psw_t *psw = &vcpu->arch.sie_block->gpsw;
17514 -       if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT)
17515 -               return ga;
17516 -       if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT)
17517 -               return ga & ((1UL << 31) - 1);
17518 -       return ga & ((1UL << 24) - 1);
17519 +       return _kvm_s390_logical_to_effective(&vcpu->arch.sie_block->gpsw, ga);
17522  /*
17523 @@ -359,7 +387,11 @@ void ipte_unlock(struct kvm_vcpu *vcpu);
17524  int ipte_lock_held(struct kvm_vcpu *vcpu);
17525  int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);
17527 +/* MVPG PEI indication bits */
17528 +#define PEI_DAT_PROT 2
17529 +#define PEI_NOT_PTE 4
17531  int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *shadow,
17532 -                         unsigned long saddr);
17533 +                         unsigned long saddr, unsigned long *datptr);
17535  #endif /* __KVM_S390_GACCESS_H */
17536 diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
17537 index 2f09e9d7dc95..24ad447e648c 100644
17538 --- a/arch/s390/kvm/kvm-s390.c
17539 +++ b/arch/s390/kvm/kvm-s390.c
17540 @@ -4307,16 +4307,16 @@ static void store_regs_fmt2(struct kvm_vcpu *vcpu)
17541         kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
17542         kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
17543         if (MACHINE_HAS_GS) {
17544 +               preempt_disable();
17545                 __ctl_set_bit(2, 4);
17546                 if (vcpu->arch.gs_enabled)
17547                         save_gs_cb(current->thread.gs_cb);
17548 -               preempt_disable();
17549                 current->thread.gs_cb = vcpu->arch.host_gscb;
17550                 restore_gs_cb(vcpu->arch.host_gscb);
17551 -               preempt_enable();
17552                 if (!vcpu->arch.host_gscb)
17553                         __ctl_clear_bit(2, 4);
17554                 vcpu->arch.host_gscb = NULL;
17555 +               preempt_enable();
17556         }
17557         /* SIE will save etoken directly into SDNX and therefore kvm_run */
17559 diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
17560 index bd803e091918..4002a24bc43a 100644
17561 --- a/arch/s390/kvm/vsie.c
17562 +++ b/arch/s390/kvm/vsie.c
17563 @@ -417,11 +417,6 @@ static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
17564                 memcpy((void *)((u64)scb_o + 0xc0),
17565                        (void *)((u64)scb_s + 0xc0), 0xf0 - 0xc0);
17566                 break;
17567 -       case ICPT_PARTEXEC:
17568 -               /* MVPG only */
17569 -               memcpy((void *)((u64)scb_o + 0xc0),
17570 -                      (void *)((u64)scb_s + 0xc0), 0xd0 - 0xc0);
17571 -               break;
17572         }
17574         if (scb_s->ihcpu != 0xffffU)
17575 @@ -620,10 +615,10 @@ static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
17576         /* with mso/msl, the prefix lies at offset *mso* */
17577         prefix += scb_s->mso;
17579 -       rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix);
17580 +       rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix, NULL);
17581         if (!rc && (scb_s->ecb & ECB_TE))
17582                 rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
17583 -                                          prefix + PAGE_SIZE);
17584 +                                          prefix + PAGE_SIZE, NULL);
17585         /*
17586          * We don't have to mprotect, we will be called for all unshadows.
17587          * SIE will detect if protection applies and trigger a validity.
17588 @@ -914,7 +909,7 @@ static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
17589                                     current->thread.gmap_addr, 1);
17591         rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
17592 -                                  current->thread.gmap_addr);
17593 +                                  current->thread.gmap_addr, NULL);
17594         if (rc > 0) {
17595                 rc = inject_fault(vcpu, rc,
17596                                   current->thread.gmap_addr,
17597 @@ -936,7 +931,7 @@ static void handle_last_fault(struct kvm_vcpu *vcpu,
17599         if (vsie_page->fault_addr)
17600                 kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
17601 -                                     vsie_page->fault_addr);
17602 +                                     vsie_page->fault_addr, NULL);
17603         vsie_page->fault_addr = 0;
17606 @@ -983,6 +978,98 @@ static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
17607         return 0;
17611 + * Get a register for a nested guest.
17612 + * @vcpu the vcpu of the guest
17613 + * @vsie_page the vsie_page for the nested guest
17614 + * @reg the register number, the upper 4 bits are ignored.
17615 + * returns: the value of the register.
17616 + */
17617 +static u64 vsie_get_register(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, u8 reg)
17619 +       /* no need to validate the parameter and/or perform error handling */
17620 +       reg &= 0xf;
17621 +       switch (reg) {
17622 +       case 15:
17623 +               return vsie_page->scb_s.gg15;
17624 +       case 14:
17625 +               return vsie_page->scb_s.gg14;
17626 +       default:
17627 +               return vcpu->run->s.regs.gprs[reg];
17628 +       }
17631 +static int vsie_handle_mvpg(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
17633 +       struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
17634 +       unsigned long pei_dest, pei_src, src, dest, mask, prefix;
17635 +       u64 *pei_block = &vsie_page->scb_o->mcic;
17636 +       int edat, rc_dest, rc_src;
17637 +       union ctlreg0 cr0;
17639 +       cr0.val = vcpu->arch.sie_block->gcr[0];
17640 +       edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
17641 +       mask = _kvm_s390_logical_to_effective(&scb_s->gpsw, PAGE_MASK);
17642 +       prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
17644 +       dest = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 20) & mask;
17645 +       dest = _kvm_s390_real_to_abs(prefix, dest) + scb_s->mso;
17646 +       src = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 16) & mask;
17647 +       src = _kvm_s390_real_to_abs(prefix, src) + scb_s->mso;
17649 +       rc_dest = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, dest, &pei_dest);
17650 +       rc_src = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, src, &pei_src);
17651 +       /*
17652 +        * Either everything went well, or something non-critical went wrong
17653 +        * e.g. because of a race. In either case, simply retry.
17654 +        */
17655 +       if (rc_dest == -EAGAIN || rc_src == -EAGAIN || (!rc_dest && !rc_src)) {
17656 +               retry_vsie_icpt(vsie_page);
17657 +               return -EAGAIN;
17658 +       }
17659 +       /* Something more serious went wrong, propagate the error */
17660 +       if (rc_dest < 0)
17661 +               return rc_dest;
17662 +       if (rc_src < 0)
17663 +               return rc_src;
17665 +       /* The only possible suppressing exception: just deliver it */
17666 +       if (rc_dest == PGM_TRANSLATION_SPEC || rc_src == PGM_TRANSLATION_SPEC) {
17667 +               clear_vsie_icpt(vsie_page);
17668 +               rc_dest = kvm_s390_inject_program_int(vcpu, PGM_TRANSLATION_SPEC);
17669 +               WARN_ON_ONCE(rc_dest);
17670 +               return 1;
17671 +       }
17673 +       /*
17674 +        * Forward the PEI intercept to the guest if it was a page fault, or
17675 +        * also for segment and region table faults if EDAT applies.
17676 +        */
17677 +       if (edat) {
17678 +               rc_dest = rc_dest == PGM_ASCE_TYPE ? rc_dest : 0;
17679 +               rc_src = rc_src == PGM_ASCE_TYPE ? rc_src : 0;
17680 +       } else {
17681 +               rc_dest = rc_dest != PGM_PAGE_TRANSLATION ? rc_dest : 0;
17682 +               rc_src = rc_src != PGM_PAGE_TRANSLATION ? rc_src : 0;
17683 +       }
17684 +       if (!rc_dest && !rc_src) {
17685 +               pei_block[0] = pei_dest;
17686 +               pei_block[1] = pei_src;
17687 +               return 1;
17688 +       }
17690 +       retry_vsie_icpt(vsie_page);
17692 +       /*
17693 +        * The host has edat, and the guest does not, or it was an ASCE type
17694 +        * exception. The host needs to inject the appropriate DAT interrupts
17695 +        * into the guest.
17696 +        */
17697 +       if (rc_dest)
17698 +               return inject_fault(vcpu, rc_dest, dest, 1);
17699 +       return inject_fault(vcpu, rc_src, src, 0);
17702  /*
17703   * Run the vsie on a shadow scb and a shadow gmap, without any further
17704   * sanity checks, handling SIE faults.
17705 @@ -1071,6 +1158,10 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
17706                 if ((scb_s->ipa & 0xf000) != 0xf000)
17707                         scb_s->ipa += 0x1000;
17708                 break;
17709 +       case ICPT_PARTEXEC:
17710 +               if (scb_s->ipa == 0xb254)
17711 +                       rc = vsie_handle_mvpg(vcpu, vsie_page);
17712 +               break;
17713         }
17714         return rc;
17716 diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
17717 index f5beecdac693..e76b22157099 100644
17718 --- a/arch/sh/kernel/traps.c
17719 +++ b/arch/sh/kernel/traps.c
17720 @@ -180,7 +180,6 @@ static inline void arch_ftrace_nmi_exit(void) { }
17722  BUILD_TRAP_HANDLER(nmi)
17724 -       unsigned int cpu = smp_processor_id();
17725         TRAP_HANDLER_DECL;
17727         arch_ftrace_nmi_enter();
17728 diff --git a/arch/um/Kconfig.debug b/arch/um/Kconfig.debug
17729 index 315d368e63ad..1dfb2959c73b 100644
17730 --- a/arch/um/Kconfig.debug
17731 +++ b/arch/um/Kconfig.debug
17732 @@ -17,6 +17,7 @@ config GCOV
17733         bool "Enable gcov support"
17734         depends on DEBUG_INFO
17735         depends on !KCOV
17736 +       depends on !MODULES
17737         help
17738           This option allows developers to retrieve coverage data from a UML
17739           session.
17740 diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
17741 index 5aa882011e04..e698e0c7dbdc 100644
17742 --- a/arch/um/kernel/Makefile
17743 +++ b/arch/um/kernel/Makefile
17744 @@ -21,7 +21,6 @@ obj-y = config.o exec.o exitcode.o irq.o ksyms.o mem.o \
17746  obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
17747  obj-$(CONFIG_GPROF)    += gprof_syms.o
17748 -obj-$(CONFIG_GCOV)     += gmon_syms.o
17749  obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
17750  obj-$(CONFIG_STACKTRACE) += stacktrace.o
17752 diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
17753 index dacbfabf66d8..2f2a8ce92f1e 100644
17754 --- a/arch/um/kernel/dyn.lds.S
17755 +++ b/arch/um/kernel/dyn.lds.S
17756 @@ -6,6 +6,12 @@ OUTPUT_ARCH(ELF_ARCH)
17757  ENTRY(_start)
17758  jiffies = jiffies_64;
17760 +VERSION {
17761 +  {
17762 +    local: *;
17763 +  };
17766  SECTIONS
17768    PROVIDE (__executable_start = START);
17769 diff --git a/arch/um/kernel/gmon_syms.c b/arch/um/kernel/gmon_syms.c
17770 deleted file mode 100644
17771 index 9361a8eb9bf1..000000000000
17772 --- a/arch/um/kernel/gmon_syms.c
17773 +++ /dev/null
17774 @@ -1,16 +0,0 @@
17775 -// SPDX-License-Identifier: GPL-2.0
17777 - * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
17778 - */
17780 -#include <linux/module.h>
17782 -extern void __bb_init_func(void *)  __attribute__((weak));
17783 -EXPORT_SYMBOL(__bb_init_func);
17785 -extern void __gcov_init(void *)  __attribute__((weak));
17786 -EXPORT_SYMBOL(__gcov_init);
17787 -extern void __gcov_merge_add(void *, unsigned int)  __attribute__((weak));
17788 -EXPORT_SYMBOL(__gcov_merge_add);
17789 -extern void __gcov_exit(void)  __attribute__((weak));
17790 -EXPORT_SYMBOL(__gcov_exit);
17791 diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
17792 index 45d957d7004c..7a8e2b123e29 100644
17793 --- a/arch/um/kernel/uml.lds.S
17794 +++ b/arch/um/kernel/uml.lds.S
17795 @@ -7,6 +7,12 @@ OUTPUT_ARCH(ELF_ARCH)
17796  ENTRY(_start)
17797  jiffies = jiffies_64;
17799 +VERSION {
17800 +  {
17801 +    local: *;
17802 +  };
17805  SECTIONS
17807    /* This must contain the right address - not quite the default ELF one.*/
17808 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
17809 index 2792879d398e..ab2e8502c27c 100644
17810 --- a/arch/x86/Kconfig
17811 +++ b/arch/x86/Kconfig
17812 @@ -163,6 +163,7 @@ config X86
17813         select HAVE_ARCH_TRACEHOOK
17814         select HAVE_ARCH_TRANSPARENT_HUGEPAGE
17815         select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if X86_64
17816 +       select HAVE_ARCH_PARENT_PMD_YOUNG       if X86_64
17817         select HAVE_ARCH_USERFAULTFD_WP         if X86_64 && USERFAULTFD
17818         select HAVE_ARCH_VMAP_STACK             if X86_64
17819         select HAVE_ARCH_WITHIN_STACK_FRAMES
17820 @@ -571,6 +572,7 @@ config X86_UV
17821         depends on X86_EXTENDED_PLATFORM
17822         depends on NUMA
17823         depends on EFI
17824 +       depends on KEXEC_CORE
17825         depends on X86_X2APIC
17826         depends on PCI
17827         help
17828 @@ -1406,7 +1408,7 @@ config HIGHMEM4G
17830  config HIGHMEM64G
17831         bool "64GB"
17832 -       depends on !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !WINCHIP3D && !MK6
17833 +       depends on !M486SX && !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !WINCHIP3D && !MK6
17834         select X86_PAE
17835         help
17836           Select this if you have a 32-bit processor and more than 4
17837 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
17838 index 814fe0d349b0..872b9cf598e3 100644
17839 --- a/arch/x86/Kconfig.cpu
17840 +++ b/arch/x86/Kconfig.cpu
17841 @@ -157,7 +157,7 @@ config MPENTIUM4
17844  config MK6
17845 -       bool "K6/K6-II/K6-III"
17846 +       bool "AMD K6/K6-II/K6-III"
17847         depends on X86_32
17848         help
17849           Select this for an AMD K6-family processor.  Enables use of
17850 @@ -165,7 +165,7 @@ config MK6
17851           flags to GCC.
17853  config MK7
17854 -       bool "Athlon/Duron/K7"
17855 +       bool "AMD Athlon/Duron/K7"
17856         depends on X86_32
17857         help
17858           Select this for an AMD Athlon K7-family processor.  Enables use of
17859 @@ -173,12 +173,98 @@ config MK7
17860           flags to GCC.
17862  config MK8
17863 -       bool "Opteron/Athlon64/Hammer/K8"
17864 +       bool "AMD Opteron/Athlon64/Hammer/K8"
17865         help
17866           Select this for an AMD Opteron or Athlon64 Hammer-family processor.
17867           Enables use of some extended instructions, and passes appropriate
17868           optimization flags to GCC.
17870 +config MK8SSE3
17871 +       bool "AMD Opteron/Athlon64/Hammer/K8 with SSE3"
17872 +       help
17873 +         Select this for improved AMD Opteron or Athlon64 Hammer-family processors.
17874 +         Enables use of some extended instructions, and passes appropriate
17875 +         optimization flags to GCC.
17877 +config MK10
17878 +       bool "AMD 61xx/7x50/PhenomX3/X4/II/K10"
17879 +       help
17880 +         Select this for an AMD 61xx Eight-Core Magny-Cours, Athlon X2 7x50,
17881 +         Phenom X3/X4/II, Athlon II X2/X3/X4, or Turion II-family processor.
17882 +         Enables use of some extended instructions, and passes appropriate
17883 +         optimization flags to GCC.
17885 +config MBARCELONA
17886 +       bool "AMD Barcelona"
17887 +       help
17888 +         Select this for AMD Family 10h Barcelona processors.
17890 +         Enables -march=barcelona
17892 +config MBOBCAT
17893 +       bool "AMD Bobcat"
17894 +       help
17895 +         Select this for AMD Family 14h Bobcat processors.
17897 +         Enables -march=btver1
17899 +config MJAGUAR
17900 +       bool "AMD Jaguar"
17901 +       help
17902 +         Select this for AMD Family 16h Jaguar processors.
17904 +         Enables -march=btver2
17906 +config MBULLDOZER
17907 +       bool "AMD Bulldozer"
17908 +       help
17909 +         Select this for AMD Family 15h Bulldozer processors.
17911 +         Enables -march=bdver1
17913 +config MPILEDRIVER
17914 +       bool "AMD Piledriver"
17915 +       help
17916 +         Select this for AMD Family 15h Piledriver processors.
17918 +         Enables -march=bdver2
17920 +config MSTEAMROLLER
17921 +       bool "AMD Steamroller"
17922 +       help
17923 +         Select this for AMD Family 15h Steamroller processors.
17925 +         Enables -march=bdver3
17927 +config MEXCAVATOR
17928 +       bool "AMD Excavator"
17929 +       help
17930 +         Select this for AMD Family 15h Excavator processors.
17932 +         Enables -march=bdver4
17934 +config MZEN
17935 +       bool "AMD Zen"
17936 +       help
17937 +         Select this for AMD Family 17h Zen processors.
17939 +         Enables -march=znver1
17941 +config MZEN2
17942 +       bool "AMD Zen 2"
17943 +       help
17944 +         Select this for AMD Family 17h Zen 2 processors.
17946 +         Enables -march=znver2
17948 +config MZEN3
17949 +       bool "AMD Zen 3"
17950 +       depends on GCC_VERSION > 100300
17951 +       help
17952 +         Select this for AMD Family 19h Zen 3 processors.
17954 +         Enables -march=znver3
17956  config MCRUSOE
17957         bool "Crusoe"
17958         depends on X86_32
17959 @@ -270,7 +356,7 @@ config MPSC
17960           in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
17962  config MCORE2
17963 -       bool "Core 2/newer Xeon"
17964 +       bool "Intel Core 2"
17965         help
17967           Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
17968 @@ -278,6 +364,8 @@ config MCORE2
17969           family in /proc/cpuinfo. Newer ones have 6 and older ones 15
17970           (not a typo)
17972 +         Enables -march=core2
17974  config MATOM
17975         bool "Intel Atom"
17976         help
17977 @@ -287,6 +375,182 @@ config MATOM
17978           accordingly optimized code. Use a recent GCC with specific Atom
17979           support in order to fully benefit from selecting this option.
17981 +config MNEHALEM
17982 +       bool "Intel Nehalem"
17983 +       select X86_P6_NOP
17984 +       help
17986 +         Select this for 1st Gen Core processors in the Nehalem family.
17988 +         Enables -march=nehalem
17990 +config MWESTMERE
17991 +       bool "Intel Westmere"
17992 +       select X86_P6_NOP
17993 +       help
17995 +         Select this for the Intel Westmere formerly Nehalem-C family.
17997 +         Enables -march=westmere
17999 +config MSILVERMONT
18000 +       bool "Intel Silvermont"
18001 +       select X86_P6_NOP
18002 +       help
18004 +         Select this for the Intel Silvermont platform.
18006 +         Enables -march=silvermont
18008 +config MGOLDMONT
18009 +       bool "Intel Goldmont"
18010 +       select X86_P6_NOP
18011 +       help
18013 +         Select this for the Intel Goldmont platform including Apollo Lake and Denverton.
18015 +         Enables -march=goldmont
18017 +config MGOLDMONTPLUS
18018 +       bool "Intel Goldmont Plus"
18019 +       select X86_P6_NOP
18020 +       help
18022 +         Select this for the Intel Goldmont Plus platform including Gemini Lake.
18024 +         Enables -march=goldmont-plus
18026 +config MSANDYBRIDGE
18027 +       bool "Intel Sandy Bridge"
18028 +       select X86_P6_NOP
18029 +       help
18031 +         Select this for 2nd Gen Core processors in the Sandy Bridge family.
18033 +         Enables -march=sandybridge
18035 +config MIVYBRIDGE
18036 +       bool "Intel Ivy Bridge"
18037 +       select X86_P6_NOP
18038 +       help
18040 +         Select this for 3rd Gen Core processors in the Ivy Bridge family.
18042 +         Enables -march=ivybridge
18044 +config MHASWELL
18045 +       bool "Intel Haswell"
18046 +       select X86_P6_NOP
18047 +       help
18049 +         Select this for 4th Gen Core processors in the Haswell family.
18051 +         Enables -march=haswell
18053 +config MBROADWELL
18054 +       bool "Intel Broadwell"
18055 +       select X86_P6_NOP
18056 +       help
18058 +         Select this for 5th Gen Core processors in the Broadwell family.
18060 +         Enables -march=broadwell
18062 +config MSKYLAKE
18063 +       bool "Intel Skylake"
18064 +       select X86_P6_NOP
18065 +       help
18067 +         Select this for 6th Gen Core processors in the Skylake family.
18069 +         Enables -march=skylake
18071 +config MSKYLAKEX
18072 +       bool "Intel Skylake X"
18073 +       select X86_P6_NOP
18074 +       help
18076 +         Select this for 6th Gen Core processors in the Skylake X family.
18078 +         Enables -march=skylake-avx512
18080 +config MCANNONLAKE
18081 +       bool "Intel Cannon Lake"
18082 +       select X86_P6_NOP
18083 +       help
18085 +         Select this for 8th Gen Core processors
18087 +         Enables -march=cannonlake
18089 +config MICELAKE
18090 +       bool "Intel Ice Lake"
18091 +       select X86_P6_NOP
18092 +       help
18094 +         Select this for 10th Gen Core processors in the Ice Lake family.
18096 +         Enables -march=icelake-client
18098 +config MCASCADELAKE
18099 +       bool "Intel Cascade Lake"
18100 +       select X86_P6_NOP
18101 +       help
18103 +         Select this for Xeon processors in the Cascade Lake family.
18105 +         Enables -march=cascadelake
18107 +config MCOOPERLAKE
18108 +       bool "Intel Cooper Lake"
18109 +       depends on GCC_VERSION > 100100
18110 +       select X86_P6_NOP
18111 +       help
18113 +         Select this for Xeon processors in the Cooper Lake family.
18115 +         Enables -march=cooperlake
18117 +config MTIGERLAKE
18118 +       bool "Intel Tiger Lake"
18119 +       depends on GCC_VERSION > 100100
18120 +       select X86_P6_NOP
18121 +       help
18123 +         Select this for third-generation 10 nm process processors in the Tiger Lake family.
18125 +         Enables -march=tigerlake
18127 +config MSAPPHIRERAPIDS
18128 +       bool "Intel Sapphire Rapids"
18129 +       depends on GCC_VERSION > 110000
18130 +       select X86_P6_NOP
18131 +       help
18133 +         Select this for third-generation 10 nm process processors in the Sapphire Rapids family.
18135 +         Enables -march=sapphirerapids
18137 +config MROCKETLAKE
18138 +       bool "Intel Rocket Lake"
18139 +       depends on GCC_VERSION > 110000
18140 +       select X86_P6_NOP
18141 +       help
18143 +         Select this for eleventh-generation processors in the Rocket Lake family.
18145 +         Enables -march=rocketlake
18147 +config MALDERLAKE
18148 +       bool "Intel Alder Lake"
18149 +       depends on GCC_VERSION > 110000
18150 +       select X86_P6_NOP
18151 +       help
18153 +         Select this for twelfth-generation processors in the Alder Lake family.
18155 +         Enables -march=alderlake
18157  config GENERIC_CPU
18158         bool "Generic-x86-64"
18159         depends on X86_64
18160 @@ -294,6 +558,50 @@ config GENERIC_CPU
18161           Generic x86-64 CPU.
18162           Run equally well on all x86-64 CPUs.
18164 +config GENERIC_CPU2
18165 +       bool "Generic-x86-64-v2"
18166 +       depends on GCC_VERSION > 110000
18167 +       depends on X86_64
18168 +       help
18169 +         Generic x86-64 CPU.
18170 +         Run equally well on all x86-64 CPUs with min support of x86-64-v2.
18172 +config GENERIC_CPU3
18173 +       bool "Generic-x86-64-v3"
18174 +       depends on GCC_VERSION > 110000
18175 +       depends on X86_64
18176 +       help
18177 +         Generic x86-64-v3 CPU with v3 instructions.
18178 +         Run equally well on all x86-64 CPUs with min support of x86-64-v3.
18180 +config GENERIC_CPU4
18181 +       bool "Generic-x86-64-v4"
18182 +       depends on GCC_VERSION > 110000
18183 +       depends on X86_64
18184 +       help
18185 +         Generic x86-64 CPU with v4 instructions.
18186 +         Run equally well on all x86-64 CPUs with min support of x86-64-v4.
18188 +config MNATIVE_INTEL
18189 +       bool "Intel-Native optimizations autodetected by GCC"
18190 +       help
18192 +         GCC 4.2 and above support -march=native, which automatically detects
18193 +         the optimum settings to use based on your processor. Do NOT use this
18194 +         for AMD CPUs.  Intel Only!
18196 +         Enables -march=native
18198 +config MNATIVE_AMD
18199 +       bool "AMD-Native optimizations autodetected by GCC"
18200 +       help
18202 +         GCC 4.2 and above support -march=native, which automatically detects
18203 +         the optimum settings to use based on your processor. Do NOT use this
18204 +         for Intel CPUs.  AMD Only!
18206 +         Enables -march=native
18208  endchoice
18210  config X86_GENERIC
18211 @@ -318,7 +626,7 @@ config X86_INTERNODE_CACHE_SHIFT
18212  config X86_L1_CACHE_SHIFT
18213         int
18214         default "7" if MPENTIUM4 || MPSC
18215 -       default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
18216 +       default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD || X86_GENERIC || GENERIC_CPU || GENERIC_CPU2 || GENERIC_CPU3 || GENERIC_CPU4
18217         default "4" if MELAN || M486SX || M486 || MGEODEGX1
18218         default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
18220 @@ -336,11 +644,11 @@ config X86_ALIGNMENT_16
18222  config X86_INTEL_USERCOPY
18223         def_bool y
18224 -       depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
18225 +       depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL
18227  config X86_USE_PPRO_CHECKSUM
18228         def_bool y
18229 -       depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
18230 +       depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD
18232  config X86_USE_3DNOW
18233         def_bool y
18234 @@ -360,26 +668,26 @@ config X86_USE_3DNOW
18235  config X86_P6_NOP
18236         def_bool y
18237         depends on X86_64
18238 -       depends on (MCORE2 || MPENTIUM4 || MPSC)
18239 +       depends on (MCORE2 || MPENTIUM4 || MPSC || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL)
18241  config X86_TSC
18242         def_bool y
18243 -       depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
18244 +       depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD) || X86_64
18246  config X86_CMPXCHG64
18247         def_bool y
18248 -       depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8
18249 +       depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD
18251  # this should be set for all -march=.. options where the compiler
18252  # generates cmov.
18253  config X86_CMOV
18254         def_bool y
18255 -       depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
18256 +       depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD)
18258  config X86_MINIMUM_CPU_FAMILY
18259         int
18260         default "64" if X86_64
18261 -       default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8)
18262 +       default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8 ||  MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD)
18263         default "5" if X86_32 && X86_CMPXCHG64
18264         default "4"
18266 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
18267 index 9a85eae37b17..ee0cd507af8b 100644
18268 --- a/arch/x86/Makefile
18269 +++ b/arch/x86/Makefile
18270 @@ -33,6 +33,7 @@ REALMODE_CFLAGS += -ffreestanding
18271  REALMODE_CFLAGS += -fno-stack-protector
18272  REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member)
18273  REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
18274 +REALMODE_CFLAGS += $(CLANG_FLAGS)
18275  export REALMODE_CFLAGS
18277  # BITS is used as extension for files which are available in a 32 bit
18278 @@ -113,11 +114,48 @@ else
18279          # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
18280          cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
18281          cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
18283 -        cflags-$(CONFIG_MCORE2) += \
18284 -                $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
18285 -       cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
18286 -               $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
18287 +        cflags-$(CONFIG_MK8SSE3) += $(call cc-option,-march=k8-sse3)
18288 +        cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10)
18289 +        cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona)
18290 +        cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1)
18291 +        cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2)
18292 +        cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1)
18293 +        cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2)
18294 +        cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-mno-tbm)
18295 +        cflags-$(CONFIG_MSTEAMROLLER) += $(call cc-option,-march=bdver3)
18296 +        cflags-$(CONFIG_MSTEAMROLLER) += $(call cc-option,-mno-tbm)
18297 +        cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-march=bdver4)
18298 +        cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-mno-tbm)
18299 +        cflags-$(CONFIG_MZEN) += $(call cc-option,-march=znver1)
18300 +        cflags-$(CONFIG_MZEN2) += $(call cc-option,-march=znver2)
18301 +        cflags-$(CONFIG_MZEN3) += $(call cc-option,-march=znver3)
18303 +        cflags-$(CONFIG_MNATIVE_INTEL) += $(call cc-option,-march=native)
18304 +        cflags-$(CONFIG_MNATIVE_AMD) += $(call cc-option,-march=native)
18305 +        cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell)
18306 +        cflags-$(CONFIG_MCORE2) += $(call cc-option,-march=core2)
18307 +        cflags-$(CONFIG_MNEHALEM) += $(call cc-option,-march=nehalem)
18308 +        cflags-$(CONFIG_MWESTMERE) += $(call cc-option,-march=westmere)
18309 +        cflags-$(CONFIG_MSILVERMONT) += $(call cc-option,-march=silvermont)
18310 +        cflags-$(CONFIG_MGOLDMONT) += $(call cc-option,-march=goldmont)
18311 +        cflags-$(CONFIG_MGOLDMONTPLUS) += $(call cc-option,-march=goldmont-plus)
18312 +        cflags-$(CONFIG_MSANDYBRIDGE) += $(call cc-option,-march=sandybridge)
18313 +        cflags-$(CONFIG_MIVYBRIDGE) += $(call cc-option,-march=ivybridge)
18314 +        cflags-$(CONFIG_MHASWELL) += $(call cc-option,-march=haswell)
18315 +        cflags-$(CONFIG_MBROADWELL) += $(call cc-option,-march=broadwell)
18316 +        cflags-$(CONFIG_MSKYLAKE) += $(call cc-option,-march=skylake)
18317 +        cflags-$(CONFIG_MSKYLAKEX) += $(call cc-option,-march=skylake-avx512)
18318 +        cflags-$(CONFIG_MCANNONLAKE) += $(call cc-option,-march=cannonlake)
18319 +        cflags-$(CONFIG_MICELAKE) += $(call cc-option,-march=icelake-client)
18320 +        cflags-$(CONFIG_MCASCADELAKE) += $(call cc-option,-march=cascadelake)
18321 +        cflags-$(CONFIG_MCOOPERLAKE) += $(call cc-option,-march=cooperlake)
18322 +        cflags-$(CONFIG_MTIGERLAKE) += $(call cc-option,-march=tigerlake)
18323 +        cflags-$(CONFIG_MSAPPHIRERAPIDS) += $(call cc-option,-march=sapphirerapids)
18324 +        cflags-$(CONFIG_MROCKETLAKE) += $(call cc-option,-march=rocketlake)
18325 +        cflags-$(CONFIG_MALDERLAKE) += $(call cc-option,-march=alderlake)
18326 +        cflags-$(CONFIG_GENERIC_CPU2) += $(call cc-option,-march=x86-64-v2)
18327 +        cflags-$(CONFIG_GENERIC_CPU3) += $(call cc-option,-march=x86-64-v3)
18328 +        cflags-$(CONFIG_GENERIC_CPU4) += $(call cc-option,-march=x86-64-v4)
18329          cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
18330          KBUILD_CFLAGS += $(cflags-y)
18332 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
18333 index e0bc3988c3fa..6e5522aebbbd 100644
18334 --- a/arch/x86/boot/compressed/Makefile
18335 +++ b/arch/x86/boot/compressed/Makefile
18336 @@ -46,6 +46,7 @@ KBUILD_CFLAGS += -D__DISABLE_EXPORTS
18337  # Disable relocation relaxation in case the link is not PIE.
18338  KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no)
18339  KBUILD_CFLAGS += -include $(srctree)/include/linux/hidden.h
18340 +KBUILD_CFLAGS += $(CLANG_FLAGS)
18342  # sev-es.c indirectly inludes inat-table.h which is generated during
18343  # compilation and stored in $(objtree). Add the directory to the includes so
18344 diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
18345 index aa561795efd1..a6dea4e8a082 100644
18346 --- a/arch/x86/boot/compressed/mem_encrypt.S
18347 +++ b/arch/x86/boot/compressed/mem_encrypt.S
18348 @@ -23,12 +23,6 @@ SYM_FUNC_START(get_sev_encryption_bit)
18349         push    %ecx
18350         push    %edx
18352 -       /* Check if running under a hypervisor */
18353 -       movl    $1, %eax
18354 -       cpuid
18355 -       bt      $31, %ecx               /* Check the hypervisor bit */
18356 -       jnc     .Lno_sev
18358         movl    $0x80000000, %eax       /* CPUID to check the highest leaf */
18359         cpuid
18360         cmpl    $0x8000001f, %eax       /* See if 0x8000001f is available */
18361 diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
18362 index 646da46e8d10..1dfb8af48a3c 100644
18363 --- a/arch/x86/crypto/poly1305_glue.c
18364 +++ b/arch/x86/crypto/poly1305_glue.c
18365 @@ -16,7 +16,7 @@
18366  #include <asm/simd.h>
18368  asmlinkage void poly1305_init_x86_64(void *ctx,
18369 -                                    const u8 key[POLY1305_KEY_SIZE]);
18370 +                                    const u8 key[POLY1305_BLOCK_SIZE]);
18371  asmlinkage void poly1305_blocks_x86_64(void *ctx, const u8 *inp,
18372                                        const size_t len, const u32 padbit);
18373  asmlinkage void poly1305_emit_x86_64(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
18374 @@ -81,7 +81,7 @@ static void convert_to_base2_64(void *ctx)
18375         state->is_base2_26 = 0;
18378 -static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_KEY_SIZE])
18379 +static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_BLOCK_SIZE])
18381         poly1305_init_x86_64(ctx, key);
18383 @@ -129,7 +129,7 @@ static void poly1305_simd_emit(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
18384                 poly1305_emit_avx(ctx, mac, nonce);
18387 -void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
18388 +void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
18390         poly1305_simd_init(&dctx->h, key);
18391         dctx->s[0] = get_unaligned_le32(&key[16]);
18392 diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
18393 index a1c9f496fca6..4d0111f44d79 100644
18394 --- a/arch/x86/entry/syscalls/syscall_32.tbl
18395 +++ b/arch/x86/entry/syscalls/syscall_32.tbl
18396 @@ -447,3 +447,7 @@
18397  440    i386    process_madvise         sys_process_madvise
18398  441    i386    epoll_pwait2            sys_epoll_pwait2                compat_sys_epoll_pwait2
18399  442    i386    mount_setattr           sys_mount_setattr
18400 +443    i386    futex_wait              sys_futex_wait
18401 +444    i386    futex_wake              sys_futex_wake
18402 +445    i386    futex_waitv             sys_futex_waitv                 compat_sys_futex_waitv
18403 +446    i386    futex_requeue           sys_futex_requeue               compat_sys_futex_requeue
18404 diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
18405 index 7bf01cbe582f..61c0b47365e3 100644
18406 --- a/arch/x86/entry/syscalls/syscall_64.tbl
18407 +++ b/arch/x86/entry/syscalls/syscall_64.tbl
18408 @@ -364,6 +364,10 @@
18409  440    common  process_madvise         sys_process_madvise
18410  441    common  epoll_pwait2            sys_epoll_pwait2
18411  442    common  mount_setattr           sys_mount_setattr
18412 +443    common  futex_wait              sys_futex_wait
18413 +444    common  futex_wake              sys_futex_wake
18414 +445    common  futex_waitv             sys_futex_waitv
18415 +446    common  futex_requeue           sys_futex_requeue
18418  # Due to a historical design error, certain syscalls are numbered differently
18419 diff --git a/arch/x86/entry/vdso/vdso2c.h b/arch/x86/entry/vdso/vdso2c.h
18420 index 1c7cfac7e64a..5264daa8859f 100644
18421 --- a/arch/x86/entry/vdso/vdso2c.h
18422 +++ b/arch/x86/entry/vdso/vdso2c.h
18423 @@ -35,7 +35,7 @@ static void BITSFUNC(extract)(const unsigned char *data, size_t data_len,
18424         if (offset + len > data_len)
18425                 fail("section to extract overruns input data");
18427 -       fprintf(outfile, "static const unsigned char %s[%lu] = {", name, len);
18428 +       fprintf(outfile, "static const unsigned char %s[%zu] = {", name, len);
18429         BITSFUNC(copy)(outfile, data + offset, len);
18430         fprintf(outfile, "\n};\n\n");
18432 diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c
18433 index be50ef8572cc..6a98a7651621 100644
18434 --- a/arch/x86/events/amd/iommu.c
18435 +++ b/arch/x86/events/amd/iommu.c
18436 @@ -81,12 +81,12 @@ static struct attribute_group amd_iommu_events_group = {
18437  };
18439  struct amd_iommu_event_desc {
18440 -       struct kobj_attribute attr;
18441 +       struct device_attribute attr;
18442         const char *event;
18443  };
18445 -static ssize_t _iommu_event_show(struct kobject *kobj,
18446 -                               struct kobj_attribute *attr, char *buf)
18447 +static ssize_t _iommu_event_show(struct device *dev,
18448 +                               struct device_attribute *attr, char *buf)
18450         struct amd_iommu_event_desc *event =
18451                 container_of(attr, struct amd_iommu_event_desc, attr);
18452 diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
18453 index 7f014d450bc2..582c0ffb5e98 100644
18454 --- a/arch/x86/events/amd/uncore.c
18455 +++ b/arch/x86/events/amd/uncore.c
18456 @@ -275,14 +275,14 @@ static struct attribute_group amd_uncore_attr_group = {
18457  };
18459  #define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)                        \
18460 -static ssize_t __uncore_##_var##_show(struct kobject *kobj,            \
18461 -                               struct kobj_attribute *attr,            \
18462 +static ssize_t __uncore_##_var##_show(struct device *dev,              \
18463 +                               struct device_attribute *attr,          \
18464                                 char *page)                             \
18465  {                                                                      \
18466         BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);                     \
18467         return sprintf(page, _format "\n");                             \
18468  }                                                                      \
18469 -static struct kobj_attribute format_attr_##_var =                      \
18470 +static struct device_attribute format_attr_##_var =                    \
18471         __ATTR(_name, 0444, __uncore_##_var##_show, NULL)
18473  DEFINE_UNCORE_FORMAT_ATTR(event12,     event,          "config:0-7,32-35");
18474 diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
18475 index 5eb3bdf36a41..06b0789d61b9 100644
18476 --- a/arch/x86/include/asm/idtentry.h
18477 +++ b/arch/x86/include/asm/idtentry.h
18478 @@ -588,6 +588,21 @@ DECLARE_IDTENTRY_RAW(X86_TRAP_MC,  xenpv_exc_machine_check);
18479  #endif
18481  /* NMI */
18483 +#if defined(CONFIG_X86_64) && IS_ENABLED(CONFIG_KVM_INTEL)
18485 + * Special NOIST entry point for VMX which invokes this on the kernel
18486 + * stack. asm_exc_nmi() requires an IST to work correctly vs. the NMI
18487 + * 'executing' marker.
18488 + *
18489 + * On 32bit this just uses the regular NMI entry point because 32-bit does
18490 + * not have ISTs.
18491 + */
18492 +DECLARE_IDTENTRY(X86_TRAP_NMI,         exc_nmi_noist);
18493 +#else
18494 +#define asm_exc_nmi_noist              asm_exc_nmi
18495 +#endif
18497  DECLARE_IDTENTRY_NMI(X86_TRAP_NMI,     exc_nmi);
18498  #ifdef CONFIG_XEN_PV
18499  DECLARE_IDTENTRY_RAW(X86_TRAP_NMI,     xenpv_exc_nmi);
18500 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
18501 index 3768819693e5..eec2dcca2f39 100644
18502 --- a/arch/x86/include/asm/kvm_host.h
18503 +++ b/arch/x86/include/asm/kvm_host.h
18504 @@ -1753,6 +1753,7 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
18505                     unsigned long icr, int op_64_bit);
18507  void kvm_define_user_return_msr(unsigned index, u32 msr);
18508 +int kvm_probe_user_return_msr(u32 msr);
18509  int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask);
18511  u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
18512 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
18513 index a02c67291cfc..a6b5cfe1fc5a 100644
18514 --- a/arch/x86/include/asm/pgtable.h
18515 +++ b/arch/x86/include/asm/pgtable.h
18516 @@ -846,7 +846,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
18518  static inline int pmd_bad(pmd_t pmd)
18520 -       return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
18521 +       return ((pmd_flags(pmd) | _PAGE_ACCESSED) & ~_PAGE_USER) != _KERNPG_TABLE;
18524  static inline unsigned long pages_to_mb(unsigned long npg)
18525 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
18526 index f1b9ed5efaa9..908bcaea1361 100644
18527 --- a/arch/x86/include/asm/processor.h
18528 +++ b/arch/x86/include/asm/processor.h
18529 @@ -804,8 +804,10 @@ DECLARE_PER_CPU(u64, msr_misc_features_shadow);
18531  #ifdef CONFIG_CPU_SUP_AMD
18532  extern u32 amd_get_nodes_per_socket(void);
18533 +extern u32 amd_get_highest_perf(void);
18534  #else
18535  static inline u32 amd_get_nodes_per_socket(void)       { return 0; }
18536 +static inline u32 amd_get_highest_perf(void)           { return 0; }
18537  #endif
18539  static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
18540 diff --git a/arch/x86/include/asm/vermagic.h b/arch/x86/include/asm/vermagic.h
18541 index 75884d2cdec3..4e6a08d4c7e5 100644
18542 --- a/arch/x86/include/asm/vermagic.h
18543 +++ b/arch/x86/include/asm/vermagic.h
18544 @@ -17,6 +17,48 @@
18545  #define MODULE_PROC_FAMILY "586MMX "
18546  #elif defined CONFIG_MCORE2
18547  #define MODULE_PROC_FAMILY "CORE2 "
18548 +#elif defined CONFIG_MNATIVE_INTEL
18549 +#define MODULE_PROC_FAMILY "NATIVE_INTEL "
18550 +#elif defined CONFIG_MNATIVE_AMD
18551 +#define MODULE_PROC_FAMILY "NATIVE_AMD "
18552 +#elif defined CONFIG_MNEHALEM
18553 +#define MODULE_PROC_FAMILY "NEHALEM "
18554 +#elif defined CONFIG_MWESTMERE
18555 +#define MODULE_PROC_FAMILY "WESTMERE "
18556 +#elif defined CONFIG_MSILVERMONT
18557 +#define MODULE_PROC_FAMILY "SILVERMONT "
18558 +#elif defined CONFIG_MGOLDMONT
18559 +#define MODULE_PROC_FAMILY "GOLDMONT "
18560 +#elif defined CONFIG_MGOLDMONTPLUS
18561 +#define MODULE_PROC_FAMILY "GOLDMONTPLUS "
18562 +#elif defined CONFIG_MSANDYBRIDGE
18563 +#define MODULE_PROC_FAMILY "SANDYBRIDGE "
18564 +#elif defined CONFIG_MIVYBRIDGE
18565 +#define MODULE_PROC_FAMILY "IVYBRIDGE "
18566 +#elif defined CONFIG_MHASWELL
18567 +#define MODULE_PROC_FAMILY "HASWELL "
18568 +#elif defined CONFIG_MBROADWELL
18569 +#define MODULE_PROC_FAMILY "BROADWELL "
18570 +#elif defined CONFIG_MSKYLAKE
18571 +#define MODULE_PROC_FAMILY "SKYLAKE "
18572 +#elif defined CONFIG_MSKYLAKEX
18573 +#define MODULE_PROC_FAMILY "SKYLAKEX "
18574 +#elif defined CONFIG_MCANNONLAKE
18575 +#define MODULE_PROC_FAMILY "CANNONLAKE "
18576 +#elif defined CONFIG_MICELAKE
18577 +#define MODULE_PROC_FAMILY "ICELAKE "
18578 +#elif defined CONFIG_MCASCADELAKE
18579 +#define MODULE_PROC_FAMILY "CASCADELAKE "
18580 +#elif defined CONFIG_MCOOPERLAKE
18581 +#define MODULE_PROC_FAMILY "COOPERLAKE "
18582 +#elif defined CONFIG_MTIGERLAKE
18583 +#define MODULE_PROC_FAMILY "TIGERLAKE "
18584 +#elif defined CONFIG_MSAPPHIRERAPIDS
18585 +#define MODULE_PROC_FAMILY "SAPPHIRERAPIDS "
18586 +#elif defined CONFIG_ROCKETLAKE
18587 +#define MODULE_PROC_FAMILY "ROCKETLAKE "
18588 +#elif defined CONFIG_MALDERLAKE
18589 +#define MODULE_PROC_FAMILY "ALDERLAKE "
18590  #elif defined CONFIG_MATOM
18591  #define MODULE_PROC_FAMILY "ATOM "
18592  #elif defined CONFIG_M686
18593 @@ -35,6 +77,30 @@
18594  #define MODULE_PROC_FAMILY "K7 "
18595  #elif defined CONFIG_MK8
18596  #define MODULE_PROC_FAMILY "K8 "
18597 +#elif defined CONFIG_MK8SSE3
18598 +#define MODULE_PROC_FAMILY "K8SSE3 "
18599 +#elif defined CONFIG_MK10
18600 +#define MODULE_PROC_FAMILY "K10 "
18601 +#elif defined CONFIG_MBARCELONA
18602 +#define MODULE_PROC_FAMILY "BARCELONA "
18603 +#elif defined CONFIG_MBOBCAT
18604 +#define MODULE_PROC_FAMILY "BOBCAT "
18605 +#elif defined CONFIG_MBULLDOZER
18606 +#define MODULE_PROC_FAMILY "BULLDOZER "
18607 +#elif defined CONFIG_MPILEDRIVER
18608 +#define MODULE_PROC_FAMILY "PILEDRIVER "
18609 +#elif defined CONFIG_MSTEAMROLLER
18610 +#define MODULE_PROC_FAMILY "STEAMROLLER "
18611 +#elif defined CONFIG_MJAGUAR
18612 +#define MODULE_PROC_FAMILY "JAGUAR "
18613 +#elif defined CONFIG_MEXCAVATOR
18614 +#define MODULE_PROC_FAMILY "EXCAVATOR "
18615 +#elif defined CONFIG_MZEN
18616 +#define MODULE_PROC_FAMILY "ZEN "
18617 +#elif defined CONFIG_MZEN2
18618 +#define MODULE_PROC_FAMILY "ZEN2 "
18619 +#elif defined CONFIG_MZEN3
18620 +#define MODULE_PROC_FAMILY "ZEN3 "
18621  #elif defined CONFIG_MELAN
18622  #define MODULE_PROC_FAMILY "ELAN "
18623  #elif defined CONFIG_MCRUSOE
18624 diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
18625 index 52bc217ca8c3..c9ddd233e32f 100644
18626 --- a/arch/x86/kernel/apic/x2apic_uv_x.c
18627 +++ b/arch/x86/kernel/apic/x2apic_uv_x.c
18628 @@ -1671,6 +1671,9 @@ static __init int uv_system_init_hubless(void)
18629         if (rc < 0)
18630                 return rc;
18632 +       /* Set section block size for current node memory */
18633 +       set_block_size();
18635         /* Create user access node */
18636         if (rc >= 0)
18637                 uv_setup_proc_files(1);
18638 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
18639 index 347a956f71ca..eedb2b320946 100644
18640 --- a/arch/x86/kernel/cpu/amd.c
18641 +++ b/arch/x86/kernel/cpu/amd.c
18642 @@ -1170,3 +1170,19 @@ void set_dr_addr_mask(unsigned long mask, int dr)
18643                 break;
18644         }
18647 +u32 amd_get_highest_perf(void)
18649 +       struct cpuinfo_x86 *c = &boot_cpu_data;
18651 +       if (c->x86 == 0x17 && ((c->x86_model >= 0x30 && c->x86_model < 0x40) ||
18652 +                              (c->x86_model >= 0x70 && c->x86_model < 0x80)))
18653 +               return 166;
18655 +       if (c->x86 == 0x19 && ((c->x86_model >= 0x20 && c->x86_model < 0x30) ||
18656 +                              (c->x86_model >= 0x40 && c->x86_model < 0x70)))
18657 +               return 166;
18659 +       return 255;
18661 +EXPORT_SYMBOL_GPL(amd_get_highest_perf);
18662 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
18663 index ab640abe26b6..1e576cc831c1 100644
18664 --- a/arch/x86/kernel/cpu/common.c
18665 +++ b/arch/x86/kernel/cpu/common.c
18666 @@ -1850,7 +1850,7 @@ static inline void setup_getcpu(int cpu)
18667         unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
18668         struct desc_struct d = { };
18670 -       if (boot_cpu_has(X86_FEATURE_RDTSCP))
18671 +       if (boot_cpu_has(X86_FEATURE_RDTSCP) || boot_cpu_has(X86_FEATURE_RDPID))
18672                 write_rdtscp_aux(cpudata);
18674         /* Store CPU and node number in limit. */
18675 diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
18676 index b935e1b5f115..6a6318e9590c 100644
18677 --- a/arch/x86/kernel/cpu/microcode/core.c
18678 +++ b/arch/x86/kernel/cpu/microcode/core.c
18679 @@ -629,16 +629,16 @@ static ssize_t reload_store(struct device *dev,
18680         if (val != 1)
18681                 return size;
18683 -       tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
18684 -       if (tmp_ret != UCODE_NEW)
18685 -               return size;
18687         get_online_cpus();
18689         ret = check_online_cpus();
18690         if (ret)
18691                 goto put;
18693 +       tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
18694 +       if (tmp_ret != UCODE_NEW)
18695 +               goto put;
18697         mutex_lock(&microcode_mutex);
18698         ret = microcode_reload_late();
18699         mutex_unlock(&microcode_mutex);
18700 diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
18701 index 22aad412f965..629c4994f165 100644
18702 --- a/arch/x86/kernel/e820.c
18703 +++ b/arch/x86/kernel/e820.c
18704 @@ -31,8 +31,8 @@
18705   *       - inform the user about the firmware's notion of memory layout
18706   *         via /sys/firmware/memmap
18707   *
18708 - *       - the hibernation code uses it to generate a kernel-independent MD5
18709 - *         fingerprint of the physical memory layout of a system.
18710 + *       - the hibernation code uses it to generate a kernel-independent CRC32
18711 + *         checksum of the physical memory layout of a system.
18712   *
18713   * - 'e820_table_kexec': a slightly modified (by the kernel) firmware version
18714   *   passed to us by the bootloader - the major difference between
18715 diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
18716 index df776cdca327..0bb9fe021bbe 100644
18717 --- a/arch/x86/kernel/kprobes/core.c
18718 +++ b/arch/x86/kernel/kprobes/core.c
18719 @@ -139,6 +139,8 @@ NOKPROBE_SYMBOL(synthesize_relcall);
18720  int can_boost(struct insn *insn, void *addr)
18722         kprobe_opcode_t opcode;
18723 +       insn_byte_t prefix;
18724 +       int i;
18726         if (search_exception_tables((unsigned long)addr))
18727                 return 0;       /* Page fault may occur on this address. */
18728 @@ -151,9 +153,14 @@ int can_boost(struct insn *insn, void *addr)
18729         if (insn->opcode.nbytes != 1)
18730                 return 0;
18732 -       /* Can't boost Address-size override prefix */
18733 -       if (unlikely(inat_is_address_size_prefix(insn->attr)))
18734 -               return 0;
18735 +       for_each_insn_prefix(insn, i, prefix) {
18736 +               insn_attr_t attr;
18738 +               attr = inat_get_opcode_attribute(prefix);
18739 +               /* Can't boost Address-size override prefix and CS override prefix */
18740 +               if (prefix == 0x2e || inat_is_address_size_prefix(attr))
18741 +                       return 0;
18742 +       }
18744         opcode = insn->opcode.bytes[0];
18746 @@ -178,8 +185,8 @@ int can_boost(struct insn *insn, void *addr)
18747                 /* clear and set flags are boostable */
18748                 return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
18749         default:
18750 -               /* CS override prefix and call are not boostable */
18751 -               return (opcode != 0x2e && opcode != 0x9a);
18752 +               /* call is not boostable */
18753 +               return opcode != 0x9a;
18754         }
18757 @@ -448,7 +455,11 @@ static void set_resume_flags(struct kprobe *p, struct insn *insn)
18758                 break;
18759  #endif
18760         case 0xff:
18761 -               opcode = insn->opcode.bytes[1];
18762 +               /*
18763 +                * Since the 0xff is an extended group opcode, the instruction
18764 +                * is determined by the MOD/RM byte.
18765 +                */
18766 +               opcode = insn->modrm.bytes[0];
18767                 if ((opcode & 0x30) == 0x10) {
18768                         /*
18769                          * call absolute, indirect
18770 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
18771 index bf250a339655..2ef961cf4cfc 100644
18772 --- a/arch/x86/kernel/nmi.c
18773 +++ b/arch/x86/kernel/nmi.c
18774 @@ -524,6 +524,16 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
18775                 mds_user_clear_cpu_buffers();
18778 +#if defined(CONFIG_X86_64) && IS_ENABLED(CONFIG_KVM_INTEL)
18779 +DEFINE_IDTENTRY_RAW(exc_nmi_noist)
18781 +       exc_nmi(regs);
18783 +#endif
18784 +#if IS_MODULE(CONFIG_KVM_INTEL)
18785 +EXPORT_SYMBOL_GPL(asm_exc_nmi_noist);
18786 +#endif
18788  void stop_nmi(void)
18790         ignore_nmis++;
18791 diff --git a/arch/x86/kernel/sev-es-shared.c b/arch/x86/kernel/sev-es-shared.c
18792 index cdc04d091242..387b71669818 100644
18793 --- a/arch/x86/kernel/sev-es-shared.c
18794 +++ b/arch/x86/kernel/sev-es-shared.c
18795 @@ -186,7 +186,6 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
18796          * make it accessible to the hypervisor.
18797          *
18798          * In particular, check for:
18799 -        *      - Hypervisor CPUID bit
18800          *      - Availability of CPUID leaf 0x8000001f
18801          *      - SEV CPUID bit.
18802          *
18803 @@ -194,10 +193,7 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
18804          * can't be checked here.
18805          */
18807 -       if ((fn == 1 && !(regs->cx & BIT(31))))
18808 -               /* Hypervisor bit */
18809 -               goto fail;
18810 -       else if (fn == 0x80000000 && (regs->ax < 0x8000001f))
18811 +       if (fn == 0x80000000 && (regs->ax < 0x8000001f))
18812                 /* SEV leaf check */
18813                 goto fail;
18814         else if ((fn == 0x8000001f && !(regs->ax & BIT(1))))
18815 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
18816 index 16703c35a944..363b36bbd791 100644
18817 --- a/arch/x86/kernel/smpboot.c
18818 +++ b/arch/x86/kernel/smpboot.c
18819 @@ -458,29 +458,52 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
18820         return false;
18823 +static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
18825 +       if (c->phys_proc_id == o->phys_proc_id &&
18826 +           c->cpu_die_id == o->cpu_die_id)
18827 +               return true;
18828 +       return false;
18831  /*
18832 - * Define snc_cpu[] for SNC (Sub-NUMA Cluster) CPUs.
18833 + * Unlike the other levels, we do not enforce keeping a
18834 + * multicore group inside a NUMA node.  If this happens, we will
18835 + * discard the MC level of the topology later.
18836 + */
18837 +static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
18839 +       if (c->phys_proc_id == o->phys_proc_id)
18840 +               return true;
18841 +       return false;
18845 + * Define intel_cod_cpu[] for Intel COD (Cluster-on-Die) CPUs.
18846   *
18847 - * These are Intel CPUs that enumerate an LLC that is shared by
18848 - * multiple NUMA nodes. The LLC on these systems is shared for
18849 - * off-package data access but private to the NUMA node (half
18850 - * of the package) for on-package access.
18851 + * Any Intel CPU that has multiple nodes per package and does not
18852 + * match intel_cod_cpu[] has the SNC (Sub-NUMA Cluster) topology.
18853   *
18854 - * CPUID (the source of the information about the LLC) can only
18855 - * enumerate the cache as being shared *or* unshared, but not
18856 - * this particular configuration. The CPU in this case enumerates
18857 - * the cache to be shared across the entire package (spanning both
18858 - * NUMA nodes).
18859 + * When in SNC mode, these CPUs enumerate an LLC that is shared
18860 + * by multiple NUMA nodes. The LLC is shared for off-package data
18861 + * access but private to the NUMA node (half of the package) for
18862 + * on-package access. CPUID (the source of the information about
18863 + * the LLC) can only enumerate the cache as shared or unshared,
18864 + * but not this particular configuration.
18865   */
18867 -static const struct x86_cpu_id snc_cpu[] = {
18868 -       X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
18869 +static const struct x86_cpu_id intel_cod_cpu[] = {
18870 +       X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, 0),       /* COD */
18871 +       X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, 0),     /* COD */
18872 +       X86_MATCH_INTEL_FAM6_MODEL(ANY, 1),             /* SNC */
18873         {}
18874  };
18876  static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
18878 +       const struct x86_cpu_id *id = x86_match_cpu(intel_cod_cpu);
18879         int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
18880 +       bool intel_snc = id && id->driver_data;
18882         /* Do not match if we do not have a valid APICID for cpu: */
18883         if (per_cpu(cpu_llc_id, cpu1) == BAD_APICID)
18884 @@ -495,32 +518,12 @@ static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
18885          * means 'c' does not share the LLC of 'o'. This will be
18886          * reflected to userspace.
18887          */
18888 -       if (!topology_same_node(c, o) && x86_match_cpu(snc_cpu))
18889 +       if (match_pkg(c, o) && !topology_same_node(c, o) && intel_snc)
18890                 return false;
18892         return topology_sane(c, o, "llc");
18896 - * Unlike the other levels, we do not enforce keeping a
18897 - * multicore group inside a NUMA node.  If this happens, we will
18898 - * discard the MC level of the topology later.
18899 - */
18900 -static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
18902 -       if (c->phys_proc_id == o->phys_proc_id)
18903 -               return true;
18904 -       return false;
18907 -static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
18909 -       if ((c->phys_proc_id == o->phys_proc_id) &&
18910 -               (c->cpu_die_id == o->cpu_die_id))
18911 -               return true;
18912 -       return false;
18916  #if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC)
18917  static inline int x86_sched_itmt_flags(void)
18918 @@ -592,14 +595,23 @@ void set_cpu_sibling_map(int cpu)
18919         for_each_cpu(i, cpu_sibling_setup_mask) {
18920                 o = &cpu_data(i);
18922 +               if (match_pkg(c, o) && !topology_same_node(c, o))
18923 +                       x86_has_numa_in_package = true;
18925                 if ((i == cpu) || (has_smt && match_smt(c, o)))
18926                         link_mask(topology_sibling_cpumask, cpu, i);
18928                 if ((i == cpu) || (has_mp && match_llc(c, o)))
18929                         link_mask(cpu_llc_shared_mask, cpu, i);
18931 +               if ((i == cpu) || (has_mp && match_die(c, o)))
18932 +                       link_mask(topology_die_cpumask, cpu, i);
18933         }
18935 +       threads = cpumask_weight(topology_sibling_cpumask(cpu));
18936 +       if (threads > __max_smt_threads)
18937 +               __max_smt_threads = threads;
18939         /*
18940          * This needs a separate iteration over the cpus because we rely on all
18941          * topology_sibling_cpumask links to be set-up.
18942 @@ -613,8 +625,7 @@ void set_cpu_sibling_map(int cpu)
18943                         /*
18944                          *  Does this new cpu bringup a new core?
18945                          */
18946 -                       if (cpumask_weight(
18947 -                           topology_sibling_cpumask(cpu)) == 1) {
18948 +                       if (threads == 1) {
18949                                 /*
18950                                  * for each core in package, increment
18951                                  * the booted_cores for this new cpu
18952 @@ -631,16 +642,7 @@ void set_cpu_sibling_map(int cpu)
18953                         } else if (i != cpu && !c->booted_cores)
18954                                 c->booted_cores = cpu_data(i).booted_cores;
18955                 }
18956 -               if (match_pkg(c, o) && !topology_same_node(c, o))
18957 -                       x86_has_numa_in_package = true;
18959 -               if ((i == cpu) || (has_mp && match_die(c, o)))
18960 -                       link_mask(topology_die_cpumask, cpu, i);
18961         }
18963 -       threads = cpumask_weight(topology_sibling_cpumask(cpu));
18964 -       if (threads > __max_smt_threads)
18965 -               __max_smt_threads = threads;
18968  /* maps the cpu to the sched domain representing multi-core */
18969 @@ -2044,7 +2046,7 @@ static bool amd_set_max_freq_ratio(void)
18970                 return false;
18971         }
18973 -       highest_perf = perf_caps.highest_perf;
18974 +       highest_perf = amd_get_highest_perf();
18975         nominal_perf = perf_caps.nominal_perf;
18977         if (!highest_perf || !nominal_perf) {
18978 diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
18979 index 6bd2f8b830e4..62f795352c02 100644
18980 --- a/arch/x86/kvm/cpuid.c
18981 +++ b/arch/x86/kvm/cpuid.c
18982 @@ -589,7 +589,8 @@ static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
18983         case 7:
18984                 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
18985                 entry->eax = 0;
18986 -               entry->ecx = F(RDPID);
18987 +               if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
18988 +                       entry->ecx = F(RDPID);
18989                 ++array->nent;
18990         default:
18991                 break;
18992 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
18993 index f7970ba6219f..8fc71e70857d 100644
18994 --- a/arch/x86/kvm/emulate.c
18995 +++ b/arch/x86/kvm/emulate.c
18996 @@ -4220,7 +4220,7 @@ static bool valid_cr(int nr)
18997         }
19000 -static int check_cr_read(struct x86_emulate_ctxt *ctxt)
19001 +static int check_cr_access(struct x86_emulate_ctxt *ctxt)
19003         if (!valid_cr(ctxt->modrm_reg))
19004                 return emulate_ud(ctxt);
19005 @@ -4228,80 +4228,6 @@ static int check_cr_read(struct x86_emulate_ctxt *ctxt)
19006         return X86EMUL_CONTINUE;
19009 -static int check_cr_write(struct x86_emulate_ctxt *ctxt)
19011 -       u64 new_val = ctxt->src.val64;
19012 -       int cr = ctxt->modrm_reg;
19013 -       u64 efer = 0;
19015 -       static u64 cr_reserved_bits[] = {
19016 -               0xffffffff00000000ULL,
19017 -               0, 0, 0, /* CR3 checked later */
19018 -               CR4_RESERVED_BITS,
19019 -               0, 0, 0,
19020 -               CR8_RESERVED_BITS,
19021 -       };
19023 -       if (!valid_cr(cr))
19024 -               return emulate_ud(ctxt);
19026 -       if (new_val & cr_reserved_bits[cr])
19027 -               return emulate_gp(ctxt, 0);
19029 -       switch (cr) {
19030 -       case 0: {
19031 -               u64 cr4;
19032 -               if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
19033 -                   ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
19034 -                       return emulate_gp(ctxt, 0);
19036 -               cr4 = ctxt->ops->get_cr(ctxt, 4);
19037 -               ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
19039 -               if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
19040 -                   !(cr4 & X86_CR4_PAE))
19041 -                       return emulate_gp(ctxt, 0);
19043 -               break;
19044 -               }
19045 -       case 3: {
19046 -               u64 rsvd = 0;
19048 -               ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
19049 -               if (efer & EFER_LMA) {
19050 -                       u64 maxphyaddr;
19051 -                       u32 eax, ebx, ecx, edx;
19053 -                       eax = 0x80000008;
19054 -                       ecx = 0;
19055 -                       if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
19056 -                                                &edx, true))
19057 -                               maxphyaddr = eax & 0xff;
19058 -                       else
19059 -                               maxphyaddr = 36;
19060 -                       rsvd = rsvd_bits(maxphyaddr, 63);
19061 -                       if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE)
19062 -                               rsvd &= ~X86_CR3_PCID_NOFLUSH;
19063 -               }
19065 -               if (new_val & rsvd)
19066 -                       return emulate_gp(ctxt, 0);
19068 -               break;
19069 -               }
19070 -       case 4: {
19071 -               ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
19073 -               if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
19074 -                       return emulate_gp(ctxt, 0);
19076 -               break;
19077 -               }
19078 -       }
19080 -       return X86EMUL_CONTINUE;
19083  static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
19085         unsigned long dr7;
19086 @@ -4576,7 +4502,7 @@ static const struct opcode group8[] = {
19087   * from the register case of group9.
19088   */
19089  static const struct gprefix pfx_0f_c7_7 = {
19090 -       N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdtscp),
19091 +       N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdpid),
19092  };
19095 @@ -4841,10 +4767,10 @@ static const struct opcode twobyte_table[256] = {
19096         D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
19097         D(ImplicitOps | ModRM | SrcMem | NoAccess), /* NOP + 7 * reserved NOP */
19098         /* 0x20 - 0x2F */
19099 -       DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
19100 +       DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_access),
19101         DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
19102         IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
19103 -                                               check_cr_write),
19104 +                                               check_cr_access),
19105         IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
19106                                                 check_dr_write),
19107         N, N, N, N,
19108 diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
19109 index 0d359115429a..f016838faedd 100644
19110 --- a/arch/x86/kvm/kvm_emulate.h
19111 +++ b/arch/x86/kvm/kvm_emulate.h
19112 @@ -468,6 +468,7 @@ enum x86_intercept {
19113         x86_intercept_clgi,
19114         x86_intercept_skinit,
19115         x86_intercept_rdtscp,
19116 +       x86_intercept_rdpid,
19117         x86_intercept_icebp,
19118         x86_intercept_wbinvd,
19119         x86_intercept_monitor,
19120 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
19121 index cc369b9ad8f1..fa023f3feb25 100644
19122 --- a/arch/x86/kvm/lapic.c
19123 +++ b/arch/x86/kvm/lapic.c
19124 @@ -296,6 +296,10 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
19126                 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
19127         }
19129 +       /* Check if there are APF page ready requests pending */
19130 +       if (enabled)
19131 +               kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
19134  static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
19135 @@ -1909,8 +1913,8 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
19136         if (!apic->lapic_timer.hv_timer_in_use)
19137                 goto out;
19138         WARN_ON(rcuwait_active(&vcpu->wait));
19139 -       cancel_hv_timer(apic);
19140         apic_timer_expired(apic, false);
19141 +       cancel_hv_timer(apic);
19143         if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
19144                 advance_periodic_target_expiration(apic);
19145 @@ -2261,6 +2265,8 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
19146                 if (value & MSR_IA32_APICBASE_ENABLE) {
19147                         kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
19148                         static_branch_slow_dec_deferred(&apic_hw_disabled);
19149 +                       /* Check if there are APF page ready requests pending */
19150 +                       kvm_make_request(KVM_REQ_APF_READY, vcpu);
19151                 } else {
19152                         static_branch_inc(&apic_hw_disabled.key);
19153                         atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
19154 diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
19155 index 951dae4e7175..cd0faa187674 100644
19156 --- a/arch/x86/kvm/mmu/mmu.c
19157 +++ b/arch/x86/kvm/mmu/mmu.c
19158 @@ -3193,14 +3193,14 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
19159                 if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
19160                     (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
19161                         mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list);
19162 -               } else {
19163 +               } else if (mmu->pae_root) {
19164                         for (i = 0; i < 4; ++i)
19165                                 if (mmu->pae_root[i] != 0)
19166                                         mmu_free_root_page(kvm,
19167                                                            &mmu->pae_root[i],
19168                                                            &invalid_list);
19169 -                       mmu->root_hpa = INVALID_PAGE;
19170                 }
19171 +               mmu->root_hpa = INVALID_PAGE;
19172                 mmu->root_pgd = 0;
19173         }
19175 @@ -3312,9 +3312,23 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
19176          * the shadow page table may be a PAE or a long mode page table.
19177          */
19178         pm_mask = PT_PRESENT_MASK;
19179 -       if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL)
19180 +       if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
19181                 pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
19183 +               /*
19184 +                * Allocate the page for the PDPTEs when shadowing 32-bit NPT
19185 +                * with 64-bit only when needed.  Unlike 32-bit NPT, it doesn't
19186 +                * need to be in low mem.  See also lm_root below.
19187 +                */
19188 +               if (!vcpu->arch.mmu->pae_root) {
19189 +                       WARN_ON_ONCE(!tdp_enabled);
19191 +                       vcpu->arch.mmu->pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
19192 +                       if (!vcpu->arch.mmu->pae_root)
19193 +                               return -ENOMEM;
19194 +               }
19195 +       }
19197         for (i = 0; i < 4; ++i) {
19198                 MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->pae_root[i]));
19199                 if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) {
19200 @@ -3337,21 +3351,19 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
19201         vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
19203         /*
19204 -        * If we shadow a 32 bit page table with a long mode page
19205 -        * table we enter this path.
19206 +        * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
19207 +        * tables are allocated and initialized at MMU creation as there is no
19208 +        * equivalent level in the guest's NPT to shadow.  Allocate the tables
19209 +        * on demand, as running a 32-bit L1 VMM is very rare.  The PDP is
19210 +        * handled above (to share logic with PAE), deal with the PML4 here.
19211          */
19212         if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
19213                 if (vcpu->arch.mmu->lm_root == NULL) {
19214 -                       /*
19215 -                        * The additional page necessary for this is only
19216 -                        * allocated on demand.
19217 -                        */
19219                         u64 *lm_root;
19221                         lm_root = (void*)get_zeroed_page(GFP_KERNEL_ACCOUNT);
19222 -                       if (lm_root == NULL)
19223 -                               return 1;
19224 +                       if (!lm_root)
19225 +                               return -ENOMEM;
19227                         lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask;
19229 @@ -3653,6 +3665,14 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
19230         struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
19231         bool async;
19233 +       /*
19234 +        * Retry the page fault if the gfn hit a memslot that is being deleted
19235 +        * or moved.  This ensures any existing SPTEs for the old memslot will
19236 +        * be zapped before KVM inserts a new MMIO SPTE for the gfn.
19237 +        */
19238 +       if (slot && (slot->flags & KVM_MEMSLOT_INVALID))
19239 +               return true;
19241         /* Don't expose private memslots to L2. */
19242         if (is_guest_mode(vcpu) && !kvm_is_visible_memslot(slot)) {
19243                 *pfn = KVM_PFN_NOSLOT;
19244 @@ -4615,12 +4635,17 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
19245         struct kvm_mmu *context = &vcpu->arch.guest_mmu;
19246         union kvm_mmu_role new_role = kvm_calc_shadow_npt_root_page_role(vcpu);
19248 -       context->shadow_root_level = new_role.base.level;
19250         __kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base, false, false);
19252 -       if (new_role.as_u64 != context->mmu_role.as_u64)
19253 +       if (new_role.as_u64 != context->mmu_role.as_u64) {
19254                 shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
19256 +               /*
19257 +                * Override the level set by the common init helper, nested TDP
19258 +                * always uses the host's TDP configuration.
19259 +                */
19260 +               context->shadow_root_level = new_role.base.level;
19261 +       }
19263  EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
19265 @@ -5240,9 +5265,11 @@ static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
19266          * while the PDP table is a per-vCPU construct that's allocated at MMU
19267          * creation.  When emulating 32-bit mode, cr3 is only 32 bits even on
19268          * x86_64.  Therefore we need to allocate the PDP table in the first
19269 -        * 4GB of memory, which happens to fit the DMA32 zone.  Except for
19270 -        * SVM's 32-bit NPT support, TDP paging doesn't use PAE paging and can
19271 -        * skip allocating the PDP table.
19272 +        * 4GB of memory, which happens to fit the DMA32 zone.  TDP paging
19273 +        * generally doesn't use PAE paging and can skip allocating the PDP
19274 +        * table.  The main exception, handled here, is SVM's 32-bit NPT.  The
19275 +        * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit
19276 +        * KVM; that horror is handled on-demand by mmu_alloc_shadow_roots().
19277          */
19278         if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
19279                 return 0;
19280 diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
19281 index 874ea309279f..dbc6214d69de 100644
19282 --- a/arch/x86/kvm/svm/sev.c
19283 +++ b/arch/x86/kvm/svm/sev.c
19284 @@ -87,7 +87,7 @@ static bool __sev_recycle_asids(int min_asid, int max_asid)
19285         return true;
19288 -static int sev_asid_new(struct kvm_sev_info *sev)
19289 +static int sev_asid_new(bool es_active)
19291         int pos, min_asid, max_asid;
19292         bool retry = true;
19293 @@ -98,8 +98,8 @@ static int sev_asid_new(struct kvm_sev_info *sev)
19294          * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid.
19295          * SEV-ES-enabled guest can use from 1 to min_sev_asid - 1.
19296          */
19297 -       min_asid = sev->es_active ? 0 : min_sev_asid - 1;
19298 -       max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
19299 +       min_asid = es_active ? 0 : min_sev_asid - 1;
19300 +       max_asid = es_active ? min_sev_asid - 1 : max_sev_asid;
19301  again:
19302         pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_asid);
19303         if (pos >= max_asid) {
19304 @@ -179,13 +179,17 @@ static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
19305  static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
19307         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
19308 +       bool es_active = argp->id == KVM_SEV_ES_INIT;
19309         int asid, ret;
19311 +       if (kvm->created_vcpus)
19312 +               return -EINVAL;
19314         ret = -EBUSY;
19315         if (unlikely(sev->active))
19316                 return ret;
19318 -       asid = sev_asid_new(sev);
19319 +       asid = sev_asid_new(es_active);
19320         if (asid < 0)
19321                 return ret;
19323 @@ -194,6 +198,7 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
19324                 goto e_free;
19326         sev->active = true;
19327 +       sev->es_active = es_active;
19328         sev->asid = asid;
19329         INIT_LIST_HEAD(&sev->regions_list);
19331 @@ -204,16 +209,6 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
19332         return ret;
19335 -static int sev_es_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
19337 -       if (!sev_es)
19338 -               return -ENOTTY;
19340 -       to_kvm_svm(kvm)->sev_info.es_active = true;
19342 -       return sev_guest_init(kvm, argp);
19345  static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
19347         struct sev_data_activate *data;
19348 @@ -564,6 +559,7 @@ static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
19350         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
19351         struct sev_data_launch_update_vmsa *vmsa;
19352 +       struct kvm_vcpu *vcpu;
19353         int i, ret;
19355         if (!sev_es_guest(kvm))
19356 @@ -573,8 +569,8 @@ static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
19357         if (!vmsa)
19358                 return -ENOMEM;
19360 -       for (i = 0; i < kvm->created_vcpus; i++) {
19361 -               struct vcpu_svm *svm = to_svm(kvm->vcpus[i]);
19362 +       kvm_for_each_vcpu(i, vcpu, kvm) {
19363 +               struct vcpu_svm *svm = to_svm(vcpu);
19365                 /* Perform some pre-encryption checks against the VMSA */
19366                 ret = sev_es_sync_vmsa(svm);
19367 @@ -1127,12 +1123,15 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
19368         mutex_lock(&kvm->lock);
19370         switch (sev_cmd.id) {
19371 +       case KVM_SEV_ES_INIT:
19372 +               if (!sev_es) {
19373 +                       r = -ENOTTY;
19374 +                       goto out;
19375 +               }
19376 +               fallthrough;
19377         case KVM_SEV_INIT:
19378                 r = sev_guest_init(kvm, &sev_cmd);
19379                 break;
19380 -       case KVM_SEV_ES_INIT:
19381 -               r = sev_es_guest_init(kvm, &sev_cmd);
19382 -               break;
19383         case KVM_SEV_LAUNCH_START:
19384                 r = sev_launch_start(kvm, &sev_cmd);
19385                 break;
19386 @@ -1349,8 +1348,11 @@ void __init sev_hardware_setup(void)
19387                 goto out;
19389         sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
19390 -       if (!sev_reclaim_asid_bitmap)
19391 +       if (!sev_reclaim_asid_bitmap) {
19392 +               bitmap_free(sev_asid_bitmap);
19393 +               sev_asid_bitmap = NULL;
19394                 goto out;
19395 +       }
19397         pr_info("SEV supported: %u ASIDs\n", max_sev_asid - min_sev_asid + 1);
19398         sev_supported = true;
19399 @@ -1666,7 +1668,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
19400         return -EINVAL;
19403 -static void pre_sev_es_run(struct vcpu_svm *svm)
19404 +void sev_es_unmap_ghcb(struct vcpu_svm *svm)
19406         if (!svm->ghcb)
19407                 return;
19408 @@ -1702,9 +1704,6 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu)
19409         struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
19410         int asid = sev_get_asid(svm->vcpu.kvm);
19412 -       /* Perform any SEV-ES pre-run actions */
19413 -       pre_sev_es_run(svm);
19415         /* Assign the asid allocated with this SEV guest */
19416         svm->asid = asid;
19418 @@ -2104,5 +2103,8 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
19419          * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
19420          * non-zero value.
19421          */
19422 +       if (!svm->ghcb)
19423 +               return;
19425         ghcb_set_sw_exit_info_2(svm->ghcb, 1);
19427 diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
19428 index 58a45bb139f8..48ee3deab64b 100644
19429 --- a/arch/x86/kvm/svm/svm.c
19430 +++ b/arch/x86/kvm/svm/svm.c
19431 @@ -564,9 +564,8 @@ static int svm_cpu_init(int cpu)
19432         clear_page(page_address(sd->save_area));
19434         if (svm_sev_enabled()) {
19435 -               sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1,
19436 -                                             sizeof(void *),
19437 -                                             GFP_KERNEL);
19438 +               sd->sev_vmcbs = kcalloc(max_sev_asid + 1, sizeof(void *),
19439 +                                       GFP_KERNEL);
19440                 if (!sd->sev_vmcbs)
19441                         goto free_save_area;
19442         }
19443 @@ -969,21 +968,6 @@ static __init int svm_hardware_setup(void)
19444                 kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
19445         }
19447 -       if (IS_ENABLED(CONFIG_KVM_AMD_SEV) && sev) {
19448 -               sev_hardware_setup();
19449 -       } else {
19450 -               sev = false;
19451 -               sev_es = false;
19452 -       }
19454 -       svm_adjust_mmio_mask();
19456 -       for_each_possible_cpu(cpu) {
19457 -               r = svm_cpu_init(cpu);
19458 -               if (r)
19459 -                       goto err;
19460 -       }
19462         /*
19463          * KVM's MMU doesn't support using 2-level paging for itself, and thus
19464          * NPT isn't supported if the host is using 2-level paging since host
19465 @@ -998,6 +982,21 @@ static __init int svm_hardware_setup(void)
19466         kvm_configure_mmu(npt_enabled, get_max_npt_level(), PG_LEVEL_1G);
19467         pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis");
19469 +       if (IS_ENABLED(CONFIG_KVM_AMD_SEV) && sev && npt_enabled) {
19470 +               sev_hardware_setup();
19471 +       } else {
19472 +               sev = false;
19473 +               sev_es = false;
19474 +       }
19476 +       svm_adjust_mmio_mask();
19478 +       for_each_possible_cpu(cpu) {
19479 +               r = svm_cpu_init(cpu);
19480 +               if (r)
19481 +                       goto err;
19482 +       }
19484         if (nrips) {
19485                 if (!boot_cpu_has(X86_FEATURE_NRIPS))
19486                         nrips = false;
19487 @@ -1417,6 +1416,9 @@ static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
19488         struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
19489         unsigned int i;
19491 +       if (sev_es_guest(vcpu->kvm))
19492 +               sev_es_unmap_ghcb(svm);
19494         if (svm->guest_state_loaded)
19495                 return;
19497 @@ -1898,7 +1900,7 @@ static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
19499  static int pf_interception(struct vcpu_svm *svm)
19501 -       u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
19502 +       u64 fault_address = svm->vmcb->control.exit_info_2;
19503         u64 error_code = svm->vmcb->control.exit_info_1;
19505         return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
19506 @@ -2738,6 +2740,10 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
19507         case MSR_TSC_AUX:
19508                 if (!boot_cpu_has(X86_FEATURE_RDTSCP))
19509                         return 1;
19510 +               if (!msr_info->host_initiated &&
19511 +                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
19512 +                   !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
19513 +                       return 1;
19514                 msr_info->data = svm->tsc_aux;
19515                 break;
19516         /*
19517 @@ -2809,7 +2815,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
19518  static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
19520         struct vcpu_svm *svm = to_svm(vcpu);
19521 -       if (!sev_es_guest(svm->vcpu.kvm) || !err)
19522 +       if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->ghcb))
19523                 return kvm_complete_insn_gp(&svm->vcpu, err);
19525         ghcb_set_sw_exit_info_1(svm->ghcb, 1);
19526 @@ -2946,6 +2952,11 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
19527                 if (!boot_cpu_has(X86_FEATURE_RDTSCP))
19528                         return 1;
19530 +               if (!msr->host_initiated &&
19531 +                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
19532 +                   !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
19533 +                       return 1;
19535                 /*
19536                  * This is rare, so we update the MSR here instead of using
19537                  * direct_access_msrs.  Doing that would require a rdmsr in
19538 diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
19539 index 39e071fdab0c..98da0b91f273 100644
19540 --- a/arch/x86/kvm/svm/svm.h
19541 +++ b/arch/x86/kvm/svm/svm.h
19542 @@ -571,6 +571,7 @@ void sev_es_init_vmcb(struct vcpu_svm *svm);
19543  void sev_es_create_vcpu(struct vcpu_svm *svm);
19544  void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
19545  void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu);
19546 +void sev_es_unmap_ghcb(struct vcpu_svm *svm);
19548  /* vmenter.S */
19550 diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
19551 index bcca0b80e0d0..4ba2a43e188b 100644
19552 --- a/arch/x86/kvm/vmx/nested.c
19553 +++ b/arch/x86/kvm/vmx/nested.c
19554 @@ -619,6 +619,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
19555         }
19557         /* KVM unconditionally exposes the FS/GS base MSRs to L1. */
19558 +#ifdef CONFIG_X86_64
19559         nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
19560                                              MSR_FS_BASE, MSR_TYPE_RW);
19562 @@ -627,6 +628,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
19564         nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
19565                                              MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
19566 +#endif
19568         /*
19569          * Checking the L0->L1 bitmap is trying to verify two things:
19570 @@ -3098,15 +3100,8 @@ static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
19571                         nested_vmx_handle_enlightened_vmptrld(vcpu, false);
19573                 if (evmptrld_status == EVMPTRLD_VMFAIL ||
19574 -                   evmptrld_status == EVMPTRLD_ERROR) {
19575 -                       pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
19576 -                                            __func__);
19577 -                       vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
19578 -                       vcpu->run->internal.suberror =
19579 -                               KVM_INTERNAL_ERROR_EMULATION;
19580 -                       vcpu->run->internal.ndata = 0;
19581 +                   evmptrld_status == EVMPTRLD_ERROR)
19582                         return false;
19583 -               }
19584         }
19586         return true;
19587 @@ -3194,8 +3189,16 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
19589  static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
19591 -       if (!nested_get_evmcs_page(vcpu))
19592 +       if (!nested_get_evmcs_page(vcpu)) {
19593 +               pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
19594 +                                    __func__);
19595 +               vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
19596 +               vcpu->run->internal.suberror =
19597 +                       KVM_INTERNAL_ERROR_EMULATION;
19598 +               vcpu->run->internal.ndata = 0;
19600                 return false;
19601 +       }
19603         if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
19604                 return false;
19605 @@ -4422,7 +4425,15 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
19606         /* trying to cancel vmlaunch/vmresume is a bug */
19607         WARN_ON_ONCE(vmx->nested.nested_run_pending);
19609 -       kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
19610 +       if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
19611 +               /*
19612 +                * KVM_REQ_GET_NESTED_STATE_PAGES is also used to map
19613 +                * Enlightened VMCS after migration and we still need to
19614 +                * do that when something is forcing L2->L1 exit prior to
19615 +                * the first L2 run.
19616 +                */
19617 +               (void)nested_get_evmcs_page(vcpu);
19618 +       }
19620         /* Service the TLB flush request for L2 before switching to L1. */
19621         if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
19622 @@ -4601,9 +4612,9 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
19623         else if (addr_size == 0)
19624                 off = (gva_t)sign_extend64(off, 15);
19625         if (base_is_valid)
19626 -               off += kvm_register_read(vcpu, base_reg);
19627 +               off += kvm_register_readl(vcpu, base_reg);
19628         if (index_is_valid)
19629 -               off += kvm_register_read(vcpu, index_reg) << scaling;
19630 +               off += kvm_register_readl(vcpu, index_reg) << scaling;
19631         vmx_get_segment(vcpu, &s, seg_reg);
19633         /*
19634 @@ -5479,16 +5490,11 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
19635                 if (!nested_vmx_check_eptp(vcpu, new_eptp))
19636                         return 1;
19638 -               kvm_mmu_unload(vcpu);
19639                 mmu->ept_ad = accessed_dirty;
19640                 mmu->mmu_role.base.ad_disabled = !accessed_dirty;
19641                 vmcs12->ept_pointer = new_eptp;
19642 -               /*
19643 -                * TODO: Check what's the correct approach in case
19644 -                * mmu reload fails. Currently, we just let the next
19645 -                * reload potentially fail
19646 -                */
19647 -               kvm_mmu_reload(vcpu);
19649 +               kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
19650         }
19652         return 0;
19653 @@ -5717,7 +5723,7 @@ static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
19655         /* Decode instruction info and find the field to access */
19656         vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
19657 -       field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
19658 +       field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
19660         /* Out-of-range fields always cause a VM exit from L2 to L1 */
19661         if (field >> 15)
19662 diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
19663 index 29b40e092d13..f68ed9a1abcc 100644
19664 --- a/arch/x86/kvm/vmx/vmx.c
19665 +++ b/arch/x86/kvm/vmx/vmx.c
19666 @@ -36,6 +36,7 @@
19667  #include <asm/debugreg.h>
19668  #include <asm/desc.h>
19669  #include <asm/fpu/internal.h>
19670 +#include <asm/idtentry.h>
19671  #include <asm/io.h>
19672  #include <asm/irq_remapping.h>
19673  #include <asm/kexec.h>
19674 @@ -156,9 +157,11 @@ static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = {
19675         MSR_IA32_SPEC_CTRL,
19676         MSR_IA32_PRED_CMD,
19677         MSR_IA32_TSC,
19678 +#ifdef CONFIG_X86_64
19679         MSR_FS_BASE,
19680         MSR_GS_BASE,
19681         MSR_KERNEL_GS_BASE,
19682 +#endif
19683         MSR_IA32_SYSENTER_CS,
19684         MSR_IA32_SYSENTER_ESP,
19685         MSR_IA32_SYSENTER_EIP,
19686 @@ -1731,7 +1734,8 @@ static void setup_msrs(struct vcpu_vmx *vmx)
19687         if (update_transition_efer(vmx))
19688                 vmx_setup_uret_msr(vmx, MSR_EFER);
19690 -       if (guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
19691 +       if (guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP)  ||
19692 +           guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDPID))
19693                 vmx_setup_uret_msr(vmx, MSR_TSC_AUX);
19695         vmx_setup_uret_msr(vmx, MSR_IA32_TSX_CTRL);
19696 @@ -1930,7 +1934,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
19697                 break;
19698         case MSR_TSC_AUX:
19699                 if (!msr_info->host_initiated &&
19700 -                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
19701 +                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
19702 +                   !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
19703                         return 1;
19704                 goto find_uret_msr;
19705         case MSR_IA32_DEBUGCTLMSR:
19706 @@ -2227,7 +2232,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
19707                 break;
19708         case MSR_TSC_AUX:
19709                 if (!msr_info->host_initiated &&
19710 -                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
19711 +                   !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
19712 +                   !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
19713                         return 1;
19714                 /* Check reserved bit, higher 32 bits should be zero */
19715                 if ((data >> 32) != 0)
19716 @@ -4299,7 +4305,23 @@ static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
19717                                                   xsaves_enabled, false);
19718         }
19720 -       vmx_adjust_sec_exec_feature(vmx, &exec_control, rdtscp, RDTSCP);
19721 +       /*
19722 +        * RDPID is also gated by ENABLE_RDTSCP, turn on the control if either
19723 +        * feature is exposed to the guest.  This creates a virtualization hole
19724 +        * if both are supported in hardware but only one is exposed to the
19725 +        * guest, but letting the guest execute RDTSCP or RDPID when either one
19726 +        * is advertised is preferable to emulating the advertised instruction
19727 +        * in KVM on #UD, and obviously better than incorrectly injecting #UD.
19728 +        */
19729 +       if (cpu_has_vmx_rdtscp()) {
19730 +               bool rdpid_or_rdtscp_enabled =
19731 +                       guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) ||
19732 +                       guest_cpuid_has(vcpu, X86_FEATURE_RDPID);
19734 +               vmx_adjust_secondary_exec_control(vmx, &exec_control,
19735 +                                                 SECONDARY_EXEC_ENABLE_RDTSCP,
19736 +                                                 rdpid_or_rdtscp_enabled, false);
19737 +       }
19738         vmx_adjust_sec_exec_feature(vmx, &exec_control, invpcid, INVPCID);
19740         vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdrand, RDRAND);
19741 @@ -5062,12 +5084,12 @@ static int handle_cr(struct kvm_vcpu *vcpu)
19742                 case 3:
19743                         WARN_ON_ONCE(enable_unrestricted_guest);
19744                         val = kvm_read_cr3(vcpu);
19745 -                       kvm_register_write(vcpu, reg, val);
19746 +                       kvm_register_writel(vcpu, reg, val);
19747                         trace_kvm_cr_read(cr, val);
19748                         return kvm_skip_emulated_instruction(vcpu);
19749                 case 8:
19750                         val = kvm_get_cr8(vcpu);
19751 -                       kvm_register_write(vcpu, reg, val);
19752 +                       kvm_register_writel(vcpu, reg, val);
19753                         trace_kvm_cr_read(cr, val);
19754                         return kvm_skip_emulated_instruction(vcpu);
19755                 }
19756 @@ -5140,7 +5162,7 @@ static int handle_dr(struct kvm_vcpu *vcpu)
19757                 unsigned long val;
19759                 kvm_get_dr(vcpu, dr, &val);
19760 -               kvm_register_write(vcpu, reg, val);
19761 +               kvm_register_writel(vcpu, reg, val);
19762                 err = 0;
19763         } else {
19764                 err = kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg));
19765 @@ -5792,7 +5814,6 @@ void dump_vmcs(void)
19766         u32 vmentry_ctl, vmexit_ctl;
19767         u32 cpu_based_exec_ctrl, pin_based_exec_ctrl, secondary_exec_control;
19768         unsigned long cr4;
19769 -       u64 efer;
19771         if (!dump_invalid_vmcs) {
19772                 pr_warn_ratelimited("set kvm_intel.dump_invalid_vmcs=1 to dump internal KVM state.\n");
19773 @@ -5804,7 +5825,6 @@ void dump_vmcs(void)
19774         cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
19775         pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
19776         cr4 = vmcs_readl(GUEST_CR4);
19777 -       efer = vmcs_read64(GUEST_IA32_EFER);
19778         secondary_exec_control = 0;
19779         if (cpu_has_secondary_exec_ctrls())
19780                 secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
19781 @@ -5816,9 +5836,7 @@ void dump_vmcs(void)
19782         pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
19783                cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK));
19784         pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3));
19785 -       if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT) &&
19786 -           (cr4 & X86_CR4_PAE) && !(efer & EFER_LMA))
19787 -       {
19788 +       if (cpu_has_vmx_ept()) {
19789                 pr_err("PDPTR0 = 0x%016llx  PDPTR1 = 0x%016llx\n",
19790                        vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1));
19791                 pr_err("PDPTR2 = 0x%016llx  PDPTR3 = 0x%016llx\n",
19792 @@ -5844,7 +5862,8 @@ void dump_vmcs(void)
19793         if ((vmexit_ctl & (VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER)) ||
19794             (vmentry_ctl & (VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_IA32_EFER)))
19795                 pr_err("EFER =     0x%016llx  PAT = 0x%016llx\n",
19796 -                      efer, vmcs_read64(GUEST_IA32_PAT));
19797 +                      vmcs_read64(GUEST_IA32_EFER),
19798 +                      vmcs_read64(GUEST_IA32_PAT));
19799         pr_err("DebugCtl = 0x%016llx  DebugExceptions = 0x%016lx\n",
19800                vmcs_read64(GUEST_IA32_DEBUGCTL),
19801                vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS));
19802 @@ -6395,18 +6414,17 @@ static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
19804  void vmx_do_interrupt_nmi_irqoff(unsigned long entry);
19806 -static void handle_interrupt_nmi_irqoff(struct kvm_vcpu *vcpu, u32 intr_info)
19807 +static void handle_interrupt_nmi_irqoff(struct kvm_vcpu *vcpu,
19808 +                                       unsigned long entry)
19810 -       unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
19811 -       gate_desc *desc = (gate_desc *)host_idt_base + vector;
19813         kvm_before_interrupt(vcpu);
19814 -       vmx_do_interrupt_nmi_irqoff(gate_offset(desc));
19815 +       vmx_do_interrupt_nmi_irqoff(entry);
19816         kvm_after_interrupt(vcpu);
19819  static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx)
19821 +       const unsigned long nmi_entry = (unsigned long)asm_exc_nmi_noist;
19822         u32 intr_info = vmx_get_intr_info(&vmx->vcpu);
19824         /* if exit due to PF check for async PF */
19825 @@ -6417,18 +6435,20 @@ static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx)
19826                 kvm_machine_check();
19827         /* We need to handle NMIs before interrupts are enabled */
19828         else if (is_nmi(intr_info))
19829 -               handle_interrupt_nmi_irqoff(&vmx->vcpu, intr_info);
19830 +               handle_interrupt_nmi_irqoff(&vmx->vcpu, nmi_entry);
19833  static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
19835         u32 intr_info = vmx_get_intr_info(vcpu);
19836 +       unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
19837 +       gate_desc *desc = (gate_desc *)host_idt_base + vector;
19839         if (WARN_ONCE(!is_external_intr(intr_info),
19840             "KVM: unexpected VM-Exit interrupt info: 0x%x", intr_info))
19841                 return;
19843 -       handle_interrupt_nmi_irqoff(vcpu, intr_info);
19844 +       handle_interrupt_nmi_irqoff(vcpu, gate_offset(desc));
19847  static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
19848 @@ -6894,12 +6914,9 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
19850         for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i) {
19851                 u32 index = vmx_uret_msrs_list[i];
19852 -               u32 data_low, data_high;
19853                 int j = vmx->nr_uret_msrs;
19855 -               if (rdmsr_safe(index, &data_low, &data_high) < 0)
19856 -                       continue;
19857 -               if (wrmsr_safe(index, data_low, data_high) < 0)
19858 +               if (kvm_probe_user_return_msr(index))
19859                         continue;
19861                 vmx->guest_uret_msrs[j].slot = i;
19862 @@ -6938,9 +6955,11 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
19863         bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
19865         vmx_disable_intercept_for_msr(vcpu, MSR_IA32_TSC, MSR_TYPE_R);
19866 +#ifdef CONFIG_X86_64
19867         vmx_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
19868         vmx_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
19869         vmx_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
19870 +#endif
19871         vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
19872         vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
19873         vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
19874 @@ -7330,9 +7349,11 @@ static __init void vmx_set_cpu_caps(void)
19875         if (!cpu_has_vmx_xsaves())
19876                 kvm_cpu_cap_clear(X86_FEATURE_XSAVES);
19878 -       /* CPUID 0x80000001 */
19879 -       if (!cpu_has_vmx_rdtscp())
19880 +       /* CPUID 0x80000001 and 0x7 (RDPID) */
19881 +       if (!cpu_has_vmx_rdtscp()) {
19882                 kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
19883 +               kvm_cpu_cap_clear(X86_FEATURE_RDPID);
19884 +       }
19886         if (cpu_has_vmx_waitpkg())
19887                 kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG);
19888 @@ -7388,8 +7409,9 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
19889         /*
19890          * RDPID causes #UD if disabled through secondary execution controls.
19891          * Because it is marked as EmulateOnUD, we need to intercept it here.
19892 +        * Note, RDPID is hidden behind ENABLE_RDTSCP.
19893          */
19894 -       case x86_intercept_rdtscp:
19895 +       case x86_intercept_rdpid:
19896                 if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_RDTSCP)) {
19897                         exception->vector = UD_VECTOR;
19898                         exception->error_code_valid = false;
19899 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
19900 index ee0dc58ac3a5..87311d39f914 100644
19901 --- a/arch/x86/kvm/x86.c
19902 +++ b/arch/x86/kvm/x86.c
19903 @@ -335,6 +335,22 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
19904         }
19907 +int kvm_probe_user_return_msr(u32 msr)
19909 +       u64 val;
19910 +       int ret;
19912 +       preempt_disable();
19913 +       ret = rdmsrl_safe(msr, &val);
19914 +       if (ret)
19915 +               goto out;
19916 +       ret = wrmsrl_safe(msr, val);
19917 +out:
19918 +       preempt_enable();
19919 +       return ret;
19921 +EXPORT_SYMBOL_GPL(kvm_probe_user_return_msr);
19923  void kvm_define_user_return_msr(unsigned slot, u32 msr)
19925         BUG_ON(slot >= KVM_MAX_NR_USER_RETURN_MSRS);
19926 @@ -1072,10 +1088,15 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
19927                 return 0;
19928         }
19930 -       if (is_long_mode(vcpu) && kvm_vcpu_is_illegal_gpa(vcpu, cr3))
19931 +       /*
19932 +        * Do not condition the GPA check on long mode, this helper is used to
19933 +        * stuff CR3, e.g. for RSM emulation, and there is no guarantee that
19934 +        * the current vCPU mode is accurate.
19935 +        */
19936 +       if (kvm_vcpu_is_illegal_gpa(vcpu, cr3))
19937                 return 1;
19938 -       else if (is_pae_paging(vcpu) &&
19939 -                !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
19941 +       if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
19942                 return 1;
19944         kvm_mmu_new_pgd(vcpu, cr3, skip_tlb_flush, skip_tlb_flush);
19945 @@ -5859,7 +5880,8 @@ static void kvm_init_msr_list(void)
19946                                 continue;
19947                         break;
19948                 case MSR_TSC_AUX:
19949 -                       if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
19950 +                       if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP) &&
19951 +                           !kvm_cpu_cap_has(X86_FEATURE_RDPID))
19952                                 continue;
19953                         break;
19954                 case MSR_IA32_UMWAIT_CONTROL:
19955 @@ -7959,6 +7981,18 @@ static void pvclock_gtod_update_fn(struct work_struct *work)
19957  static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
19960 + * Indirection to move queue_work() out of the tk_core.seq write held
19961 + * region to prevent possible deadlocks against time accessors which
19962 + * are invoked with work related locks held.
19963 + */
19964 +static void pvclock_irq_work_fn(struct irq_work *w)
19966 +       queue_work(system_long_wq, &pvclock_gtod_work);
19969 +static DEFINE_IRQ_WORK(pvclock_irq_work, pvclock_irq_work_fn);
19971  /*
19972   * Notification about pvclock gtod data update.
19973   */
19974 @@ -7970,13 +8004,14 @@ static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
19976         update_pvclock_gtod(tk);
19978 -       /* disable master clock if host does not trust, or does not
19979 -        * use, TSC based clocksource.
19980 +       /*
19981 +        * Disable master clock if host does not trust, or does not use,
19982 +        * TSC based clocksource. Delegate queue_work() to irq_work as
19983 +        * this is invoked with tk_core.seq write held.
19984          */
19985         if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) &&
19986             atomic_read(&kvm_guest_has_master_clock) != 0)
19987 -               queue_work(system_long_wq, &pvclock_gtod_work);
19989 +               irq_work_queue(&pvclock_irq_work);
19990         return 0;
19993 @@ -8091,6 +8126,8 @@ void kvm_arch_exit(void)
19994         cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
19995  #ifdef CONFIG_X86_64
19996         pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
19997 +       irq_work_sync(&pvclock_irq_work);
19998 +       cancel_work_sync(&pvclock_gtod_work);
19999  #endif
20000         kvm_x86_ops.hardware_enable = NULL;
20001         kvm_mmu_module_exit();
20002 @@ -11020,6 +11057,9 @@ bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
20004  bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
20006 +       if (vcpu->arch.guest_state_protected)
20007 +               return true;
20009         return vcpu->arch.preempted_in_kernel;
20012 @@ -11290,7 +11330,7 @@ bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
20013         if (!kvm_pv_async_pf_enabled(vcpu))
20014                 return true;
20015         else
20016 -               return apf_pageready_slot_free(vcpu);
20017 +               return kvm_lapic_enabled(vcpu) && apf_pageready_slot_free(vcpu);
20020  void kvm_arch_start_assignment(struct kvm *kvm)
20021 @@ -11539,7 +11579,7 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
20023                 fallthrough;
20024         case INVPCID_TYPE_ALL_INCL_GLOBAL:
20025 -               kvm_mmu_unload(vcpu);
20026 +               kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
20027                 return kvm_skip_emulated_instruction(vcpu);
20029         default:
20030 diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
20031 index ae17250e1efe..7f27bb65a572 100644
20032 --- a/arch/x86/kvm/xen.c
20033 +++ b/arch/x86/kvm/xen.c
20034 @@ -673,7 +673,7 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
20035         bool longmode;
20036         u64 input, params[6];
20038 -       input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX);
20039 +       input = (u64)kvm_register_readl(vcpu, VCPU_REGS_RAX);
20041         /* Hyper-V hypercalls get bit 31 set in EAX */
20042         if ((input & 0x80000000) &&
20043 diff --git a/arch/x86/lib/msr-smp.c b/arch/x86/lib/msr-smp.c
20044 index 75a0915b0d01..40bbe56bde32 100644
20045 --- a/arch/x86/lib/msr-smp.c
20046 +++ b/arch/x86/lib/msr-smp.c
20047 @@ -252,7 +252,7 @@ static void __wrmsr_safe_regs_on_cpu(void *info)
20048         rv->err = wrmsr_safe_regs(rv->regs);
20051 -int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
20052 +int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
20054         int err;
20055         struct msr_regs_info rv;
20056 @@ -265,7 +265,7 @@ int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
20058  EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
20060 -int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
20061 +int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
20063         int err;
20064         struct msr_regs_info rv;
20065 diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
20066 index 6c5eb6f3f14f..a19374d26101 100644
20067 --- a/arch/x86/mm/mem_encrypt_identity.c
20068 +++ b/arch/x86/mm/mem_encrypt_identity.c
20069 @@ -503,14 +503,10 @@ void __init sme_enable(struct boot_params *bp)
20071  #define AMD_SME_BIT    BIT(0)
20072  #define AMD_SEV_BIT    BIT(1)
20073 -       /*
20074 -        * Set the feature mask (SME or SEV) based on whether we are
20075 -        * running under a hypervisor.
20076 -        */
20077 -       eax = 1;
20078 -       ecx = 0;
20079 -       native_cpuid(&eax, &ebx, &ecx, &edx);
20080 -       feature_mask = (ecx & BIT(31)) ? AMD_SEV_BIT : AMD_SME_BIT;
20082 +       /* Check the SEV MSR whether SEV or SME is enabled */
20083 +       sev_status   = __rdmsr(MSR_AMD64_SEV);
20084 +       feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
20086         /*
20087          * Check for the SME/SEV feature:
20088 @@ -530,19 +526,26 @@ void __init sme_enable(struct boot_params *bp)
20090         /* Check if memory encryption is enabled */
20091         if (feature_mask == AMD_SME_BIT) {
20092 +               /*
20093 +                * No SME if Hypervisor bit is set. This check is here to
20094 +                * prevent a guest from trying to enable SME. For running as a
20095 +                * KVM guest the MSR_K8_SYSCFG will be sufficient, but there
20096 +                * might be other hypervisors which emulate that MSR as non-zero
20097 +                * or even pass it through to the guest.
20098 +                * A malicious hypervisor can still trick a guest into this
20099 +                * path, but there is no way to protect against that.
20100 +                */
20101 +               eax = 1;
20102 +               ecx = 0;
20103 +               native_cpuid(&eax, &ebx, &ecx, &edx);
20104 +               if (ecx & BIT(31))
20105 +                       return;
20107                 /* For SME, check the SYSCFG MSR */
20108                 msr = __rdmsr(MSR_K8_SYSCFG);
20109                 if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
20110                         return;
20111         } else {
20112 -               /* For SEV, check the SEV MSR */
20113 -               msr = __rdmsr(MSR_AMD64_SEV);
20114 -               if (!(msr & MSR_AMD64_SEV_ENABLED))
20115 -                       return;
20117 -               /* Save SEV_STATUS to avoid reading MSR again */
20118 -               sev_status = msr;
20120                 /* SEV state cannot be controlled by a command line option */
20121                 sme_me_mask = me_mask;
20122                 sev_enabled = true;
20123 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
20124 index f6a9e2e36642..1c27e6f43f80 100644
20125 --- a/arch/x86/mm/pgtable.c
20126 +++ b/arch/x86/mm/pgtable.c
20127 @@ -550,7 +550,7 @@ int ptep_test_and_clear_young(struct vm_area_struct *vma,
20128         return ret;
20131 -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
20132 +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG)
20133  int pmdp_test_and_clear_young(struct vm_area_struct *vma,
20134                               unsigned long addr, pmd_t *pmdp)
20136 @@ -562,6 +562,9 @@ int pmdp_test_and_clear_young(struct vm_area_struct *vma,
20138         return ret;
20140 +#endif
20142 +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
20143  int pudp_test_and_clear_young(struct vm_area_struct *vma,
20144                               unsigned long addr, pud_t *pudp)
20146 diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c
20147 index cd3914fc9f3d..e94e0050a583 100644
20148 --- a/arch/x86/power/hibernate.c
20149 +++ b/arch/x86/power/hibernate.c
20150 @@ -13,8 +13,8 @@
20151  #include <linux/kdebug.h>
20152  #include <linux/cpu.h>
20153  #include <linux/pgtable.h>
20155 -#include <crypto/hash.h>
20156 +#include <linux/types.h>
20157 +#include <linux/crc32.h>
20159  #include <asm/e820/api.h>
20160  #include <asm/init.h>
20161 @@ -54,95 +54,33 @@ int pfn_is_nosave(unsigned long pfn)
20162         return pfn >= nosave_begin_pfn && pfn < nosave_end_pfn;
20166 -#define MD5_DIGEST_SIZE 16
20168  struct restore_data_record {
20169         unsigned long jump_address;
20170         unsigned long jump_address_phys;
20171         unsigned long cr3;
20172         unsigned long magic;
20173 -       u8 e820_digest[MD5_DIGEST_SIZE];
20174 +       unsigned long e820_checksum;
20175  };
20177 -#if IS_BUILTIN(CONFIG_CRYPTO_MD5)
20178  /**
20179 - * get_e820_md5 - calculate md5 according to given e820 table
20180 + * compute_e820_crc32 - calculate crc32 of a given e820 table
20181   *
20182   * @table: the e820 table to be calculated
20183 - * @buf: the md5 result to be stored to
20184 + *
20185 + * Return: the resulting checksum
20186   */
20187 -static int get_e820_md5(struct e820_table *table, void *buf)
20188 +static inline u32 compute_e820_crc32(struct e820_table *table)
20190 -       struct crypto_shash *tfm;
20191 -       struct shash_desc *desc;
20192 -       int size;
20193 -       int ret = 0;
20195 -       tfm = crypto_alloc_shash("md5", 0, 0);
20196 -       if (IS_ERR(tfm))
20197 -               return -ENOMEM;
20199 -       desc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm),
20200 -                      GFP_KERNEL);
20201 -       if (!desc) {
20202 -               ret = -ENOMEM;
20203 -               goto free_tfm;
20204 -       }
20206 -       desc->tfm = tfm;
20208 -       size = offsetof(struct e820_table, entries) +
20209 +       int size = offsetof(struct e820_table, entries) +
20210                 sizeof(struct e820_entry) * table->nr_entries;
20212 -       if (crypto_shash_digest(desc, (u8 *)table, size, buf))
20213 -               ret = -EINVAL;
20215 -       kfree_sensitive(desc);
20217 -free_tfm:
20218 -       crypto_free_shash(tfm);
20219 -       return ret;
20222 -static int hibernation_e820_save(void *buf)
20224 -       return get_e820_md5(e820_table_firmware, buf);
20227 -static bool hibernation_e820_mismatch(void *buf)
20229 -       int ret;
20230 -       u8 result[MD5_DIGEST_SIZE];
20232 -       memset(result, 0, MD5_DIGEST_SIZE);
20233 -       /* If there is no digest in suspend kernel, let it go. */
20234 -       if (!memcmp(result, buf, MD5_DIGEST_SIZE))
20235 -               return false;
20237 -       ret = get_e820_md5(e820_table_firmware, result);
20238 -       if (ret)
20239 -               return true;
20241 -       return memcmp(result, buf, MD5_DIGEST_SIZE) ? true : false;
20243 -#else
20244 -static int hibernation_e820_save(void *buf)
20246 -       return 0;
20249 -static bool hibernation_e820_mismatch(void *buf)
20251 -       /* If md5 is not builtin for restore kernel, let it go. */
20252 -       return false;
20253 +       return ~crc32_le(~0, (unsigned char const *)table, size);
20255 -#endif
20257  #ifdef CONFIG_X86_64
20258 -#define RESTORE_MAGIC  0x23456789ABCDEF01UL
20259 +#define RESTORE_MAGIC  0x23456789ABCDEF02UL
20260  #else
20261 -#define RESTORE_MAGIC  0x12345678UL
20262 +#define RESTORE_MAGIC  0x12345679UL
20263  #endif
20265  /**
20266 @@ -179,7 +117,8 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
20267          */
20268         rdr->cr3 = restore_cr3 & ~CR3_PCID_MASK;
20270 -       return hibernation_e820_save(rdr->e820_digest);
20271 +       rdr->e820_checksum = compute_e820_crc32(e820_table_firmware);
20272 +       return 0;
20275  /**
20276 @@ -200,7 +139,7 @@ int arch_hibernation_header_restore(void *addr)
20277         jump_address_phys = rdr->jump_address_phys;
20278         restore_cr3 = rdr->cr3;
20280 -       if (hibernation_e820_mismatch(rdr->e820_digest)) {
20281 +       if (rdr->e820_checksum != compute_e820_crc32(e820_table_firmware)) {
20282                 pr_crit("Hibernate inconsistent memory map detected!\n");
20283                 return -ENODEV;
20284         }
20285 diff --git a/block/Kconfig b/block/Kconfig
20286 index a2297edfdde8..f688ea5f0dbd 100644
20287 --- a/block/Kconfig
20288 +++ b/block/Kconfig
20289 @@ -83,7 +83,7 @@ config BLK_DEV_INTEGRITY_T10
20291  config BLK_DEV_ZONED
20292         bool "Zoned block device support"
20293 -       select MQ_IOSCHED_DEADLINE
20294 +       select IOSCHED_BFQ
20295         help
20296         Block layer zoned block device support. This option enables
20297         support for ZAC/ZBC/ZNS host-managed and host-aware zoned block
20298 diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
20299 index 2f2158e05a91..e58b2953ac16 100644
20300 --- a/block/Kconfig.iosched
20301 +++ b/block/Kconfig.iosched
20302 @@ -5,13 +5,11 @@ menu "IO Schedulers"
20304  config MQ_IOSCHED_DEADLINE
20305         tristate "MQ deadline I/O scheduler"
20306 -       default y
20307         help
20308           MQ version of the deadline IO scheduler.
20310  config MQ_IOSCHED_KYBER
20311         tristate "Kyber I/O scheduler"
20312 -       default y
20313         help
20314           The Kyber I/O scheduler is a low-overhead scheduler suitable for
20315           multiqueue and other fast devices. Given target latencies for reads and
20316 @@ -20,6 +18,7 @@ config MQ_IOSCHED_KYBER
20318  config IOSCHED_BFQ
20319         tristate "BFQ I/O scheduler"
20320 +       default y
20321         help
20322         BFQ I/O scheduler for BLK-MQ. BFQ distributes the bandwidth of
20323         of the device among all processes according to their weights,
20324 diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
20325 index 95586137194e..bc319931d2b3 100644
20326 --- a/block/bfq-iosched.c
20327 +++ b/block/bfq-iosched.c
20328 @@ -1012,7 +1012,7 @@ static void
20329  bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
20330                       struct bfq_io_cq *bic, bool bfq_already_existing)
20332 -       unsigned int old_wr_coeff = bfqq->wr_coeff;
20333 +       unsigned int old_wr_coeff = 1;
20334         bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq);
20336         if (bic->saved_has_short_ttime)
20337 @@ -1033,7 +1033,13 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
20338         bfqq->ttime = bic->saved_ttime;
20339         bfqq->io_start_time = bic->saved_io_start_time;
20340         bfqq->tot_idle_time = bic->saved_tot_idle_time;
20341 -       bfqq->wr_coeff = bic->saved_wr_coeff;
20342 +       /*
20343 +        * Restore weight coefficient only if low_latency is on
20344 +        */
20345 +       if (bfqd->low_latency) {
20346 +               old_wr_coeff = bfqq->wr_coeff;
20347 +               bfqq->wr_coeff = bic->saved_wr_coeff;
20348 +       }
20349         bfqq->service_from_wr = bic->saved_service_from_wr;
20350         bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
20351         bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish;
20352 @@ -2257,10 +2263,9 @@ static void bfq_remove_request(struct request_queue *q,
20356 -static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
20357 +static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
20358                 unsigned int nr_segs)
20360 -       struct request_queue *q = hctx->queue;
20361         struct bfq_data *bfqd = q->elevator->elevator_data;
20362         struct request *free = NULL;
20363         /*
20364 diff --git a/block/blk-iocost.c b/block/blk-iocost.c
20365 index 98d656bdb42b..4fbc875f7cb2 100644
20366 --- a/block/blk-iocost.c
20367 +++ b/block/blk-iocost.c
20368 @@ -1073,7 +1073,17 @@ static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
20370         lockdep_assert_held(&ioc->lock);
20372 -       inuse = clamp_t(u32, inuse, 1, active);
20373 +       /*
20374 +        * For an active leaf node, its inuse shouldn't be zero or exceed
20375 +        * @active. An active internal node's inuse is solely determined by the
20376 +        * inuse to active ratio of its children regardless of @inuse.
20377 +        */
20378 +       if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
20379 +               inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
20380 +                                          iocg->child_active_sum);
20381 +       } else {
20382 +               inuse = clamp_t(u32, inuse, 1, active);
20383 +       }
20385         iocg->last_inuse = iocg->inuse;
20386         if (save)
20387 @@ -1090,7 +1100,7 @@ static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
20388                 /* update the level sums */
20389                 parent->child_active_sum += (s32)(active - child->active);
20390                 parent->child_inuse_sum += (s32)(inuse - child->inuse);
20391 -               /* apply the udpates */
20392 +               /* apply the updates */
20393                 child->active = active;
20394                 child->inuse = inuse;
20396 diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
20397 index e1e997af89a0..fdeb9773b55c 100644
20398 --- a/block/blk-mq-sched.c
20399 +++ b/block/blk-mq-sched.c
20400 @@ -348,14 +348,16 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
20401                 unsigned int nr_segs)
20403         struct elevator_queue *e = q->elevator;
20404 -       struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
20405 -       struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
20406 +       struct blk_mq_ctx *ctx;
20407 +       struct blk_mq_hw_ctx *hctx;
20408         bool ret = false;
20409         enum hctx_type type;
20411         if (e && e->type->ops.bio_merge)
20412 -               return e->type->ops.bio_merge(hctx, bio, nr_segs);
20413 +               return e->type->ops.bio_merge(q, bio, nr_segs);
20415 +       ctx = blk_mq_get_ctx(q);
20416 +       hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
20417         type = hctx->type;
20418         if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
20419             list_empty_careful(&ctx->rq_lists[type]))
20420 diff --git a/block/blk-mq.c b/block/blk-mq.c
20421 index d4d7c1caa439..0e120547ccb7 100644
20422 --- a/block/blk-mq.c
20423 +++ b/block/blk-mq.c
20424 @@ -2216,8 +2216,9 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
20425                 /* Bypass scheduler for flush requests */
20426                 blk_insert_flush(rq);
20427                 blk_mq_run_hw_queue(data.hctx, true);
20428 -       } else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs ||
20429 -                               !blk_queue_nonrot(q))) {
20430 +       } else if (plug && (q->nr_hw_queues == 1 ||
20431 +                  blk_mq_is_sbitmap_shared(rq->mq_hctx->flags) ||
20432 +                  q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) {
20433                 /*
20434                  * Use plugging if we have a ->commit_rqs() hook as well, as
20435                  * we know the driver uses bd->last in a smart fashion.
20436 @@ -3269,10 +3270,12 @@ EXPORT_SYMBOL(blk_mq_init_allocated_queue);
20437  /* tags can _not_ be used after returning from blk_mq_exit_queue */
20438  void blk_mq_exit_queue(struct request_queue *q)
20440 -       struct blk_mq_tag_set   *set = q->tag_set;
20441 +       struct blk_mq_tag_set *set = q->tag_set;
20443 -       blk_mq_del_queue_tag_set(q);
20444 +       /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
20445         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
20446 +       /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
20447 +       blk_mq_del_queue_tag_set(q);
20450  static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
20451 diff --git a/block/elevator.c b/block/elevator.c
20452 index 293c5c81397a..71111fa80628 100644
20453 --- a/block/elevator.c
20454 +++ b/block/elevator.c
20455 @@ -616,15 +616,15 @@ static inline bool elv_support_iosched(struct request_queue *q)
20458  /*
20459 - * For single queue devices, default to using mq-deadline. If we have multiple
20460 - * queues or mq-deadline is not available, default to "none".
20461 + * For single queue devices, default to using bfq. If we have multiple
20462 + * queues or bfq is not available, default to "none".
20463   */
20464  static struct elevator_type *elevator_get_default(struct request_queue *q)
20466         if (q->nr_hw_queues != 1)
20467                 return NULL;
20469 -       return elevator_get(q, "mq-deadline", false);
20470 +       return elevator_get(q, "bfq", false);
20473  /*
20474 diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
20475 index 33d34d69cade..79b69d7046d6 100644
20476 --- a/block/kyber-iosched.c
20477 +++ b/block/kyber-iosched.c
20478 @@ -560,11 +560,12 @@ static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
20479         }
20482 -static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
20483 +static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
20484                 unsigned int nr_segs)
20486 +       struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
20487 +       struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
20488         struct kyber_hctx_data *khd = hctx->sched_data;
20489 -       struct blk_mq_ctx *ctx = blk_mq_get_ctx(hctx->queue);
20490         struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
20491         unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
20492         struct list_head *rq_list = &kcq->rq_list[sched_domain];
20493 diff --git a/block/mq-deadline.c b/block/mq-deadline.c
20494 index f3631a287466..3aabcd2a7893 100644
20495 --- a/block/mq-deadline.c
20496 +++ b/block/mq-deadline.c
20497 @@ -461,10 +461,9 @@ static int dd_request_merge(struct request_queue *q, struct request **rq,
20498         return ELEVATOR_NO_MERGE;
20501 -static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
20502 +static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
20503                 unsigned int nr_segs)
20505 -       struct request_queue *q = hctx->queue;
20506         struct deadline_data *dd = q->elevator->elevator_data;
20507         struct request *free = NULL;
20508         bool ret;
20509 diff --git a/crypto/api.c b/crypto/api.c
20510 index ed08cbd5b9d3..c4eda56cff89 100644
20511 --- a/crypto/api.c
20512 +++ b/crypto/api.c
20513 @@ -562,7 +562,7 @@ void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
20515         struct crypto_alg *alg;
20517 -       if (unlikely(!mem))
20518 +       if (IS_ERR_OR_NULL(mem))
20519                 return;
20521         alg = tfm->__crt_alg;
20522 diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
20523 index a057ecb1288d..6cd7f7025df4 100644
20524 --- a/crypto/async_tx/async_xor.c
20525 +++ b/crypto/async_tx/async_xor.c
20526 @@ -233,6 +233,7 @@ async_xor_offs(struct page *dest, unsigned int offset,
20527                 if (submit->flags & ASYNC_TX_XOR_DROP_DST) {
20528                         src_cnt--;
20529                         src_list++;
20530 +                       src_offs++;
20531                 }
20533                 /* wait for any prerequisite operations */
20534 diff --git a/crypto/rng.c b/crypto/rng.c
20535 index a888d84b524a..fea082b25fe4 100644
20536 --- a/crypto/rng.c
20537 +++ b/crypto/rng.c
20538 @@ -34,22 +34,18 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
20539         u8 *buf = NULL;
20540         int err;
20542 -       crypto_stats_get(alg);
20543         if (!seed && slen) {
20544                 buf = kmalloc(slen, GFP_KERNEL);
20545 -               if (!buf) {
20546 -                       crypto_alg_put(alg);
20547 +               if (!buf)
20548                         return -ENOMEM;
20549 -               }
20551                 err = get_random_bytes_wait(buf, slen);
20552 -               if (err) {
20553 -                       crypto_alg_put(alg);
20554 +               if (err)
20555                         goto out;
20556 -               }
20557                 seed = buf;
20558         }
20560 +       crypto_stats_get(alg);
20561         err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
20562         crypto_stats_rng_seed(alg, err);
20563  out:
20564 diff --git a/crypto/zstd.c b/crypto/zstd.c
20565 index 1a3309f066f7..154a969c83a8 100644
20566 --- a/crypto/zstd.c
20567 +++ b/crypto/zstd.c
20568 @@ -18,22 +18,22 @@
20569  #define ZSTD_DEF_LEVEL 3
20571  struct zstd_ctx {
20572 -       ZSTD_CCtx *cctx;
20573 -       ZSTD_DCtx *dctx;
20574 +       zstd_cctx *cctx;
20575 +       zstd_dctx *dctx;
20576         void *cwksp;
20577         void *dwksp;
20578  };
20580 -static ZSTD_parameters zstd_params(void)
20581 +static zstd_parameters zstd_params(void)
20583 -       return ZSTD_getParams(ZSTD_DEF_LEVEL, 0, 0);
20584 +       return zstd_get_params(ZSTD_DEF_LEVEL, 0);
20587  static int zstd_comp_init(struct zstd_ctx *ctx)
20589         int ret = 0;
20590 -       const ZSTD_parameters params = zstd_params();
20591 -       const size_t wksp_size = ZSTD_CCtxWorkspaceBound(params.cParams);
20592 +       const zstd_parameters params = zstd_params();
20593 +       const size_t wksp_size = zstd_cctx_workspace_bound(&params.cParams);
20595         ctx->cwksp = vzalloc(wksp_size);
20596         if (!ctx->cwksp) {
20597 @@ -41,7 +41,7 @@ static int zstd_comp_init(struct zstd_ctx *ctx)
20598                 goto out;
20599         }
20601 -       ctx->cctx = ZSTD_initCCtx(ctx->cwksp, wksp_size);
20602 +       ctx->cctx = zstd_init_cctx(ctx->cwksp, wksp_size);
20603         if (!ctx->cctx) {
20604                 ret = -EINVAL;
20605                 goto out_free;
20606 @@ -56,7 +56,7 @@ static int zstd_comp_init(struct zstd_ctx *ctx)
20607  static int zstd_decomp_init(struct zstd_ctx *ctx)
20609         int ret = 0;
20610 -       const size_t wksp_size = ZSTD_DCtxWorkspaceBound();
20611 +       const size_t wksp_size = zstd_dctx_workspace_bound();
20613         ctx->dwksp = vzalloc(wksp_size);
20614         if (!ctx->dwksp) {
20615 @@ -64,7 +64,7 @@ static int zstd_decomp_init(struct zstd_ctx *ctx)
20616                 goto out;
20617         }
20619 -       ctx->dctx = ZSTD_initDCtx(ctx->dwksp, wksp_size);
20620 +       ctx->dctx = zstd_init_dctx(ctx->dwksp, wksp_size);
20621         if (!ctx->dctx) {
20622                 ret = -EINVAL;
20623                 goto out_free;
20624 @@ -152,10 +152,10 @@ static int __zstd_compress(const u8 *src, unsigned int slen,
20626         size_t out_len;
20627         struct zstd_ctx *zctx = ctx;
20628 -       const ZSTD_parameters params = zstd_params();
20629 +       const zstd_parameters params = zstd_params();
20631 -       out_len = ZSTD_compressCCtx(zctx->cctx, dst, *dlen, src, slen, params);
20632 -       if (ZSTD_isError(out_len))
20633 +       out_len = zstd_compress_cctx(zctx->cctx, dst, *dlen, src, slen, &params);
20634 +       if (zstd_is_error(out_len))
20635                 return -EINVAL;
20636         *dlen = out_len;
20637         return 0;
20638 @@ -182,8 +182,8 @@ static int __zstd_decompress(const u8 *src, unsigned int slen,
20639         size_t out_len;
20640         struct zstd_ctx *zctx = ctx;
20642 -       out_len = ZSTD_decompressDCtx(zctx->dctx, dst, *dlen, src, slen);
20643 -       if (ZSTD_isError(out_len))
20644 +       out_len = zstd_decompress_dctx(zctx->dctx, dst, *dlen, src, slen);
20645 +       if (zstd_is_error(out_len))
20646                 return -EINVAL;
20647         *dlen = out_len;
20648         return 0;
20649 diff --git a/drivers/accessibility/speakup/speakup_acntpc.c b/drivers/accessibility/speakup/speakup_acntpc.c
20650 index c1ec087dca13..b2d0d4266f62 100644
20651 --- a/drivers/accessibility/speakup/speakup_acntpc.c
20652 +++ b/drivers/accessibility/speakup/speakup_acntpc.c
20653 @@ -198,7 +198,7 @@ static void do_catch_up(struct spk_synth *synth)
20654                 full_time_val = full_time->u.n.value;
20655                 spin_unlock_irqrestore(&speakup_info.spinlock, flags);
20656                 if (synth_full()) {
20657 -                       schedule_timeout(msecs_to_jiffies(full_time_val));
20658 +                       schedule_msec_hrtimeout((full_time_val));
20659                         continue;
20660                 }
20661                 set_current_state(TASK_RUNNING);
20662 @@ -226,7 +226,7 @@ static void do_catch_up(struct spk_synth *synth)
20663                         jiffy_delta_val = jiffy_delta->u.n.value;
20664                         delay_time_val = delay_time->u.n.value;
20665                         spin_unlock_irqrestore(&speakup_info.spinlock, flags);
20666 -                       schedule_timeout(msecs_to_jiffies(delay_time_val));
20667 +                       schedule_msec_hrtimeout(delay_time_val);
20668                         jiff_max = jiffies + jiffy_delta_val;
20669                 }
20670         }
20671 diff --git a/drivers/accessibility/speakup/speakup_apollo.c b/drivers/accessibility/speakup/speakup_apollo.c
20672 index cd63581b2e99..d636157a2844 100644
20673 --- a/drivers/accessibility/speakup/speakup_apollo.c
20674 +++ b/drivers/accessibility/speakup/speakup_apollo.c
20675 @@ -165,7 +165,7 @@ static void do_catch_up(struct spk_synth *synth)
20676                 if (!synth->io_ops->synth_out(synth, ch)) {
20677                         synth->io_ops->tiocmset(synth, 0, UART_MCR_RTS);
20678                         synth->io_ops->tiocmset(synth, UART_MCR_RTS, 0);
20679 -                       schedule_timeout(msecs_to_jiffies(full_time_val));
20680 +                       schedule_msec_hrtimeout(full_time_val);
20681                         continue;
20682                 }
20683                 if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
20684 diff --git a/drivers/accessibility/speakup/speakup_decext.c b/drivers/accessibility/speakup/speakup_decext.c
20685 index 092cfd08a9e1..e7fc85f8ce5c 100644
20686 --- a/drivers/accessibility/speakup/speakup_decext.c
20687 +++ b/drivers/accessibility/speakup/speakup_decext.c
20688 @@ -180,7 +180,7 @@ static void do_catch_up(struct spk_synth *synth)
20689                 if (ch == '\n')
20690                         ch = 0x0D;
20691                 if (synth_full() || !synth->io_ops->synth_out(synth, ch)) {
20692 -                       schedule_timeout(msecs_to_jiffies(delay_time_val));
20693 +                       schedule_msec_hrtimeout(delay_time_val);
20694                         continue;
20695                 }
20696                 set_current_state(TASK_RUNNING);
20697 diff --git a/drivers/accessibility/speakup/speakup_decpc.c b/drivers/accessibility/speakup/speakup_decpc.c
20698 index dec314dee214..2a5deb5256b2 100644
20699 --- a/drivers/accessibility/speakup/speakup_decpc.c
20700 +++ b/drivers/accessibility/speakup/speakup_decpc.c
20701 @@ -398,7 +398,7 @@ static void do_catch_up(struct spk_synth *synth)
20702                 if (ch == '\n')
20703                         ch = 0x0D;
20704                 if (dt_sendchar(ch)) {
20705 -                       schedule_timeout(msecs_to_jiffies(delay_time_val));
20706 +                       schedule_msec_hrtimeout((delay_time_val));
20707                         continue;
20708                 }
20709                 set_current_state(TASK_RUNNING);
20710 diff --git a/drivers/accessibility/speakup/speakup_dectlk.c b/drivers/accessibility/speakup/speakup_dectlk.c
20711 index 580ec796816b..67c156b90ddb 100644
20712 --- a/drivers/accessibility/speakup/speakup_dectlk.c
20713 +++ b/drivers/accessibility/speakup/speakup_dectlk.c
20714 @@ -256,7 +256,7 @@ static void do_catch_up(struct spk_synth *synth)
20715                 if (ch == '\n')
20716                         ch = 0x0D;
20717                 if (synth_full_val || !synth->io_ops->synth_out(synth, ch)) {
20718 -                       schedule_timeout(msecs_to_jiffies(delay_time_val));
20719 +                       schedule_msec_hrtimeout(delay_time_val);
20720                         continue;
20721                 }
20722                 set_current_state(TASK_RUNNING);
20723 diff --git a/drivers/accessibility/speakup/speakup_dtlk.c b/drivers/accessibility/speakup/speakup_dtlk.c
20724 index 92838d3ae9eb..b687cb4d3268 100644
20725 --- a/drivers/accessibility/speakup/speakup_dtlk.c
20726 +++ b/drivers/accessibility/speakup/speakup_dtlk.c
20727 @@ -211,7 +211,7 @@ static void do_catch_up(struct spk_synth *synth)
20728                 delay_time_val = delay_time->u.n.value;
20729                 spin_unlock_irqrestore(&speakup_info.spinlock, flags);
20730                 if (synth_full()) {
20731 -                       schedule_timeout(msecs_to_jiffies(delay_time_val));
20732 +                       schedule_msec_hrtimeout((delay_time_val));
20733                         continue;
20734                 }
20735                 set_current_state(TASK_RUNNING);
20736 @@ -227,7 +227,7 @@ static void do_catch_up(struct spk_synth *synth)
20737                         delay_time_val = delay_time->u.n.value;
20738                         jiffy_delta_val = jiffy_delta->u.n.value;
20739                         spin_unlock_irqrestore(&speakup_info.spinlock, flags);
20740 -                       schedule_timeout(msecs_to_jiffies(delay_time_val));
20741 +                       schedule_msec_hrtimeout((delay_time_val));
20742                         jiff_max = jiffies + jiffy_delta_val;
20743                 }
20744         }
20745 diff --git a/drivers/accessibility/speakup/speakup_keypc.c b/drivers/accessibility/speakup/speakup_keypc.c
20746 index 311f4aa0be22..99c523fdcc98 100644
20747 --- a/drivers/accessibility/speakup/speakup_keypc.c
20748 +++ b/drivers/accessibility/speakup/speakup_keypc.c
20749 @@ -199,7 +199,7 @@ static void do_catch_up(struct spk_synth *synth)
20750                 full_time_val = full_time->u.n.value;
20751                 spin_unlock_irqrestore(&speakup_info.spinlock, flags);
20752                 if (synth_full()) {
20753 -                       schedule_timeout(msecs_to_jiffies(full_time_val));
20754 +                       schedule_msec_hrtimeout((full_time_val));
20755                         continue;
20756                 }
20757                 set_current_state(TASK_RUNNING);
20758 @@ -232,7 +232,7 @@ static void do_catch_up(struct spk_synth *synth)
20759                         jiffy_delta_val = jiffy_delta->u.n.value;
20760                         delay_time_val = delay_time->u.n.value;
20761                         spin_unlock_irqrestore(&speakup_info.spinlock, flags);
20762 -                       schedule_timeout(msecs_to_jiffies(delay_time_val));
20763 +                       schedule_msec_hrtimeout(delay_time_val);
20764                         jiff_max = jiffies + jiffy_delta_val;
20765                 }
20766         }
20767 diff --git a/drivers/accessibility/speakup/synth.c b/drivers/accessibility/speakup/synth.c
20768 index 2b8699673bac..bf0cbdaf564f 100644
20769 --- a/drivers/accessibility/speakup/synth.c
20770 +++ b/drivers/accessibility/speakup/synth.c
20771 @@ -93,12 +93,8 @@ static void _spk_do_catch_up(struct spk_synth *synth, int unicode)
20772                 spin_unlock_irqrestore(&speakup_info.spinlock, flags);
20773                 if (ch == '\n')
20774                         ch = synth->procspeech;
20775 -               if (unicode)
20776 -                       ret = synth->io_ops->synth_out_unicode(synth, ch);
20777 -               else
20778 -                       ret = synth->io_ops->synth_out(synth, ch);
20779 -               if (!ret) {
20780 -                       schedule_timeout(msecs_to_jiffies(full_time_val));
20781 +               if (!synth->io_ops->synth_out(synth, ch)) {
20782 +                       schedule_msec_hrtimeout(full_time_val);
20783                         continue;
20784                 }
20785                 if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
20786 @@ -108,11 +104,9 @@ static void _spk_do_catch_up(struct spk_synth *synth, int unicode)
20787                         full_time_val = full_time->u.n.value;
20788                         spin_unlock_irqrestore(&speakup_info.spinlock, flags);
20789                         if (synth->io_ops->synth_out(synth, synth->procspeech))
20790 -                               schedule_timeout(
20791 -                                       msecs_to_jiffies(delay_time_val));
20792 +                               schedule_msec_hrtimeout(delay_time_val);
20793                         else
20794 -                               schedule_timeout(
20795 -                                       msecs_to_jiffies(full_time_val));
20796 +                               schedule_msec_hrtimeout(full_time_val);
20797                         jiff_max = jiffies + jiffy_delta_val;
20798                 }
20799                 set_current_state(TASK_RUNNING);
20800 diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c
20801 index f2d0e5915dab..0a0a982f9c28 100644
20802 --- a/drivers/acpi/arm64/gtdt.c
20803 +++ b/drivers/acpi/arm64/gtdt.c
20804 @@ -329,7 +329,7 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
20805                                         int index)
20807         struct platform_device *pdev;
20808 -       int irq = map_gt_gsi(wd->timer_interrupt, wd->timer_flags);
20809 +       int irq;
20811         /*
20812          * According to SBSA specification the size of refresh and control
20813 @@ -338,7 +338,7 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
20814         struct resource res[] = {
20815                 DEFINE_RES_MEM(wd->control_frame_address, SZ_4K),
20816                 DEFINE_RES_MEM(wd->refresh_frame_address, SZ_4K),
20817 -               DEFINE_RES_IRQ(irq),
20818 +               {},
20819         };
20820         int nr_res = ARRAY_SIZE(res);
20822 @@ -348,10 +348,11 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
20824         if (!(wd->refresh_frame_address && wd->control_frame_address)) {
20825                 pr_err(FW_BUG "failed to get the Watchdog base address.\n");
20826 -               acpi_unregister_gsi(wd->timer_interrupt);
20827                 return -EINVAL;
20828         }
20830 +       irq = map_gt_gsi(wd->timer_interrupt, wd->timer_flags);
20831 +       res[2] = (struct resource)DEFINE_RES_IRQ(irq);
20832         if (irq <= 0) {
20833                 pr_warn("failed to map the Watchdog interrupt.\n");
20834                 nr_res--;
20835 @@ -364,7 +365,8 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
20836          */
20837         pdev = platform_device_register_simple("sbsa-gwdt", index, res, nr_res);
20838         if (IS_ERR(pdev)) {
20839 -               acpi_unregister_gsi(wd->timer_interrupt);
20840 +               if (irq > 0)
20841 +                       acpi_unregister_gsi(wd->timer_interrupt);
20842                 return PTR_ERR(pdev);
20843         }
20845 diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
20846 index 69057fcd2c04..a5e6fd0bafa1 100644
20847 --- a/drivers/acpi/cppc_acpi.c
20848 +++ b/drivers/acpi/cppc_acpi.c
20849 @@ -119,23 +119,15 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
20850   */
20851  #define NUM_RETRIES 500ULL
20853 -struct cppc_attr {
20854 -       struct attribute attr;
20855 -       ssize_t (*show)(struct kobject *kobj,
20856 -                       struct attribute *attr, char *buf);
20857 -       ssize_t (*store)(struct kobject *kobj,
20858 -                       struct attribute *attr, const char *c, ssize_t count);
20861  #define define_one_cppc_ro(_name)              \
20862 -static struct cppc_attr _name =                        \
20863 +static struct kobj_attribute _name =           \
20864  __ATTR(_name, 0444, show_##_name, NULL)
20866  #define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)
20868  #define show_cppc_data(access_fn, struct_name, member_name)            \
20869         static ssize_t show_##member_name(struct kobject *kobj,         \
20870 -                                       struct attribute *attr, char *buf) \
20871 +                               struct kobj_attribute *attr, char *buf) \
20872         {                                                               \
20873                 struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);           \
20874                 struct struct_name st_name = {0};                       \
20875 @@ -161,7 +153,7 @@ show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
20876  show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
20878  static ssize_t show_feedback_ctrs(struct kobject *kobj,
20879 -               struct attribute *attr, char *buf)
20880 +               struct kobj_attribute *attr, char *buf)
20882         struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
20883         struct cppc_perf_fb_ctrs fb_ctrs = {0};
20884 diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
20885 index 7b54dc95d36b..4058e0241091 100644
20886 --- a/drivers/acpi/custom_method.c
20887 +++ b/drivers/acpi/custom_method.c
20888 @@ -42,6 +42,8 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
20889                                    sizeof(struct acpi_table_header)))
20890                         return -EFAULT;
20891                 uncopied_bytes = max_size = table.length;
20892 +               /* free any buf left from a previous write to avoid leaking it */
20893 +               kfree(buf);
20894                 buf = kzalloc(max_size, GFP_KERNEL);
20895                 if (!buf)
20896                         return -ENOMEM;
20897 @@ -55,6 +57,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
20898             (*ppos + count < count) ||
20899             (count > uncopied_bytes)) {
20900                 kfree(buf);
20901 +               buf = NULL;
20902                 return -EINVAL;
20903         }
20905 @@ -76,7 +79,6 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
20906                 add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
20907         }
20909 -       kfree(buf);
20910         return count;
20913 diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
20914 index 096153761ebc..58876248b192 100644
20915 --- a/drivers/acpi/device_pm.c
20916 +++ b/drivers/acpi/device_pm.c
20917 @@ -1310,6 +1310,7 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
20918                 {"PNP0C0B", }, /* Generic ACPI fan */
20919                 {"INT3404", }, /* Fan */
20920                 {"INTC1044", }, /* Fan for Tiger Lake generation */
20921 +               {"INTC1048", }, /* Fan for Alder Lake generation */
20922                 {}
20923         };
20924         struct acpi_device *adev = ACPI_COMPANION(dev);
20925 diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
20926 index 6efe7edd7b1e..345777bf7af9 100644
20927 --- a/drivers/acpi/scan.c
20928 +++ b/drivers/acpi/scan.c
20929 @@ -701,6 +701,7 @@ int acpi_device_add(struct acpi_device *device,
20931                 result = acpi_device_set_name(device, acpi_device_bus_id);
20932                 if (result) {
20933 +                       kfree_const(acpi_device_bus_id->bus_id);
20934                         kfree(acpi_device_bus_id);
20935                         goto err_unlock;
20936                 }
20937 diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
20938 index 53b22e26266c..2d821ed78453 100644
20939 --- a/drivers/android/Kconfig
20940 +++ b/drivers/android/Kconfig
20941 @@ -9,7 +9,7 @@ config ANDROID
20942  if ANDROID
20944  config ANDROID_BINDER_IPC
20945 -       bool "Android Binder IPC Driver"
20946 +       tristate "Android Binder IPC Driver"
20947         depends on MMU
20948         default n
20949         help
20950 @@ -21,8 +21,8 @@ config ANDROID_BINDER_IPC
20951           between said processes.
20953  config ANDROID_BINDERFS
20954 -       bool "Android Binderfs filesystem"
20955 -       depends on ANDROID_BINDER_IPC
20956 +       tristate "Android Binderfs filesystem"
20957 +       depends on (ANDROID_BINDER_IPC=y) || (ANDROID_BINDER_IPC=m && m)
20958         default n
20959         help
20960           Binderfs is a pseudo-filesystem for the Android Binder IPC driver
20961 diff --git a/drivers/android/Makefile b/drivers/android/Makefile
20962 index c9d3d0c99c25..b9d5ce8deca2 100644
20963 --- a/drivers/android/Makefile
20964 +++ b/drivers/android/Makefile
20965 @@ -1,6 +1,10 @@
20966  # SPDX-License-Identifier: GPL-2.0-only
20967  ccflags-y += -I$(src)                  # needed for trace events
20969 -obj-$(CONFIG_ANDROID_BINDERFS)         += binderfs.o
20970 -obj-$(CONFIG_ANDROID_BINDER_IPC)       += binder.o binder_alloc.o
20971 -obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
20972 +binder_linux-y := binder.o binder_alloc.o
20973 +obj-$(CONFIG_ANDROID_BINDER_IPC) += binder_linux.o
20974 +binder_linux-$(CONFIG_ANDROID_BINDERFS) += binderfs.o
20975 +binder_linux-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
20977 +# binder-$(CONFIG_ANDROID_BINDER_IPC) := binder.o binder_alloc.o
20978 +# binder-$(CONFIG_ANDROID_BINDERFS) += binderfs.o
20979 diff --git a/drivers/android/binder.c b/drivers/android/binder.c
20980 index c119736ca56a..569850551e88 100644
20981 --- a/drivers/android/binder.c
20982 +++ b/drivers/android/binder.c
20983 @@ -5788,9 +5788,20 @@ static int __init binder_init(void)
20984         return ret;
20987 -device_initcall(binder_init);
20988 +module_init(binder_init);
20990 + * binder will have no exit function since binderfs instances can be mounted
20991 + * multiple times and also in user namespaces finding and destroying them all
20992 + * is not feasible without introducing insane locking. Just ignoring existing
20993 + * instances on module unload also wouldn't work since we would lose track of
20994 + * what major number was dynamically allocated and also what minor numbers are
20995 + * already given out. So this would get us into all kinds of issues with device
20996 + * number reuse. So simply don't allow unloading unless we are forced to do so.
20997 + */
20999 +MODULE_AUTHOR("Google, Inc.");
21000 +MODULE_DESCRIPTION("Driver for Android binder device");
21001 +MODULE_LICENSE("GPL v2");
21003  #define CREATE_TRACE_POINTS
21004  #include "binder_trace.h"
21006 -MODULE_LICENSE("GPL v2");
21007 diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
21008 index 7caf74ad2405..07c11e8d6dad 100644
21009 --- a/drivers/android/binder_alloc.c
21010 +++ b/drivers/android/binder_alloc.c
21011 @@ -38,8 +38,7 @@ enum {
21012  };
21013  static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;
21015 -module_param_named(debug_mask, binder_alloc_debug_mask,
21016 -                  uint, 0644);
21017 +module_param_named(alloc_debug_mask, binder_alloc_debug_mask, uint, 0644);
21019  #define binder_alloc_debug(mask, x...) \
21020         do { \
21021 diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
21022 index 6e8e001381af..e4e0678e2781 100644
21023 --- a/drivers/android/binder_alloc.h
21024 +++ b/drivers/android/binder_alloc.h
21025 @@ -6,6 +6,7 @@
21026  #ifndef _LINUX_BINDER_ALLOC_H
21027  #define _LINUX_BINDER_ALLOC_H
21029 +#include <linux/kconfig.h>
21030  #include <linux/rbtree.h>
21031  #include <linux/list.h>
21032  #include <linux/mm.h>
21033 @@ -109,7 +110,7 @@ struct binder_alloc {
21034         size_t pages_high;
21035  };
21037 -#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
21038 +#if IS_ENABLED(CONFIG_ANDROID_BINDER_IPC_SELFTEST)
21039  void binder_selftest_alloc(struct binder_alloc *alloc);
21040  #else
21041  static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
21042 diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
21043 index 6cd79011e35d..da5bcb3203dc 100644
21044 --- a/drivers/android/binder_internal.h
21045 +++ b/drivers/android/binder_internal.h
21046 @@ -5,6 +5,7 @@
21048  #include <linux/export.h>
21049  #include <linux/fs.h>
21050 +#include <linux/kconfig.h>
21051  #include <linux/list.h>
21052  #include <linux/miscdevice.h>
21053  #include <linux/mutex.h>
21054 @@ -77,7 +78,7 @@ extern const struct file_operations binder_fops;
21056  extern char *binder_devices_param;
21058 -#ifdef CONFIG_ANDROID_BINDERFS
21059 +#if IS_ENABLED(CONFIG_ANDROID_BINDERFS)
21060  extern bool is_binderfs_device(const struct inode *inode);
21061  extern struct dentry *binderfs_create_file(struct dentry *dir, const char *name,
21062                                            const struct file_operations *fops,
21063 @@ -98,7 +99,7 @@ static inline struct dentry *binderfs_create_file(struct dentry *dir,
21064  static inline void binderfs_remove_file(struct dentry *dentry) {}
21065  #endif
21067 -#ifdef CONFIG_ANDROID_BINDERFS
21068 +#if IS_ENABLED(CONFIG_ANDROID_BINDERFS)
21069  extern int __init init_binderfs(void);
21070  #else
21071  static inline int __init init_binderfs(void)
21072 diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
21073 index e80ba93c62a9..1a18e9dbb2a6 100644
21074 --- a/drivers/android/binderfs.c
21075 +++ b/drivers/android/binderfs.c
21076 @@ -113,7 +113,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
21077         struct super_block *sb = ref_inode->i_sb;
21078         struct binderfs_info *info = sb->s_fs_info;
21079  #if defined(CONFIG_IPC_NS)
21080 -       bool use_reserve = (info->ipc_ns == &init_ipc_ns);
21081 +       bool use_reserve = (info->ipc_ns == show_init_ipc_ns());
21082  #else
21083         bool use_reserve = true;
21084  #endif
21085 @@ -402,7 +402,7 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
21086         struct dentry *root = sb->s_root;
21087         struct binderfs_info *info = sb->s_fs_info;
21088  #if defined(CONFIG_IPC_NS)
21089 -       bool use_reserve = (info->ipc_ns == &init_ipc_ns);
21090 +       bool use_reserve = (info->ipc_ns == show_init_ipc_ns());
21091  #else
21092         bool use_reserve = true;
21093  #endif
21094 @@ -682,7 +682,7 @@ static int binderfs_fill_super(struct super_block *sb, struct fs_context *fc)
21095                 return -ENOMEM;
21096         info = sb->s_fs_info;
21098 -       info->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
21099 +       info->ipc_ns = get_ipc_ns_exported(current->nsproxy->ipc_ns);
21101         info->root_gid = make_kgid(sb->s_user_ns, 0);
21102         if (!gid_valid(info->root_gid))
21103 diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
21104 index 00ba8e5a1ccc..33192a8f687d 100644
21105 --- a/drivers/ata/ahci.c
21106 +++ b/drivers/ata/ahci.c
21107 @@ -1772,6 +1772,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
21108                 hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
21110  #ifdef CONFIG_ARM64
21111 +       if (pdev->vendor == PCI_VENDOR_ID_HUAWEI &&
21112 +           pdev->device == 0xa235 &&
21113 +           pdev->revision < 0x30)
21114 +               hpriv->flags |= AHCI_HFLAG_NO_SXS;
21116         if (pdev->vendor == 0x177d && pdev->device == 0xa01c)
21117                 hpriv->irq_handler = ahci_thunderx_irq_handler;
21118  #endif
21119 diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
21120 index 98b8baa47dc5..d1f284f0c83d 100644
21121 --- a/drivers/ata/ahci.h
21122 +++ b/drivers/ata/ahci.h
21123 @@ -242,6 +242,7 @@ enum {
21124                                                         suspend/resume */
21125         AHCI_HFLAG_IGN_NOTSUPP_POWER_ON = (1 << 27), /* ignore -EOPNOTSUPP
21126                                                         from phy_power_on() */
21127 +       AHCI_HFLAG_NO_SXS               = (1 << 28), /* SXS not supported */
21129         /* ap->flags bits */
21131 diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
21132 index 5b32df5d33ad..6e9c5ade4c2e 100644
21133 --- a/drivers/ata/ahci_brcm.c
21134 +++ b/drivers/ata/ahci_brcm.c
21135 @@ -86,7 +86,8 @@ struct brcm_ahci_priv {
21136         u32 port_mask;
21137         u32 quirks;
21138         enum brcm_ahci_version version;
21139 -       struct reset_control *rcdev;
21140 +       struct reset_control *rcdev_rescal;
21141 +       struct reset_control *rcdev_ahci;
21142  };
21144  static inline u32 brcm_sata_readreg(void __iomem *addr)
21145 @@ -352,8 +353,8 @@ static int brcm_ahci_suspend(struct device *dev)
21146         else
21147                 ret = 0;
21149 -       if (priv->version != BRCM_SATA_BCM7216)
21150 -               reset_control_assert(priv->rcdev);
21151 +       reset_control_assert(priv->rcdev_ahci);
21152 +       reset_control_rearm(priv->rcdev_rescal);
21154         return ret;
21156 @@ -365,10 +366,10 @@ static int __maybe_unused brcm_ahci_resume(struct device *dev)
21157         struct brcm_ahci_priv *priv = hpriv->plat_data;
21158         int ret = 0;
21160 -       if (priv->version == BRCM_SATA_BCM7216)
21161 -               ret = reset_control_reset(priv->rcdev);
21162 -       else
21163 -               ret = reset_control_deassert(priv->rcdev);
21164 +       ret = reset_control_deassert(priv->rcdev_ahci);
21165 +       if (ret)
21166 +               return ret;
21167 +       ret = reset_control_reset(priv->rcdev_rescal);
21168         if (ret)
21169                 return ret;
21171 @@ -434,7 +435,6 @@ static int brcm_ahci_probe(struct platform_device *pdev)
21173         const struct of_device_id *of_id;
21174         struct device *dev = &pdev->dev;
21175 -       const char *reset_name = NULL;
21176         struct brcm_ahci_priv *priv;
21177         struct ahci_host_priv *hpriv;
21178         struct resource *res;
21179 @@ -456,15 +456,15 @@ static int brcm_ahci_probe(struct platform_device *pdev)
21180         if (IS_ERR(priv->top_ctrl))
21181                 return PTR_ERR(priv->top_ctrl);
21183 -       /* Reset is optional depending on platform and named differently */
21184 -       if (priv->version == BRCM_SATA_BCM7216)
21185 -               reset_name = "rescal";
21186 -       else
21187 -               reset_name = "ahci";
21189 -       priv->rcdev = devm_reset_control_get_optional(&pdev->dev, reset_name);
21190 -       if (IS_ERR(priv->rcdev))
21191 -               return PTR_ERR(priv->rcdev);
21192 +       if (priv->version == BRCM_SATA_BCM7216) {
21193 +               priv->rcdev_rescal = devm_reset_control_get_optional_shared(
21194 +                       &pdev->dev, "rescal");
21195 +               if (IS_ERR(priv->rcdev_rescal))
21196 +                       return PTR_ERR(priv->rcdev_rescal);
21197 +       }
21198 +       priv->rcdev_ahci = devm_reset_control_get_optional(&pdev->dev, "ahci");
21199 +       if (IS_ERR(priv->rcdev_ahci))
21200 +               return PTR_ERR(priv->rcdev_ahci);
21202         hpriv = ahci_platform_get_resources(pdev, 0);
21203         if (IS_ERR(hpriv))
21204 @@ -485,10 +485,10 @@ static int brcm_ahci_probe(struct platform_device *pdev)
21205                 break;
21206         }
21208 -       if (priv->version == BRCM_SATA_BCM7216)
21209 -               ret = reset_control_reset(priv->rcdev);
21210 -       else
21211 -               ret = reset_control_deassert(priv->rcdev);
21212 +       ret = reset_control_reset(priv->rcdev_rescal);
21213 +       if (ret)
21214 +               return ret;
21215 +       ret = reset_control_deassert(priv->rcdev_ahci);
21216         if (ret)
21217                 return ret;
21219 @@ -539,8 +539,8 @@ static int brcm_ahci_probe(struct platform_device *pdev)
21220  out_disable_clks:
21221         ahci_platform_disable_clks(hpriv);
21222  out_reset:
21223 -       if (priv->version != BRCM_SATA_BCM7216)
21224 -               reset_control_assert(priv->rcdev);
21225 +       reset_control_assert(priv->rcdev_ahci);
21226 +       reset_control_rearm(priv->rcdev_rescal);
21227         return ret;
21230 diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
21231 index ea5bf5f4cbed..fec2e9754aed 100644
21232 --- a/drivers/ata/libahci.c
21233 +++ b/drivers/ata/libahci.c
21234 @@ -493,6 +493,11 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
21235                 cap |= HOST_CAP_ALPM;
21236         }
21238 +       if ((cap & HOST_CAP_SXS) && (hpriv->flags & AHCI_HFLAG_NO_SXS)) {
21239 +               dev_info(dev, "controller does not support SXS, disabling CAP_SXS\n");
21240 +               cap &= ~HOST_CAP_SXS;
21241 +       }
21243         if (hpriv->force_port_map && port_map != hpriv->force_port_map) {
21244                 dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
21245                          port_map, hpriv->force_port_map);
21246 diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
21247 index de638dafce21..b2f552088291 100644
21248 --- a/drivers/ata/libahci_platform.c
21249 +++ b/drivers/ata/libahci_platform.c
21250 @@ -582,11 +582,13 @@ int ahci_platform_init_host(struct platform_device *pdev,
21251         int i, irq, n_ports, rc;
21253         irq = platform_get_irq(pdev, 0);
21254 -       if (irq <= 0) {
21255 +       if (irq < 0) {
21256                 if (irq != -EPROBE_DEFER)
21257                         dev_err(dev, "no irq\n");
21258                 return irq;
21259         }
21260 +       if (!irq)
21261 +               return -EINVAL;
21263         hpriv->irq = irq;
21265 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
21266 index e9cf31f38450..63f39440a9b4 100644
21267 --- a/drivers/ata/pata_arasan_cf.c
21268 +++ b/drivers/ata/pata_arasan_cf.c
21269 @@ -818,12 +818,19 @@ static int arasan_cf_probe(struct platform_device *pdev)
21270         else
21271                 quirk = CF_BROKEN_UDMA; /* as it is on spear1340 */
21273 -       /* if irq is 0, support only PIO */
21274 -       acdev->irq = platform_get_irq(pdev, 0);
21275 -       if (acdev->irq)
21276 +       /*
21277 +        * If there's an error getting IRQ (or we do get IRQ0),
21278 +        * support only PIO
21279 +        */
21280 +       ret = platform_get_irq(pdev, 0);
21281 +       if (ret > 0) {
21282 +               acdev->irq = ret;
21283                 irq_handler = arasan_cf_interrupt;
21284 -       else
21285 +       } else  if (ret == -EPROBE_DEFER) {
21286 +               return ret;
21287 +       } else  {
21288                 quirk |= CF_BROKEN_MWDMA | CF_BROKEN_UDMA;
21289 +       }
21291         acdev->pbase = res->start;
21292         acdev->vbase = devm_ioremap(&pdev->dev, res->start,
21293 diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
21294 index d1644a8ef9fa..abc0e87ca1a8 100644
21295 --- a/drivers/ata/pata_ixp4xx_cf.c
21296 +++ b/drivers/ata/pata_ixp4xx_cf.c
21297 @@ -165,8 +165,12 @@ static int ixp4xx_pata_probe(struct platform_device *pdev)
21298                 return -ENOMEM;
21300         irq = platform_get_irq(pdev, 0);
21301 -       if (irq)
21302 +       if (irq > 0)
21303                 irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
21304 +       else if (irq < 0)
21305 +               return irq;
21306 +       else
21307 +               return -EINVAL;
21309         /* Setup expansion bus chip selects */
21310         *data->cs0_cfg = data->cs0_bits;
21311 diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
21312 index 664ef658a955..b62446ea5f40 100644
21313 --- a/drivers/ata/sata_mv.c
21314 +++ b/drivers/ata/sata_mv.c
21315 @@ -4097,6 +4097,10 @@ static int mv_platform_probe(struct platform_device *pdev)
21316                 n_ports = mv_platform_data->n_ports;
21317                 irq = platform_get_irq(pdev, 0);
21318         }
21319 +       if (irq < 0)
21320 +               return irq;
21321 +       if (!irq)
21322 +               return -EINVAL;
21324         host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
21325         hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
21326 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
21327 index 653c8c6ac7a7..aedeb2dc1a18 100644
21328 --- a/drivers/base/devtmpfs.c
21329 +++ b/drivers/base/devtmpfs.c
21330 @@ -419,7 +419,6 @@ static int __init devtmpfs_setup(void *p)
21331         init_chroot(".");
21332  out:
21333         *(int *)p = err;
21334 -       complete(&setup_done);
21335         return err;
21338 @@ -432,6 +431,7 @@ static int __ref devtmpfsd(void *p)
21340         int err = devtmpfs_setup(p);
21342 +       complete(&setup_done);
21343         if (err)
21344                 return err;
21345         devtmpfs_work_loop();
21346 diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
21347 index 78355095e00d..7e2c79e2a88b 100644
21348 --- a/drivers/base/firmware_loader/main.c
21349 +++ b/drivers/base/firmware_loader/main.c
21350 @@ -465,6 +465,8 @@ static int fw_decompress_xz(struct device *dev, struct fw_priv *fw_priv,
21351  static char fw_path_para[256];
21352  static const char * const fw_path[] = {
21353         fw_path_para,
21354 +       "/etc/firmware/" UTS_RELEASE,
21355 +       "/etc/firmware",
21356         "/lib/firmware/updates/" UTS_RELEASE,
21357         "/lib/firmware/updates",
21358         "/lib/firmware/" UTS_RELEASE,
21359 diff --git a/drivers/base/node.c b/drivers/base/node.c
21360 index f449dbb2c746..2c36f61d30bc 100644
21361 --- a/drivers/base/node.c
21362 +++ b/drivers/base/node.c
21363 @@ -268,21 +268,20 @@ static void node_init_cache_dev(struct node *node)
21364         if (!dev)
21365                 return;
21367 +       device_initialize(dev);
21368         dev->parent = &node->dev;
21369         dev->release = node_cache_release;
21370         if (dev_set_name(dev, "memory_side_cache"))
21371 -               goto free_dev;
21372 +               goto put_device;
21374 -       if (device_register(dev))
21375 -               goto free_name;
21376 +       if (device_add(dev))
21377 +               goto put_device;
21379         pm_runtime_no_callbacks(dev);
21380         node->cache_dev = dev;
21381         return;
21382 -free_name:
21383 -       kfree_const(dev->kobj.name);
21384 -free_dev:
21385 -       kfree(dev);
21386 +put_device:
21387 +       put_device(dev);
21390  /**
21391 @@ -319,25 +318,24 @@ void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs)
21392                 return;
21394         dev = &info->dev;
21395 +       device_initialize(dev);
21396         dev->parent = node->cache_dev;
21397         dev->release = node_cacheinfo_release;
21398         dev->groups = cache_groups;
21399         if (dev_set_name(dev, "index%d", cache_attrs->level))
21400 -               goto free_cache;
21401 +               goto put_device;
21403         info->cache_attrs = *cache_attrs;
21404 -       if (device_register(dev)) {
21405 +       if (device_add(dev)) {
21406                 dev_warn(&node->dev, "failed to add cache level:%d\n",
21407                          cache_attrs->level);
21408 -               goto free_name;
21409 +               goto put_device;
21410         }
21411         pm_runtime_no_callbacks(dev);
21412         list_add_tail(&info->node, &node->cache_attrs);
21413         return;
21414 -free_name:
21415 -       kfree_const(dev->kobj.name);
21416 -free_cache:
21417 -       kfree(info);
21418 +put_device:
21419 +       put_device(dev);
21422  static void node_remove_caches(struct node *node)
21423 diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
21424 index fe1dad68aee4..ae011f2bc537 100644
21425 --- a/drivers/base/power/runtime.c
21426 +++ b/drivers/base/power/runtime.c
21427 @@ -1637,6 +1637,7 @@ void pm_runtime_init(struct device *dev)
21428         dev->power.request_pending = false;
21429         dev->power.request = RPM_REQ_NONE;
21430         dev->power.deferred_resume = false;
21431 +       dev->power.needs_force_resume = 0;
21432         INIT_WORK(&dev->power.work, pm_runtime_work);
21434         dev->power.timer_expires = 0;
21435 @@ -1804,10 +1805,12 @@ int pm_runtime_force_suspend(struct device *dev)
21436          * its parent, but set its status to RPM_SUSPENDED anyway in case this
21437          * function will be called again for it in the meantime.
21438          */
21439 -       if (pm_runtime_need_not_resume(dev))
21440 +       if (pm_runtime_need_not_resume(dev)) {
21441                 pm_runtime_set_suspended(dev);
21442 -       else
21443 +       } else {
21444                 __update_runtime_status(dev, RPM_SUSPENDED);
21445 +               dev->power.needs_force_resume = 1;
21446 +       }
21448         return 0;
21450 @@ -1834,7 +1837,7 @@ int pm_runtime_force_resume(struct device *dev)
21451         int (*callback)(struct device *);
21452         int ret = 0;
21454 -       if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
21455 +       if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
21456                 goto out;
21458         /*
21459 @@ -1853,6 +1856,7 @@ int pm_runtime_force_resume(struct device *dev)
21461         pm_runtime_mark_last_busy(dev);
21462  out:
21463 +       dev->power.needs_force_resume = 0;
21464         pm_runtime_enable(dev);
21465         return ret;
21467 diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
21468 index ff2ee87987c7..211a335a608d 100644
21469 --- a/drivers/base/regmap/regmap-debugfs.c
21470 +++ b/drivers/base/regmap/regmap-debugfs.c
21471 @@ -660,6 +660,7 @@ void regmap_debugfs_exit(struct regmap *map)
21472                 regmap_debugfs_free_dump_cache(map);
21473                 mutex_unlock(&map->cache_lock);
21474                 kfree(map->debugfs_name);
21475 +               map->debugfs_name = NULL;
21476         } else {
21477                 struct regmap_debugfs_node *node, *tmp;
21479 diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
21480 index fa3719ef80e4..88310ac9ce90 100644
21481 --- a/drivers/base/swnode.c
21482 +++ b/drivers/base/swnode.c
21483 @@ -1032,6 +1032,7 @@ int device_add_software_node(struct device *dev, const struct software_node *nod
21484         }
21486         set_secondary_fwnode(dev, &swnode->fwnode);
21487 +       software_node_notify(dev, KOBJ_ADD);
21489         return 0;
21491 @@ -1105,8 +1106,8 @@ int software_node_notify(struct device *dev, unsigned long action)
21493         switch (action) {
21494         case KOBJ_ADD:
21495 -               ret = sysfs_create_link(&dev->kobj, &swnode->kobj,
21496 -                                       "software_node");
21497 +               ret = sysfs_create_link_nowarn(&dev->kobj, &swnode->kobj,
21498 +                                              "software_node");
21499                 if (ret)
21500                         break;
21502 diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
21503 index 104b713f4055..d601e49f80e0 100644
21504 --- a/drivers/block/ataflop.c
21505 +++ b/drivers/block/ataflop.c
21506 @@ -729,8 +729,12 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
21507         unsigned long   flags;
21508         int ret;
21510 -       if (type)
21511 +       if (type) {
21512                 type--;
21513 +               if (type >= NUM_DISK_MINORS ||
21514 +                   minor2disktype[type].drive_types > DriveType)
21515 +                       return -EINVAL;
21516 +       }
21518         q = unit[drive].disk[type]->queue;
21519         blk_mq_freeze_queue(q);
21520 @@ -742,11 +746,6 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
21521         local_irq_restore(flags);
21523         if (type) {
21524 -               if (type >= NUM_DISK_MINORS ||
21525 -                   minor2disktype[type].drive_types > DriveType) {
21526 -                       ret = -EINVAL;
21527 -                       goto out;
21528 -               }
21529                 type = minor2disktype[type].index;
21530                 UDT = &atari_disk_type[type];
21531         }
21532 @@ -2002,7 +2001,10 @@ static void ataflop_probe(dev_t dev)
21533         int drive = MINOR(dev) & 3;
21534         int type  = MINOR(dev) >> 2;
21536 -       if (drive >= FD_MAX_UNITS || type > NUM_DISK_MINORS)
21537 +       if (type)
21538 +               type--;
21540 +       if (drive >= FD_MAX_UNITS || type >= NUM_DISK_MINORS)
21541                 return;
21542         mutex_lock(&ataflop_probe_lock);
21543         if (!unit[drive].disk[type]) {
21544 diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
21545 index 4ff71b579cfc..974da561b8e5 100644
21546 --- a/drivers/block/nbd.c
21547 +++ b/drivers/block/nbd.c
21548 @@ -1980,7 +1980,8 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
21549          * config ref and try to destroy the workqueue from inside the work
21550          * queue.
21551          */
21552 -       flush_workqueue(nbd->recv_workq);
21553 +       if (nbd->recv_workq)
21554 +               flush_workqueue(nbd->recv_workq);
21555         if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
21556                                &nbd->config->runtime_flags))
21557                 nbd_config_put(nbd);
21558 diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
21559 index bfcab1c782b5..dae54dd1aeac 100644
21560 --- a/drivers/block/null_blk/zoned.c
21561 +++ b/drivers/block/null_blk/zoned.c
21562 @@ -180,6 +180,7 @@ int null_register_zoned_dev(struct nullb *nullb)
21563  void null_free_zoned_dev(struct nullb_device *dev)
21565         kvfree(dev->zones);
21566 +       dev->zones = NULL;
21569  int null_report_zones(struct gendisk *disk, sector_t sector,
21570 diff --git a/drivers/block/rnbd/rnbd-clt-sysfs.c b/drivers/block/rnbd/rnbd-clt-sysfs.c
21571 index d4aa6bfc9555..49ad400a5225 100644
21572 --- a/drivers/block/rnbd/rnbd-clt-sysfs.c
21573 +++ b/drivers/block/rnbd/rnbd-clt-sysfs.c
21574 @@ -432,10 +432,14 @@ void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev)
21575          * i.e. rnbd_clt_unmap_dev_store() leading to a sysfs warning because
21576          * of sysfs link already was removed already.
21577          */
21578 -       if (dev->blk_symlink_name && try_module_get(THIS_MODULE)) {
21579 -               sysfs_remove_link(rnbd_devs_kobj, dev->blk_symlink_name);
21580 +       if (dev->blk_symlink_name) {
21581 +               if (try_module_get(THIS_MODULE)) {
21582 +                       sysfs_remove_link(rnbd_devs_kobj, dev->blk_symlink_name);
21583 +                       module_put(THIS_MODULE);
21584 +               }
21585 +               /* It should be freed always. */
21586                 kfree(dev->blk_symlink_name);
21587 -               module_put(THIS_MODULE);
21588 +               dev->blk_symlink_name = NULL;
21589         }
21592 @@ -479,11 +483,7 @@ static int rnbd_clt_get_path_name(struct rnbd_clt_dev *dev, char *buf,
21593         while ((s = strchr(pathname, '/')))
21594                 s[0] = '!';
21596 -       ret = snprintf(buf, len, "%s", pathname);
21597 -       if (ret >= len)
21598 -               return -ENAMETOOLONG;
21600 -       ret = snprintf(buf, len, "%s@%s", buf, dev->sess->sessname);
21601 +       ret = snprintf(buf, len, "%s@%s", pathname, dev->sess->sessname);
21602         if (ret >= len)
21603                 return -ENAMETOOLONG;
21605 diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
21606 index 45a470076652..5ab7319ff2ea 100644
21607 --- a/drivers/block/rnbd/rnbd-clt.c
21608 +++ b/drivers/block/rnbd/rnbd-clt.c
21609 @@ -693,7 +693,11 @@ static void remap_devs(struct rnbd_clt_session *sess)
21610                 return;
21611         }
21613 -       rtrs_clt_query(sess->rtrs, &attrs);
21614 +       err = rtrs_clt_query(sess->rtrs, &attrs);
21615 +       if (err) {
21616 +               pr_err("rtrs_clt_query(\"%s\"): %d\n", sess->sessname, err);
21617 +               return;
21618 +       }
21619         mutex_lock(&sess->lock);
21620         sess->max_io_size = attrs.max_io_size;
21622 @@ -1234,7 +1238,11 @@ find_and_get_or_create_sess(const char *sessname,
21623                 err = PTR_ERR(sess->rtrs);
21624                 goto wake_up_and_put;
21625         }
21626 -       rtrs_clt_query(sess->rtrs, &attrs);
21628 +       err = rtrs_clt_query(sess->rtrs, &attrs);
21629 +       if (err)
21630 +               goto close_rtrs;
21632         sess->max_io_size = attrs.max_io_size;
21633         sess->queue_depth = attrs.queue_depth;
21635 diff --git a/drivers/block/rnbd/rnbd-clt.h b/drivers/block/rnbd/rnbd-clt.h
21636 index 537d499dad3b..73d980840531 100644
21637 --- a/drivers/block/rnbd/rnbd-clt.h
21638 +++ b/drivers/block/rnbd/rnbd-clt.h
21639 @@ -87,7 +87,7 @@ struct rnbd_clt_session {
21640         DECLARE_BITMAP(cpu_queues_bm, NR_CPUS);
21641         int     __percpu        *cpu_rr; /* per-cpu var for CPU round-robin */
21642         atomic_t                busy;
21643 -       int                     queue_depth;
21644 +       size_t                  queue_depth;
21645         u32                     max_io_size;
21646         struct blk_mq_tag_set   tag_set;
21647         struct mutex            lock; /* protects state and devs_list */
21648 diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
21649 index a6a68d44f517..677770f32843 100644
21650 --- a/drivers/block/rnbd/rnbd-srv.c
21651 +++ b/drivers/block/rnbd/rnbd-srv.c
21652 @@ -341,7 +341,9 @@ void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev)
21653         struct rnbd_srv_session *sess = sess_dev->sess;
21655         sess_dev->keep_id = true;
21656 -       mutex_lock(&sess->lock);
21657 +       /* It is already started to close by client's close message. */
21658 +       if (!mutex_trylock(&sess->lock))
21659 +               return;
21660         rnbd_srv_destroy_dev_session_sysfs(sess_dev);
21661         mutex_unlock(&sess->lock);
21663 diff --git a/drivers/block/swim.c b/drivers/block/swim.c
21664 index cc6a0bc6c005..ac5c170d76e8 100644
21665 --- a/drivers/block/swim.c
21666 +++ b/drivers/block/swim.c
21667 @@ -328,7 +328,7 @@ static inline void swim_motor(struct swim __iomem *base,
21668                         if (swim_readbit(base, MOTOR_ON))
21669                                 break;
21670                         set_current_state(TASK_INTERRUPTIBLE);
21671 -                       schedule_timeout(1);
21672 +                       schedule_min_hrtimeout();
21673                 }
21674         } else if (action == OFF) {
21675                 swim_action(base, MOTOR_OFF);
21676 @@ -347,7 +347,7 @@ static inline void swim_eject(struct swim __iomem *base)
21677                 if (!swim_readbit(base, DISK_IN))
21678                         break;
21679                 set_current_state(TASK_INTERRUPTIBLE);
21680 -               schedule_timeout(1);
21681 +               schedule_min_hrtimeout();
21682         }
21683         swim_select(base, RELAX);
21685 @@ -372,6 +372,7 @@ static inline int swim_step(struct swim __iomem *base)
21687                 set_current_state(TASK_INTERRUPTIBLE);
21688                 schedule_timeout(1);
21689 +               schedule_min_hrtimeout();
21691                 swim_select(base, RELAX);
21692                 if (!swim_readbit(base, STEP))
21693 diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
21694 index b0c71d3a81a0..bda5c815e441 100644
21695 --- a/drivers/block/xen-blkback/common.h
21696 +++ b/drivers/block/xen-blkback/common.h
21697 @@ -313,6 +313,7 @@ struct xen_blkif {
21699         struct work_struct      free_work;
21700         unsigned int            nr_ring_pages;
21701 +       bool                    multi_ref;
21702         /* All rings for this device. */
21703         struct xen_blkif_ring   *rings;
21704         unsigned int            nr_rings;
21705 diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
21706 index c2aaf690352c..125b22205d38 100644
21707 --- a/drivers/block/xen-blkback/xenbus.c
21708 +++ b/drivers/block/xen-blkback/xenbus.c
21709 @@ -998,14 +998,17 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
21710         for (i = 0; i < nr_grefs; i++) {
21711                 char ring_ref_name[RINGREF_NAME_LEN];
21713 -               snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
21714 +               if (blkif->multi_ref)
21715 +                       snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
21716 +               else {
21717 +                       WARN_ON(i != 0);
21718 +                       snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref");
21719 +               }
21721                 err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
21722                                    "%u", &ring_ref[i]);
21724                 if (err != 1) {
21725 -                       if (nr_grefs == 1)
21726 -                               break;
21728                         err = -EINVAL;
21729                         xenbus_dev_fatal(dev, err, "reading %s/%s",
21730                                          dir, ring_ref_name);
21731 @@ -1013,18 +1016,6 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
21732                 }
21733         }
21735 -       if (err != 1) {
21736 -               WARN_ON(nr_grefs != 1);
21738 -               err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u",
21739 -                                  &ring_ref[0]);
21740 -               if (err != 1) {
21741 -                       err = -EINVAL;
21742 -                       xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
21743 -                       return err;
21744 -               }
21745 -       }
21747         err = -ENOMEM;
21748         for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
21749                 req = kzalloc(sizeof(*req), GFP_KERNEL);
21750 @@ -1129,10 +1120,15 @@ static int connect_ring(struct backend_info *be)
21751                  blkif->nr_rings, blkif->blk_protocol, protocol,
21752                  blkif->vbd.feature_gnt_persistent ? "persistent grants" : "");
21754 -       ring_page_order = xenbus_read_unsigned(dev->otherend,
21755 -                                              "ring-page-order", 0);
21757 -       if (ring_page_order > xen_blkif_max_ring_order) {
21758 +       err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
21759 +                          &ring_page_order);
21760 +       if (err != 1) {
21761 +               blkif->nr_ring_pages = 1;
21762 +               blkif->multi_ref = false;
21763 +       } else if (ring_page_order <= xen_blkif_max_ring_order) {
21764 +               blkif->nr_ring_pages = 1 << ring_page_order;
21765 +               blkif->multi_ref = true;
21766 +       } else {
21767                 err = -EINVAL;
21768                 xenbus_dev_fatal(dev, err,
21769                                  "requested ring page order %d exceed max:%d",
21770 @@ -1141,8 +1137,6 @@ static int connect_ring(struct backend_info *be)
21771                 return err;
21772         }
21774 -       blkif->nr_ring_pages = 1 << ring_page_order;
21776         if (blkif->nr_rings == 1)
21777                 return read_per_ring_refs(&blkif->rings[0], dev->otherend);
21778         else {
21779 diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
21780 index 5cbfbd948f67..4a901508e48e 100644
21781 --- a/drivers/bluetooth/btusb.c
21782 +++ b/drivers/bluetooth/btusb.c
21783 @@ -399,7 +399,9 @@ static const struct usb_device_id blacklist_table[] = {
21785         /* MediaTek Bluetooth devices */
21786         { USB_VENDOR_AND_INTERFACE_INFO(0x0e8d, 0xe0, 0x01, 0x01),
21787 -         .driver_info = BTUSB_MEDIATEK },
21788 +         .driver_info = BTUSB_MEDIATEK |
21789 +                        BTUSB_WIDEBAND_SPEECH |
21790 +                        BTUSB_VALID_LE_STATES },
21792         /* Additional MediaTek MT7615E Bluetooth devices */
21793         { USB_DEVICE(0x13d3, 0x3560), .driver_info = BTUSB_MEDIATEK},
21794 diff --git a/drivers/bus/mhi/core/boot.c b/drivers/bus/mhi/core/boot.c
21795 index c2546bf229fb..08c28740dc4e 100644
21796 --- a/drivers/bus/mhi/core/boot.c
21797 +++ b/drivers/bus/mhi/core/boot.c
21798 @@ -389,7 +389,6 @@ static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
21799  void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
21801         const struct firmware *firmware = NULL;
21802 -       struct image_info *image_info;
21803         struct device *dev = &mhi_cntrl->mhi_dev->dev;
21804         const char *fw_name;
21805         void *buf;
21806 @@ -491,44 +490,42 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
21807  fw_load_ee_pthru:
21808         /* Transitioning into MHI RESET->READY state */
21809         ret = mhi_ready_state_transition(mhi_cntrl);
21811 -       if (!mhi_cntrl->fbc_download)
21812 -               return;
21814         if (ret) {
21815                 dev_err(dev, "MHI did not enter READY state\n");
21816                 goto error_ready_state;
21817         }
21819 -       /* Wait for the SBL event */
21820 -       ret = wait_event_timeout(mhi_cntrl->state_event,
21821 -                                mhi_cntrl->ee == MHI_EE_SBL ||
21822 -                                MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
21823 -                                msecs_to_jiffies(mhi_cntrl->timeout_ms));
21824 +       dev_info(dev, "Wait for device to enter SBL or Mission mode\n");
21825 +       return;
21827 -       if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
21828 -               dev_err(dev, "MHI did not enter SBL\n");
21829 -               goto error_ready_state;
21830 +error_ready_state:
21831 +       if (mhi_cntrl->fbc_download) {
21832 +               mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
21833 +               mhi_cntrl->fbc_image = NULL;
21834         }
21836 -       /* Start full firmware image download */
21837 -       image_info = mhi_cntrl->fbc_image;
21838 +error_fw_load:
21839 +       mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;
21840 +       wake_up_all(&mhi_cntrl->state_event);
21843 +int mhi_download_amss_image(struct mhi_controller *mhi_cntrl)
21845 +       struct image_info *image_info = mhi_cntrl->fbc_image;
21846 +       struct device *dev = &mhi_cntrl->mhi_dev->dev;
21847 +       int ret;
21849 +       if (!image_info)
21850 +               return -EIO;
21852         ret = mhi_fw_load_bhie(mhi_cntrl,
21853                                /* Vector table is the last entry */
21854                                &image_info->mhi_buf[image_info->entries - 1]);
21855         if (ret) {
21856 -               dev_err(dev, "MHI did not load image over BHIe, ret: %d\n",
21857 -                       ret);
21858 -               goto error_fw_load;
21859 +               dev_err(dev, "MHI did not load AMSS, ret:%d\n", ret);
21860 +               mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;
21861 +               wake_up_all(&mhi_cntrl->state_event);
21862         }
21864 -       return;
21866 -error_ready_state:
21867 -       mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
21868 -       mhi_cntrl->fbc_image = NULL;
21870 -error_fw_load:
21871 -       mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;
21872 -       wake_up_all(&mhi_cntrl->state_event);
21873 +       return ret;
21875 diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c
21876 index be4eebb0971b..08b7f4a06bfc 100644
21877 --- a/drivers/bus/mhi/core/init.c
21878 +++ b/drivers/bus/mhi/core/init.c
21879 @@ -508,8 +508,6 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
21881         /* Setup wake db */
21882         mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
21883 -       mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0);
21884 -       mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0);
21885         mhi_cntrl->wake_set = false;
21887         /* Setup channel db address for each channel in tre_ring */
21888 @@ -552,6 +550,7 @@ void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
21889         struct mhi_ring *buf_ring;
21890         struct mhi_ring *tre_ring;
21891         struct mhi_chan_ctxt *chan_ctxt;
21892 +       u32 tmp;
21894         buf_ring = &mhi_chan->buf_ring;
21895         tre_ring = &mhi_chan->tre_ring;
21896 @@ -565,7 +564,19 @@ void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
21897         vfree(buf_ring->base);
21899         buf_ring->base = tre_ring->base = NULL;
21900 +       tre_ring->ctxt_wp = NULL;
21901         chan_ctxt->rbase = 0;
21902 +       chan_ctxt->rlen = 0;
21903 +       chan_ctxt->rp = 0;
21904 +       chan_ctxt->wp = 0;
21906 +       tmp = chan_ctxt->chcfg;
21907 +       tmp &= ~CHAN_CTX_CHSTATE_MASK;
21908 +       tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT);
21909 +       chan_ctxt->chcfg = tmp;
21911 +       /* Update to all cores */
21912 +       smp_wmb();
21915  int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
21916 @@ -863,12 +874,10 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
21917         u32 soc_info;
21918         int ret, i;
21920 -       if (!mhi_cntrl)
21921 -               return -EINVAL;
21923 -       if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
21924 +       if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs ||
21925 +           !mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
21926             !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
21927 -           !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs)
21928 +           !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs || !mhi_cntrl->irq)
21929                 return -EINVAL;
21931         ret = parse_config(mhi_cntrl, config);
21932 @@ -890,8 +899,7 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
21933         INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
21934         init_waitqueue_head(&mhi_cntrl->state_event);
21936 -       mhi_cntrl->hiprio_wq = alloc_ordered_workqueue
21937 -                               ("mhi_hiprio_wq", WQ_MEM_RECLAIM | WQ_HIGHPRI);
21938 +       mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", WQ_HIGHPRI);
21939         if (!mhi_cntrl->hiprio_wq) {
21940                 dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n");
21941                 ret = -ENOMEM;
21942 @@ -1296,7 +1304,8 @@ static int mhi_driver_remove(struct device *dev)
21944                 mutex_lock(&mhi_chan->mutex);
21946 -               if (ch_state[dir] == MHI_CH_STATE_ENABLED &&
21947 +               if ((ch_state[dir] == MHI_CH_STATE_ENABLED ||
21948 +                    ch_state[dir] == MHI_CH_STATE_STOP) &&
21949                     !mhi_chan->offload_ch)
21950                         mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
21952 diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h
21953 index 6f80ec30c0cd..6f37439e5247 100644
21954 --- a/drivers/bus/mhi/core/internal.h
21955 +++ b/drivers/bus/mhi/core/internal.h
21956 @@ -619,6 +619,7 @@ int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl);
21957  int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl);
21958  int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
21959                  enum mhi_cmd_type cmd);
21960 +int mhi_download_amss_image(struct mhi_controller *mhi_cntrl);
21961  static inline bool mhi_is_active(struct mhi_controller *mhi_cntrl)
21963         return (mhi_cntrl->dev_state >= MHI_STATE_M0 &&
21964 diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
21965 index 4e0131b94056..61c37b23dd71 100644
21966 --- a/drivers/bus/mhi/core/main.c
21967 +++ b/drivers/bus/mhi/core/main.c
21968 @@ -242,10 +242,17 @@ static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
21969         smp_wmb();
21972 +static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
21974 +       return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len;
21977  int mhi_destroy_device(struct device *dev, void *data)
21979 +       struct mhi_chan *ul_chan, *dl_chan;
21980         struct mhi_device *mhi_dev;
21981         struct mhi_controller *mhi_cntrl;
21982 +       enum mhi_ee_type ee = MHI_EE_MAX;
21984         if (dev->bus != &mhi_bus_type)
21985                 return 0;
21986 @@ -257,6 +264,17 @@ int mhi_destroy_device(struct device *dev, void *data)
21987         if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
21988                 return 0;
21990 +       ul_chan = mhi_dev->ul_chan;
21991 +       dl_chan = mhi_dev->dl_chan;
21993 +       /*
21994 +        * If execution environment is specified, remove only those devices that
21995 +        * started in them based on ee_mask for the channels as we move on to a
21996 +        * different execution environment
21997 +        */
21998 +       if (data)
21999 +               ee = *(enum mhi_ee_type *)data;
22001         /*
22002          * For the suspend and resume case, this function will get called
22003          * without mhi_unregister_controller(). Hence, we need to drop the
22004 @@ -264,11 +282,19 @@ int mhi_destroy_device(struct device *dev, void *data)
22005          * be sure that there will be no instances of mhi_dev left after
22006          * this.
22007          */
22008 -       if (mhi_dev->ul_chan)
22009 -               put_device(&mhi_dev->ul_chan->mhi_dev->dev);
22010 +       if (ul_chan) {
22011 +               if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee)))
22012 +                       return 0;
22014 -       if (mhi_dev->dl_chan)
22015 -               put_device(&mhi_dev->dl_chan->mhi_dev->dev);
22016 +               put_device(&ul_chan->mhi_dev->dev);
22017 +       }
22019 +       if (dl_chan) {
22020 +               if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee)))
22021 +                       return 0;
22023 +               put_device(&dl_chan->mhi_dev->dev);
22024 +       }
22026         dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
22027                  mhi_dev->name);
22028 @@ -383,7 +409,16 @@ irqreturn_t mhi_irq_handler(int irq_number, void *dev)
22029         struct mhi_event_ctxt *er_ctxt =
22030                 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
22031         struct mhi_ring *ev_ring = &mhi_event->ring;
22032 -       void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
22033 +       dma_addr_t ptr = er_ctxt->rp;
22034 +       void *dev_rp;
22036 +       if (!is_valid_ring_ptr(ev_ring, ptr)) {
22037 +               dev_err(&mhi_cntrl->mhi_dev->dev,
22038 +                       "Event ring rp points outside of the event ring\n");
22039 +               return IRQ_HANDLED;
22040 +       }
22042 +       dev_rp = mhi_to_virtual(ev_ring, ptr);
22044         /* Only proceed if event ring has pending events */
22045         if (ev_ring->rp == dev_rp)
22046 @@ -409,7 +444,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
22047         struct device *dev = &mhi_cntrl->mhi_dev->dev;
22048         enum mhi_state state = MHI_STATE_MAX;
22049         enum mhi_pm_state pm_state = 0;
22050 -       enum mhi_ee_type ee = 0;
22051 +       enum mhi_ee_type ee = MHI_EE_MAX;
22053         write_lock_irq(&mhi_cntrl->pm_lock);
22054         if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
22055 @@ -418,8 +453,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
22056         }
22058         state = mhi_get_mhi_state(mhi_cntrl);
22059 -       ee = mhi_cntrl->ee;
22060 -       mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
22061 +       ee = mhi_get_exec_env(mhi_cntrl);
22062         dev_dbg(dev, "local ee:%s device ee:%s dev_state:%s\n",
22063                 TO_MHI_EXEC_STR(mhi_cntrl->ee), TO_MHI_EXEC_STR(ee),
22064                 TO_MHI_STATE_STR(state));
22065 @@ -431,27 +465,30 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
22066         }
22067         write_unlock_irq(&mhi_cntrl->pm_lock);
22069 -        /* If device supports RDDM don't bother processing SYS error */
22070 -       if (mhi_cntrl->rddm_image) {
22071 -               /* host may be performing a device power down already */
22072 -               if (!mhi_is_active(mhi_cntrl))
22073 -                       goto exit_intvec;
22074 +       if (pm_state != MHI_PM_SYS_ERR_DETECT || ee == mhi_cntrl->ee)
22075 +               goto exit_intvec;
22077 -               if (mhi_cntrl->ee == MHI_EE_RDDM && mhi_cntrl->ee != ee) {
22078 +       switch (ee) {
22079 +       case MHI_EE_RDDM:
22080 +               /* proceed if power down is not already in progress */
22081 +               if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
22082                         mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
22083 +                       mhi_cntrl->ee = ee;
22084                         wake_up_all(&mhi_cntrl->state_event);
22085                 }
22086 -               goto exit_intvec;
22087 -       }
22089 -       if (pm_state == MHI_PM_SYS_ERR_DETECT) {
22090 +               break;
22091 +       case MHI_EE_PBL:
22092 +       case MHI_EE_EDL:
22093 +       case MHI_EE_PTHRU:
22094 +               mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
22095 +               mhi_cntrl->ee = ee;
22096                 wake_up_all(&mhi_cntrl->state_event);
22098 -               /* For fatal errors, we let controller decide next step */
22099 -               if (MHI_IN_PBL(ee))
22100 -                       mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
22101 -               else
22102 -                       mhi_pm_sys_err_handler(mhi_cntrl);
22103 +               mhi_pm_sys_err_handler(mhi_cntrl);
22104 +               break;
22105 +       default:
22106 +               wake_up_all(&mhi_cntrl->state_event);
22107 +               mhi_pm_sys_err_handler(mhi_cntrl);
22108 +               break;
22109         }
22111  exit_intvec:
22112 @@ -536,6 +573,11 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
22113                 struct mhi_buf_info *buf_info;
22114                 u16 xfer_len;
22116 +               if (!is_valid_ring_ptr(tre_ring, ptr)) {
22117 +                       dev_err(&mhi_cntrl->mhi_dev->dev,
22118 +                               "Event element points outside of the tre ring\n");
22119 +                       break;
22120 +               }
22121                 /* Get the TRB this event points to */
22122                 ev_tre = mhi_to_virtual(tre_ring, ptr);
22124 @@ -570,8 +612,11 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
22125                         /* notify client */
22126                         mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
22128 -                       if (mhi_chan->dir == DMA_TO_DEVICE)
22129 +                       if (mhi_chan->dir == DMA_TO_DEVICE) {
22130                                 atomic_dec(&mhi_cntrl->pending_pkts);
22131 +                               /* Release the reference got from mhi_queue() */
22132 +                               mhi_cntrl->runtime_put(mhi_cntrl);
22133 +                       }
22135                         /*
22136                          * Recycle the buffer if buffer is pre-allocated,
22137 @@ -695,6 +740,12 @@ static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
22138         struct mhi_chan *mhi_chan;
22139         u32 chan;
22141 +       if (!is_valid_ring_ptr(mhi_ring, ptr)) {
22142 +               dev_err(&mhi_cntrl->mhi_dev->dev,
22143 +                       "Event element points outside of the cmd ring\n");
22144 +               return;
22145 +       }
22147         cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
22149         chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
22150 @@ -719,6 +770,7 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
22151         struct device *dev = &mhi_cntrl->mhi_dev->dev;
22152         u32 chan;
22153         int count = 0;
22154 +       dma_addr_t ptr = er_ctxt->rp;
22156         /*
22157          * This is a quick check to avoid unnecessary event processing
22158 @@ -728,7 +780,13 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
22159         if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
22160                 return -EIO;
22162 -       dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
22163 +       if (!is_valid_ring_ptr(ev_ring, ptr)) {
22164 +               dev_err(&mhi_cntrl->mhi_dev->dev,
22165 +                       "Event ring rp points outside of the event ring\n");
22166 +               return -EIO;
22167 +       }
22169 +       dev_rp = mhi_to_virtual(ev_ring, ptr);
22170         local_rp = ev_ring->rp;
22172         while (dev_rp != local_rp) {
22173 @@ -834,6 +892,8 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
22174                          */
22175                         if (chan < mhi_cntrl->max_chan) {
22176                                 mhi_chan = &mhi_cntrl->mhi_chan[chan];
22177 +                               if (!mhi_chan->configured)
22178 +                                       break;
22179                                 parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
22180                                 event_quota--;
22181                         }
22182 @@ -845,7 +905,15 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
22184                 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
22185                 local_rp = ev_ring->rp;
22186 -               dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
22188 +               ptr = er_ctxt->rp;
22189 +               if (!is_valid_ring_ptr(ev_ring, ptr)) {
22190 +                       dev_err(&mhi_cntrl->mhi_dev->dev,
22191 +                               "Event ring rp points outside of the event ring\n");
22192 +                       return -EIO;
22193 +               }
22195 +               dev_rp = mhi_to_virtual(ev_ring, ptr);
22196                 count++;
22197         }
22199 @@ -868,11 +936,18 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
22200         int count = 0;
22201         u32 chan;
22202         struct mhi_chan *mhi_chan;
22203 +       dma_addr_t ptr = er_ctxt->rp;
22205         if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
22206                 return -EIO;
22208 -       dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
22209 +       if (!is_valid_ring_ptr(ev_ring, ptr)) {
22210 +               dev_err(&mhi_cntrl->mhi_dev->dev,
22211 +                       "Event ring rp points outside of the event ring\n");
22212 +               return -EIO;
22213 +       }
22215 +       dev_rp = mhi_to_virtual(ev_ring, ptr);
22216         local_rp = ev_ring->rp;
22218         while (dev_rp != local_rp && event_quota > 0) {
22219 @@ -886,7 +961,8 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
22220                  * Only process the event ring elements whose channel
22221                  * ID is within the maximum supported range.
22222                  */
22223 -               if (chan < mhi_cntrl->max_chan) {
22224 +               if (chan < mhi_cntrl->max_chan &&
22225 +                   mhi_cntrl->mhi_chan[chan].configured) {
22226                         mhi_chan = &mhi_cntrl->mhi_chan[chan];
22228                         if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
22229 @@ -900,7 +976,15 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
22231                 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
22232                 local_rp = ev_ring->rp;
22233 -               dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
22235 +               ptr = er_ctxt->rp;
22236 +               if (!is_valid_ring_ptr(ev_ring, ptr)) {
22237 +                       dev_err(&mhi_cntrl->mhi_dev->dev,
22238 +                               "Event ring rp points outside of the event ring\n");
22239 +                       return -EIO;
22240 +               }
22242 +               dev_rp = mhi_to_virtual(ev_ring, ptr);
22243                 count++;
22244         }
22245         read_lock_bh(&mhi_cntrl->pm_lock);
22246 @@ -1004,9 +1088,11 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
22247         if (unlikely(ret))
22248                 goto exit_unlock;
22250 -       /* trigger M3 exit if necessary */
22251 -       if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
22252 -               mhi_trigger_resume(mhi_cntrl);
22253 +       /* Packet is queued, take a usage ref to exit M3 if necessary
22254 +        * for host->device buffer, balanced put is done on buffer completion
22255 +        * for device->host buffer, balanced put is after ringing the DB
22256 +        */
22257 +       mhi_cntrl->runtime_get(mhi_cntrl);
22259         /* Assert dev_wake (to exit/prevent M1/M2)*/
22260         mhi_cntrl->wake_toggle(mhi_cntrl);
22261 @@ -1014,12 +1100,11 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
22262         if (mhi_chan->dir == DMA_TO_DEVICE)
22263                 atomic_inc(&mhi_cntrl->pending_pkts);
22265 -       if (unlikely(!MHI_DB_ACCESS_VALID(mhi_cntrl))) {
22266 -               ret = -EIO;
22267 -               goto exit_unlock;
22268 -       }
22269 +       if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
22270 +               mhi_ring_chan_db(mhi_cntrl, mhi_chan);
22272 -       mhi_ring_chan_db(mhi_cntrl, mhi_chan);
22273 +       if (dir == DMA_FROM_DEVICE)
22274 +               mhi_cntrl->runtime_put(mhi_cntrl);
22276  exit_unlock:
22277         read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
22278 @@ -1365,6 +1450,7 @@ static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
22279         struct mhi_ring *ev_ring;
22280         struct device *dev = &mhi_cntrl->mhi_dev->dev;
22281         unsigned long flags;
22282 +       dma_addr_t ptr;
22284         dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);
22286 @@ -1372,7 +1458,15 @@ static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
22288         /* mark all stale events related to channel as STALE event */
22289         spin_lock_irqsave(&mhi_event->lock, flags);
22290 -       dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
22292 +       ptr = er_ctxt->rp;
22293 +       if (!is_valid_ring_ptr(ev_ring, ptr)) {
22294 +               dev_err(&mhi_cntrl->mhi_dev->dev,
22295 +                       "Event ring rp points outside of the event ring\n");
22296 +               dev_rp = ev_ring->rp;
22297 +       } else {
22298 +               dev_rp = mhi_to_virtual(ev_ring, ptr);
22299 +       }
22301         local_rp = ev_ring->rp;
22302         while (dev_rp != local_rp) {
22303 @@ -1403,8 +1497,11 @@ static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
22304         while (tre_ring->rp != tre_ring->wp) {
22305                 struct mhi_buf_info *buf_info = buf_ring->rp;
22307 -               if (mhi_chan->dir == DMA_TO_DEVICE)
22308 +               if (mhi_chan->dir == DMA_TO_DEVICE) {
22309                         atomic_dec(&mhi_cntrl->pending_pkts);
22310 +                       /* Release the reference got from mhi_queue() */
22311 +                       mhi_cntrl->runtime_put(mhi_cntrl);
22312 +               }
22314                 if (!buf_info->pre_mapped)
22315                         mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
22316 diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
22317 index 681960c72d2a..87d3b73bcade 100644
22318 --- a/drivers/bus/mhi/core/pm.c
22319 +++ b/drivers/bus/mhi/core/pm.c
22320 @@ -377,24 +377,28 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
22322         struct mhi_event *mhi_event;
22323         struct device *dev = &mhi_cntrl->mhi_dev->dev;
22324 +       enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee;
22325         int i, ret;
22327         dev_dbg(dev, "Processing Mission Mode transition\n");
22329         write_lock_irq(&mhi_cntrl->pm_lock);
22330         if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
22331 -               mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
22332 +               ee = mhi_get_exec_env(mhi_cntrl);
22334 -       if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
22335 +       if (!MHI_IN_MISSION_MODE(ee)) {
22336                 mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
22337                 write_unlock_irq(&mhi_cntrl->pm_lock);
22338                 wake_up_all(&mhi_cntrl->state_event);
22339                 return -EIO;
22340         }
22341 +       mhi_cntrl->ee = ee;
22342         write_unlock_irq(&mhi_cntrl->pm_lock);
22344         wake_up_all(&mhi_cntrl->state_event);
22346 +       device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
22347 +                             mhi_destroy_device);
22348         mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);
22350         /* Force MHI to be in M0 state before continuing */
22351 @@ -755,6 +759,8 @@ void mhi_pm_st_worker(struct work_struct *work)
22352                          * either SBL or AMSS states
22353                          */
22354                         mhi_create_devices(mhi_cntrl);
22355 +                       if (mhi_cntrl->fbc_download)
22356 +                               mhi_download_amss_image(mhi_cntrl);
22357                         break;
22358                 case DEV_ST_TRANSITION_MISSION_MODE:
22359                         mhi_pm_mission_mode_transition(mhi_cntrl);
22360 @@ -1092,7 +1098,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
22361                                                            &val) ||
22362                                         !val,
22363                                 msecs_to_jiffies(mhi_cntrl->timeout_ms));
22364 -               if (ret) {
22365 +               if (!ret) {
22366                         ret = -EIO;
22367                         dev_info(dev, "Failed to reset MHI due to syserr state\n");
22368                         goto error_bhi_offset;
22369 diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c
22370 index 20673a4b4a3c..ef549c695b55 100644
22371 --- a/drivers/bus/mhi/pci_generic.c
22372 +++ b/drivers/bus/mhi/pci_generic.c
22373 @@ -230,6 +230,21 @@ static void mhi_pci_status_cb(struct mhi_controller *mhi_cntrl,
22374         }
22377 +static void mhi_pci_wake_get_nop(struct mhi_controller *mhi_cntrl, bool force)
22379 +       /* no-op */
22382 +static void mhi_pci_wake_put_nop(struct mhi_controller *mhi_cntrl, bool override)
22384 +       /* no-op */
22387 +static void mhi_pci_wake_toggle_nop(struct mhi_controller *mhi_cntrl)
22389 +       /* no-op */
22392  static bool mhi_pci_is_alive(struct mhi_controller *mhi_cntrl)
22394         struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
22395 @@ -433,6 +448,9 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
22396         mhi_cntrl->status_cb = mhi_pci_status_cb;
22397         mhi_cntrl->runtime_get = mhi_pci_runtime_get;
22398         mhi_cntrl->runtime_put = mhi_pci_runtime_put;
22399 +       mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
22400 +       mhi_cntrl->wake_put = mhi_pci_wake_put_nop;
22401 +       mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
22403         err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
22404         if (err)
22405 @@ -498,6 +516,12 @@ static void mhi_pci_remove(struct pci_dev *pdev)
22406         mhi_unregister_controller(mhi_cntrl);
22409 +static void mhi_pci_shutdown(struct pci_dev *pdev)
22411 +       mhi_pci_remove(pdev);
22412 +       pci_set_power_state(pdev, PCI_D3hot);
22415  static void mhi_pci_reset_prepare(struct pci_dev *pdev)
22417         struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
22418 @@ -668,6 +692,7 @@ static struct pci_driver mhi_pci_driver = {
22419         .id_table       = mhi_pci_id_table,
22420         .probe          = mhi_pci_probe,
22421         .remove         = mhi_pci_remove,
22422 +       .shutdown       = mhi_pci_shutdown,
22423         .err_handler    = &mhi_pci_err_handler,
22424         .driver.pm      = &mhi_pci_pm_ops
22425  };
22426 diff --git a/drivers/bus/qcom-ebi2.c b/drivers/bus/qcom-ebi2.c
22427 index 03ddcf426887..0b8f53a688b8 100644
22428 --- a/drivers/bus/qcom-ebi2.c
22429 +++ b/drivers/bus/qcom-ebi2.c
22430 @@ -353,8 +353,10 @@ static int qcom_ebi2_probe(struct platform_device *pdev)
22432                 /* Figure out the chipselect */
22433                 ret = of_property_read_u32(child, "reg", &csindex);
22434 -               if (ret)
22435 +               if (ret) {
22436 +                       of_node_put(child);
22437                         return ret;
22438 +               }
22440                 if (csindex > 5) {
22441                         dev_err(dev,
22442 diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
22443 index 3d74f237f005..68145e326eb9 100644
22444 --- a/drivers/bus/ti-sysc.c
22445 +++ b/drivers/bus/ti-sysc.c
22446 @@ -635,6 +635,51 @@ static int sysc_parse_and_check_child_range(struct sysc *ddata)
22447         return 0;
22450 +/* Interconnect instances to probe before l4_per instances */
22451 +static struct resource early_bus_ranges[] = {
22452 +       /* am3/4 l4_wkup */
22453 +       { .start = 0x44c00000, .end = 0x44c00000 + 0x300000, },
22454 +       /* omap4/5 and dra7 l4_cfg */
22455 +       { .start = 0x4a000000, .end = 0x4a000000 + 0x300000, },
22456 +       /* omap4 l4_wkup */
22457 +       { .start = 0x4a300000, .end = 0x4a300000 + 0x30000,  },
22458 +       /* omap5 and dra7 l4_wkup without dra7 dcan segment */
22459 +       { .start = 0x4ae00000, .end = 0x4ae00000 + 0x30000,  },
22462 +static atomic_t sysc_defer = ATOMIC_INIT(10);
22464 +/**
22465 + * sysc_defer_non_critical - defer non_critical interconnect probing
22466 + * @ddata: device driver data
22467 + *
22468 + * We want to probe l4_cfg and l4_wkup interconnect instances before any
22469 + * l4_per instances as l4_per instances depend on resources on l4_cfg and
22470 + * l4_wkup interconnects.
22471 + */
22472 +static int sysc_defer_non_critical(struct sysc *ddata)
22474 +       struct resource *res;
22475 +       int i;
22477 +       if (!atomic_read(&sysc_defer))
22478 +               return 0;
22480 +       for (i = 0; i < ARRAY_SIZE(early_bus_ranges); i++) {
22481 +               res = &early_bus_ranges[i];
22482 +               if (ddata->module_pa >= res->start &&
22483 +                   ddata->module_pa <= res->end) {
22484 +                       atomic_set(&sysc_defer, 0);
22486 +                       return 0;
22487 +               }
22488 +       }
22490 +       atomic_dec_if_positive(&sysc_defer);
22492 +       return -EPROBE_DEFER;
22495  static struct device_node *stdout_path;
22497  static void sysc_init_stdout_path(struct sysc *ddata)
22498 @@ -856,15 +901,19 @@ static int sysc_map_and_check_registers(struct sysc *ddata)
22499         struct device_node *np = ddata->dev->of_node;
22500         int error;
22502 -       if (!of_get_property(np, "reg", NULL))
22503 -               return 0;
22505         error = sysc_parse_and_check_child_range(ddata);
22506         if (error)
22507                 return error;
22509 +       error = sysc_defer_non_critical(ddata);
22510 +       if (error)
22511 +               return error;
22513         sysc_check_children(ddata);
22515 +       if (!of_get_property(np, "reg", NULL))
22516 +               return 0;
22518         error = sysc_parse_registers(ddata);
22519         if (error)
22520                 return error;
22521 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
22522 index c44ad18464f1..ca87178200e0 100644
22523 --- a/drivers/char/ipmi/ipmi_msghandler.c
22524 +++ b/drivers/char/ipmi/ipmi_msghandler.c
22525 @@ -3563,7 +3563,7 @@ static void cleanup_smi_msgs(struct ipmi_smi *intf)
22526         /* Current message first, to preserve order */
22527         while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
22528                 /* Wait for the message to clear out. */
22529 -               schedule_timeout(1);
22530 +               schedule_min_hrtimeout();
22531         }
22533         /* No need for locks, the interface is down. */
22534 diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
22535 index 0416b9c9d410..9ce5fae0f1cf 100644
22536 --- a/drivers/char/ipmi/ipmi_ssif.c
22537 +++ b/drivers/char/ipmi/ipmi_ssif.c
22538 @@ -1288,7 +1288,7 @@ static void shutdown_ssif(void *send_info)
22540         /* make sure the driver is not looking for flags any more. */
22541         while (ssif_info->ssif_state != SSIF_NORMAL)
22542 -               schedule_timeout(1);
22543 +               schedule_min_hrtimeout();
22545         ssif_info->stopping = true;
22546         del_timer_sync(&ssif_info->watch_timer);
22547 diff --git a/drivers/char/random.c b/drivers/char/random.c
22548 index 0fe9e200e4c8..5d6acfecd919 100644
22549 --- a/drivers/char/random.c
22550 +++ b/drivers/char/random.c
22551 @@ -819,7 +819,7 @@ static bool __init crng_init_try_arch_early(struct crng_state *crng)
22553  static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
22555 -       memcpy(&crng->state[0], "expand 32-byte k", 16);
22556 +       chacha_init_consts(crng->state);
22557         _get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
22558         crng_init_try_arch(crng);
22559         crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
22560 @@ -827,7 +827,7 @@ static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
22562  static void __init crng_initialize_primary(struct crng_state *crng)
22564 -       memcpy(&crng->state[0], "expand 32-byte k", 16);
22565 +       chacha_init_consts(crng->state);
22566         _extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0);
22567         if (crng_init_try_arch_early(crng) && trust_cpu) {
22568                 invalidate_batched_entropy();
22569 diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c
22570 index 3633ed70f48f..1b18ce5ebab1 100644
22571 --- a/drivers/char/tpm/eventlog/acpi.c
22572 +++ b/drivers/char/tpm/eventlog/acpi.c
22573 @@ -41,6 +41,27 @@ struct acpi_tcpa {
22574         };
22575  };
22577 +/* Check that the given log is indeed a TPM2 log. */
22578 +static bool tpm_is_tpm2_log(void *bios_event_log, u64 len)
22580 +       struct tcg_efi_specid_event_head *efispecid;
22581 +       struct tcg_pcr_event *event_header;
22582 +       int n;
22584 +       if (len < sizeof(*event_header))
22585 +               return false;
22586 +       len -= sizeof(*event_header);
22587 +       event_header = bios_event_log;
22589 +       if (len < sizeof(*efispecid))
22590 +               return false;
22591 +       efispecid = (struct tcg_efi_specid_event_head *)event_header->event;
22593 +       n = memcmp(efispecid->signature, TCG_SPECID_SIG,
22594 +                  sizeof(TCG_SPECID_SIG));
22595 +       return n == 0;
22598  /* read binary bios log */
22599  int tpm_read_log_acpi(struct tpm_chip *chip)
22601 @@ -52,6 +73,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
22602         struct acpi_table_tpm2 *tbl;
22603         struct acpi_tpm2_phy *tpm2_phy;
22604         int format;
22605 +       int ret;
22607         log = &chip->log;
22609 @@ -112,6 +134,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
22611         log->bios_event_log_end = log->bios_event_log + len;
22613 +       ret = -EIO;
22614         virt = acpi_os_map_iomem(start, len);
22615         if (!virt)
22616                 goto err;
22617 @@ -119,11 +142,19 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
22618         memcpy_fromio(log->bios_event_log, virt, len);
22620         acpi_os_unmap_iomem(virt, len);
22622 +       if (chip->flags & TPM_CHIP_FLAG_TPM2 &&
22623 +           !tpm_is_tpm2_log(log->bios_event_log, len)) {
22624 +               /* try EFI log next */
22625 +               ret = -ENODEV;
22626 +               goto err;
22627 +       }
22629         return format;
22631  err:
22632         kfree(log->bios_event_log);
22633         log->bios_event_log = NULL;
22634 -       return -EIO;
22635 +       return ret;
22638 diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c
22639 index 7460f230bae4..8512ec76d526 100644
22640 --- a/drivers/char/tpm/eventlog/common.c
22641 +++ b/drivers/char/tpm/eventlog/common.c
22642 @@ -107,6 +107,9 @@ void tpm_bios_log_setup(struct tpm_chip *chip)
22643         int log_version;
22644         int rc = 0;
22646 +       if (chip->flags & TPM_CHIP_FLAG_VIRTUAL)
22647 +               return;
22649         rc = tpm_read_log(chip);
22650         if (rc < 0)
22651                 return;
22652 diff --git a/drivers/char/tpm/eventlog/efi.c b/drivers/char/tpm/eventlog/efi.c
22653 index 35229e5143ca..e6cb9d525e30 100644
22654 --- a/drivers/char/tpm/eventlog/efi.c
22655 +++ b/drivers/char/tpm/eventlog/efi.c
22656 @@ -17,6 +17,7 @@ int tpm_read_log_efi(struct tpm_chip *chip)
22659         struct efi_tcg2_final_events_table *final_tbl = NULL;
22660 +       int final_events_log_size = efi_tpm_final_log_size;
22661         struct linux_efi_tpm_eventlog *log_tbl;
22662         struct tpm_bios_log *log;
22663         u32 log_size;
22664 @@ -66,12 +67,12 @@ int tpm_read_log_efi(struct tpm_chip *chip)
22665         ret = tpm_log_version;
22667         if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR ||
22668 -           efi_tpm_final_log_size == 0 ||
22669 +           final_events_log_size == 0 ||
22670             tpm_log_version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
22671                 goto out;
22673         final_tbl = memremap(efi.tpm_final_log,
22674 -                            sizeof(*final_tbl) + efi_tpm_final_log_size,
22675 +                            sizeof(*final_tbl) + final_events_log_size,
22676                              MEMREMAP_WB);
22677         if (!final_tbl) {
22678                 pr_err("Could not map UEFI TPM final log\n");
22679 @@ -80,10 +81,18 @@ int tpm_read_log_efi(struct tpm_chip *chip)
22680                 goto out;
22681         }
22683 -       efi_tpm_final_log_size -= log_tbl->final_events_preboot_size;
22684 +       /*
22685 +        * The 'final events log' size excludes the 'final events preboot log'
22686 +        * at its beginning.
22687 +        */
22688 +       final_events_log_size -= log_tbl->final_events_preboot_size;
22690 +       /*
22691 +        * Allocate memory for the 'combined log' where we will append the
22692 +        * 'final events log' to.
22693 +        */
22694         tmp = krealloc(log->bios_event_log,
22695 -                      log_size + efi_tpm_final_log_size,
22696 +                      log_size + final_events_log_size,
22697                        GFP_KERNEL);
22698         if (!tmp) {
22699                 kfree(log->bios_event_log);
22700 @@ -94,15 +103,19 @@ int tpm_read_log_efi(struct tpm_chip *chip)
22701         log->bios_event_log = tmp;
22703         /*
22704 -        * Copy any of the final events log that didn't also end up in the
22705 -        * main log. Events can be logged in both if events are generated
22706 +        * Append any of the 'final events log' that didn't also end up in the
22707 +        * 'main log'. Events can be logged in both if events are generated
22708          * between GetEventLog() and ExitBootServices().
22709          */
22710         memcpy((void *)log->bios_event_log + log_size,
22711                final_tbl->events + log_tbl->final_events_preboot_size,
22712 -              efi_tpm_final_log_size);
22713 +              final_events_log_size);
22714 +       /*
22715 +        * The size of the 'combined log' is the size of the 'main log' plus
22716 +        * the size of the 'final events log'.
22717 +        */
22718         log->bios_event_log_end = log->bios_event_log +
22719 -               log_size + efi_tpm_final_log_size;
22720 +               log_size + final_events_log_size;
22722  out:
22723         memunmap(final_tbl);
22724 diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
22725 index eff1f12d981a..c84d23951219 100644
22726 --- a/drivers/char/tpm/tpm2-cmd.c
22727 +++ b/drivers/char/tpm/tpm2-cmd.c
22728 @@ -656,6 +656,7 @@ int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip)
22730         if (nr_commands !=
22731             be32_to_cpup((__be32 *)&buf.data[TPM_HEADER_SIZE + 5])) {
22732 +               rc = -EFAULT;
22733                 tpm_buf_destroy(&buf);
22734                 goto out;
22735         }
22736 diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
22737 index a2e0395cbe61..55b9d3965ae1 100644
22738 --- a/drivers/char/tpm/tpm_tis_core.c
22739 +++ b/drivers/char/tpm/tpm_tis_core.c
22740 @@ -709,16 +709,14 @@ static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
22741         cap_t cap;
22742         int ret;
22744 -       /* TPM 2.0 */
22745 -       if (chip->flags & TPM_CHIP_FLAG_TPM2)
22746 -               return tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
22748 -       /* TPM 1.2 */
22749         ret = request_locality(chip, 0);
22750         if (ret < 0)
22751                 return ret;
22753 -       ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
22754 +       if (chip->flags & TPM_CHIP_FLAG_TPM2)
22755 +               ret = tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
22756 +       else
22757 +               ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
22759         release_locality(chip, 0);
22761 @@ -1127,12 +1125,20 @@ int tpm_tis_resume(struct device *dev)
22762         if (ret)
22763                 return ret;
22765 -       /* TPM 1.2 requires self-test on resume. This function actually returns
22766 +       /*
22767 +        * TPM 1.2 requires self-test on resume. This function actually returns
22768          * an error code but for unknown reason it isn't handled.
22769          */
22770 -       if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
22771 +       if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
22772 +               ret = request_locality(chip, 0);
22773 +               if (ret < 0)
22774 +                       return ret;
22776                 tpm1_do_selftest(chip);
22778 +               release_locality(chip, 0);
22779 +       }
22781         return 0;
22783  EXPORT_SYMBOL_GPL(tpm_tis_resume);
22784 diff --git a/drivers/char/tpm/tpm_tis_i2c_cr50.c b/drivers/char/tpm/tpm_tis_i2c_cr50.c
22785 index ec9a65e7887d..f19c227d20f4 100644
22786 --- a/drivers/char/tpm/tpm_tis_i2c_cr50.c
22787 +++ b/drivers/char/tpm/tpm_tis_i2c_cr50.c
22788 @@ -483,6 +483,7 @@ static int tpm_cr50_i2c_tis_recv(struct tpm_chip *chip, u8 *buf, size_t buf_len)
22789         expected = be32_to_cpup((__be32 *)(buf + 2));
22790         if (expected > buf_len) {
22791                 dev_err(&chip->dev, "Buffer too small to receive i2c data\n");
22792 +               rc = -E2BIG;
22793                 goto out_err;
22794         }
22796 diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
22797 index 6a0059e508e3..93f5d11c830b 100644
22798 --- a/drivers/char/ttyprintk.c
22799 +++ b/drivers/char/ttyprintk.c
22800 @@ -158,12 +158,23 @@ static int tpk_ioctl(struct tty_struct *tty,
22801         return 0;
22805 + * TTY operations hangup function.
22806 + */
22807 +static void tpk_hangup(struct tty_struct *tty)
22809 +       struct ttyprintk_port *tpkp = tty->driver_data;
22811 +       tty_port_hangup(&tpkp->port);
22814  static const struct tty_operations ttyprintk_ops = {
22815         .open = tpk_open,
22816         .close = tpk_close,
22817         .write = tpk_write,
22818         .write_room = tpk_write_room,
22819         .ioctl = tpk_ioctl,
22820 +       .hangup = tpk_hangup,
22821  };
22823  static const struct tty_port_operations null_ops = { };
22824 diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
22825 index a55b37fc2c8b..bc3be5f3eae1 100644
22826 --- a/drivers/clk/clk-ast2600.c
22827 +++ b/drivers/clk/clk-ast2600.c
22828 @@ -61,10 +61,10 @@ static void __iomem *scu_g6_base;
22829  static const struct aspeed_gate_data aspeed_g6_gates[] = {
22830         /*                                  clk rst  name               parent   flags */
22831         [ASPEED_CLK_GATE_MCLK]          = {  0, -1, "mclk-gate",        "mpll",  CLK_IS_CRITICAL }, /* SDRAM */
22832 -       [ASPEED_CLK_GATE_ECLK]          = {  1, -1, "eclk-gate",        "eclk",  0 },   /* Video Engine */
22833 +       [ASPEED_CLK_GATE_ECLK]          = {  1,  6, "eclk-gate",        "eclk",  0 },   /* Video Engine */
22834         [ASPEED_CLK_GATE_GCLK]          = {  2,  7, "gclk-gate",        NULL,    0 },   /* 2D engine */
22835         /* vclk parent - dclk/d1clk/hclk/mclk */
22836 -       [ASPEED_CLK_GATE_VCLK]          = {  3,  6, "vclk-gate",        NULL,    0 },   /* Video Capture */
22837 +       [ASPEED_CLK_GATE_VCLK]          = {  3, -1, "vclk-gate",        NULL,    0 },   /* Video Capture */
22838         [ASPEED_CLK_GATE_BCLK]          = {  4,  8, "bclk-gate",        "bclk",  0 }, /* PCIe/PCI */
22839         /* From dpll */
22840         [ASPEED_CLK_GATE_DCLK]          = {  5, -1, "dclk-gate",        NULL,    CLK_IS_CRITICAL }, /* DAC */
22841 diff --git a/drivers/clk/imx/clk-imx25.c b/drivers/clk/imx/clk-imx25.c
22842 index a66cabfbf94f..66192fe0a898 100644
22843 --- a/drivers/clk/imx/clk-imx25.c
22844 +++ b/drivers/clk/imx/clk-imx25.c
22845 @@ -73,16 +73,6 @@ enum mx25_clks {
22847  static struct clk *clk[clk_max];
22849 -static struct clk ** const uart_clks[] __initconst = {
22850 -       &clk[uart_ipg_per],
22851 -       &clk[uart1_ipg],
22852 -       &clk[uart2_ipg],
22853 -       &clk[uart3_ipg],
22854 -       &clk[uart4_ipg],
22855 -       &clk[uart5_ipg],
22856 -       NULL
22859  static int __init __mx25_clocks_init(void __iomem *ccm_base)
22861         BUG_ON(!ccm_base);
22862 @@ -228,7 +218,7 @@ static int __init __mx25_clocks_init(void __iomem *ccm_base)
22863          */
22864         clk_set_parent(clk[cko_sel], clk[ipg]);
22866 -       imx_register_uart_clocks(uart_clks);
22867 +       imx_register_uart_clocks(6);
22869         return 0;
22871 diff --git a/drivers/clk/imx/clk-imx27.c b/drivers/clk/imx/clk-imx27.c
22872 index 5585ded8b8c6..56a5fc402b10 100644
22873 --- a/drivers/clk/imx/clk-imx27.c
22874 +++ b/drivers/clk/imx/clk-imx27.c
22875 @@ -49,17 +49,6 @@ static const char *ssi_sel_clks[] = { "spll_gate", "mpll", };
22876  static struct clk *clk[IMX27_CLK_MAX];
22877  static struct clk_onecell_data clk_data;
22879 -static struct clk ** const uart_clks[] __initconst = {
22880 -       &clk[IMX27_CLK_PER1_GATE],
22881 -       &clk[IMX27_CLK_UART1_IPG_GATE],
22882 -       &clk[IMX27_CLK_UART2_IPG_GATE],
22883 -       &clk[IMX27_CLK_UART3_IPG_GATE],
22884 -       &clk[IMX27_CLK_UART4_IPG_GATE],
22885 -       &clk[IMX27_CLK_UART5_IPG_GATE],
22886 -       &clk[IMX27_CLK_UART6_IPG_GATE],
22887 -       NULL
22890  static void __init _mx27_clocks_init(unsigned long fref)
22892         BUG_ON(!ccm);
22893 @@ -176,7 +165,7 @@ static void __init _mx27_clocks_init(unsigned long fref)
22895         clk_prepare_enable(clk[IMX27_CLK_EMI_AHB_GATE]);
22897 -       imx_register_uart_clocks(uart_clks);
22898 +       imx_register_uart_clocks(7);
22900         imx_print_silicon_rev("i.MX27", mx27_revision());
22902 diff --git a/drivers/clk/imx/clk-imx35.c b/drivers/clk/imx/clk-imx35.c
22903 index c1df03665c09..0fe5ac210156 100644
22904 --- a/drivers/clk/imx/clk-imx35.c
22905 +++ b/drivers/clk/imx/clk-imx35.c
22906 @@ -82,14 +82,6 @@ enum mx35_clks {
22908  static struct clk *clk[clk_max];
22910 -static struct clk ** const uart_clks[] __initconst = {
22911 -       &clk[ipg],
22912 -       &clk[uart1_gate],
22913 -       &clk[uart2_gate],
22914 -       &clk[uart3_gate],
22915 -       NULL
22918  static void __init _mx35_clocks_init(void)
22920         void __iomem *base;
22921 @@ -243,7 +235,7 @@ static void __init _mx35_clocks_init(void)
22922          */
22923         clk_prepare_enable(clk[scc_gate]);
22925 -       imx_register_uart_clocks(uart_clks);
22926 +       imx_register_uart_clocks(4);
22928         imx_print_silicon_rev("i.MX35", mx35_revision());
22930 diff --git a/drivers/clk/imx/clk-imx5.c b/drivers/clk/imx/clk-imx5.c
22931 index 01e079b81026..e4493846454d 100644
22932 --- a/drivers/clk/imx/clk-imx5.c
22933 +++ b/drivers/clk/imx/clk-imx5.c
22934 @@ -128,30 +128,6 @@ static const char *ieee1588_sels[] = { "pll3_sw", "pll4_sw", "dummy" /* usbphy2_
22935  static struct clk *clk[IMX5_CLK_END];
22936  static struct clk_onecell_data clk_data;
22938 -static struct clk ** const uart_clks_mx51[] __initconst = {
22939 -       &clk[IMX5_CLK_UART1_IPG_GATE],
22940 -       &clk[IMX5_CLK_UART1_PER_GATE],
22941 -       &clk[IMX5_CLK_UART2_IPG_GATE],
22942 -       &clk[IMX5_CLK_UART2_PER_GATE],
22943 -       &clk[IMX5_CLK_UART3_IPG_GATE],
22944 -       &clk[IMX5_CLK_UART3_PER_GATE],
22945 -       NULL
22948 -static struct clk ** const uart_clks_mx50_mx53[] __initconst = {
22949 -       &clk[IMX5_CLK_UART1_IPG_GATE],
22950 -       &clk[IMX5_CLK_UART1_PER_GATE],
22951 -       &clk[IMX5_CLK_UART2_IPG_GATE],
22952 -       &clk[IMX5_CLK_UART2_PER_GATE],
22953 -       &clk[IMX5_CLK_UART3_IPG_GATE],
22954 -       &clk[IMX5_CLK_UART3_PER_GATE],
22955 -       &clk[IMX5_CLK_UART4_IPG_GATE],
22956 -       &clk[IMX5_CLK_UART4_PER_GATE],
22957 -       &clk[IMX5_CLK_UART5_IPG_GATE],
22958 -       &clk[IMX5_CLK_UART5_PER_GATE],
22959 -       NULL
22962  static void __init mx5_clocks_common_init(void __iomem *ccm_base)
22964         clk[IMX5_CLK_DUMMY]             = imx_clk_fixed("dummy", 0);
22965 @@ -382,7 +358,7 @@ static void __init mx50_clocks_init(struct device_node *np)
22966         r = clk_round_rate(clk[IMX5_CLK_USBOH3_PER_GATE], 54000000);
22967         clk_set_rate(clk[IMX5_CLK_USBOH3_PER_GATE], r);
22969 -       imx_register_uart_clocks(uart_clks_mx50_mx53);
22970 +       imx_register_uart_clocks(5);
22972  CLK_OF_DECLARE(imx50_ccm, "fsl,imx50-ccm", mx50_clocks_init);
22974 @@ -488,7 +464,7 @@ static void __init mx51_clocks_init(struct device_node *np)
22975         val |= 1 << 23;
22976         writel(val, MXC_CCM_CLPCR);
22978 -       imx_register_uart_clocks(uart_clks_mx51);
22979 +       imx_register_uart_clocks(3);
22981  CLK_OF_DECLARE(imx51_ccm, "fsl,imx51-ccm", mx51_clocks_init);
22983 @@ -633,6 +609,6 @@ static void __init mx53_clocks_init(struct device_node *np)
22984         r = clk_round_rate(clk[IMX5_CLK_USBOH3_PER_GATE], 54000000);
22985         clk_set_rate(clk[IMX5_CLK_USBOH3_PER_GATE], r);
22987 -       imx_register_uart_clocks(uart_clks_mx50_mx53);
22988 +       imx_register_uart_clocks(5);
22990  CLK_OF_DECLARE(imx53_ccm, "fsl,imx53-ccm", mx53_clocks_init);
22991 diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
22992 index 521d6136d22c..496900de0b0b 100644
22993 --- a/drivers/clk/imx/clk-imx6q.c
22994 +++ b/drivers/clk/imx/clk-imx6q.c
22995 @@ -140,13 +140,6 @@ static inline int clk_on_imx6dl(void)
22996         return of_machine_is_compatible("fsl,imx6dl");
22999 -static const int uart_clk_ids[] __initconst = {
23000 -       IMX6QDL_CLK_UART_IPG,
23001 -       IMX6QDL_CLK_UART_SERIAL,
23004 -static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
23006  static int ldb_di_sel_by_clock_id(int clock_id)
23008         switch (clock_id) {
23009 @@ -440,7 +433,6 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
23010         struct device_node *np;
23011         void __iomem *anatop_base, *base;
23012         int ret;
23013 -       int i;
23015         clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
23016                                           IMX6QDL_CLK_END), GFP_KERNEL);
23017 @@ -982,12 +974,6 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
23018                                hws[IMX6QDL_CLK_PLL3_USB_OTG]->clk);
23019         }
23021 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
23022 -               int index = uart_clk_ids[i];
23024 -               uart_clks[i] = &hws[index]->clk;
23025 -       }
23027 -       imx_register_uart_clocks(uart_clks);
23028 +       imx_register_uart_clocks(1);
23030  CLK_OF_DECLARE(imx6q, "fsl,imx6q-ccm", imx6q_clocks_init);
23031 diff --git a/drivers/clk/imx/clk-imx6sl.c b/drivers/clk/imx/clk-imx6sl.c
23032 index 29eab05c9068..277365970320 100644
23033 --- a/drivers/clk/imx/clk-imx6sl.c
23034 +++ b/drivers/clk/imx/clk-imx6sl.c
23035 @@ -179,19 +179,11 @@ void imx6sl_set_wait_clk(bool enter)
23036                 imx6sl_enable_pll_arm(false);
23039 -static const int uart_clk_ids[] __initconst = {
23040 -       IMX6SL_CLK_UART,
23041 -       IMX6SL_CLK_UART_SERIAL,
23044 -static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
23046  static void __init imx6sl_clocks_init(struct device_node *ccm_node)
23048         struct device_node *np;
23049         void __iomem *base;
23050         int ret;
23051 -       int i;
23053         clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
23054                                           IMX6SL_CLK_END), GFP_KERNEL);
23055 @@ -448,12 +440,6 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
23056         clk_set_parent(hws[IMX6SL_CLK_LCDIF_AXI_SEL]->clk,
23057                        hws[IMX6SL_CLK_PLL2_PFD2]->clk);
23059 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
23060 -               int index = uart_clk_ids[i];
23062 -               uart_clks[i] = &hws[index]->clk;
23063 -       }
23065 -       imx_register_uart_clocks(uart_clks);
23066 +       imx_register_uart_clocks(2);
23068  CLK_OF_DECLARE(imx6sl, "fsl,imx6sl-ccm", imx6sl_clocks_init);
23069 diff --git a/drivers/clk/imx/clk-imx6sll.c b/drivers/clk/imx/clk-imx6sll.c
23070 index 8e8288bda4d0..31d777f30039 100644
23071 --- a/drivers/clk/imx/clk-imx6sll.c
23072 +++ b/drivers/clk/imx/clk-imx6sll.c
23073 @@ -76,26 +76,10 @@ static u32 share_count_ssi1;
23074  static u32 share_count_ssi2;
23075  static u32 share_count_ssi3;
23077 -static const int uart_clk_ids[] __initconst = {
23078 -       IMX6SLL_CLK_UART1_IPG,
23079 -       IMX6SLL_CLK_UART1_SERIAL,
23080 -       IMX6SLL_CLK_UART2_IPG,
23081 -       IMX6SLL_CLK_UART2_SERIAL,
23082 -       IMX6SLL_CLK_UART3_IPG,
23083 -       IMX6SLL_CLK_UART3_SERIAL,
23084 -       IMX6SLL_CLK_UART4_IPG,
23085 -       IMX6SLL_CLK_UART4_SERIAL,
23086 -       IMX6SLL_CLK_UART5_IPG,
23087 -       IMX6SLL_CLK_UART5_SERIAL,
23090 -static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
23092  static void __init imx6sll_clocks_init(struct device_node *ccm_node)
23094         struct device_node *np;
23095         void __iomem *base;
23096 -       int i;
23098         clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
23099                                           IMX6SLL_CLK_END), GFP_KERNEL);
23100 @@ -356,13 +340,7 @@ static void __init imx6sll_clocks_init(struct device_node *ccm_node)
23102         of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
23104 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
23105 -               int index = uart_clk_ids[i];
23107 -               uart_clks[i] = &hws[index]->clk;
23108 -       }
23110 -       imx_register_uart_clocks(uart_clks);
23111 +       imx_register_uart_clocks(5);
23113         /* Lower the AHB clock rate before changing the clock source. */
23114         clk_set_rate(hws[IMX6SLL_CLK_AHB]->clk, 99000000);
23115 diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c
23116 index 20dcce526d07..fc1bd23d4583 100644
23117 --- a/drivers/clk/imx/clk-imx6sx.c
23118 +++ b/drivers/clk/imx/clk-imx6sx.c
23119 @@ -117,18 +117,10 @@ static u32 share_count_ssi3;
23120  static u32 share_count_sai1;
23121  static u32 share_count_sai2;
23123 -static const int uart_clk_ids[] __initconst = {
23124 -       IMX6SX_CLK_UART_IPG,
23125 -       IMX6SX_CLK_UART_SERIAL,
23128 -static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
23130  static void __init imx6sx_clocks_init(struct device_node *ccm_node)
23132         struct device_node *np;
23133         void __iomem *base;
23134 -       int i;
23136         clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
23137                                           IMX6SX_CLK_CLK_END), GFP_KERNEL);
23138 @@ -556,12 +548,6 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
23139         clk_set_parent(hws[IMX6SX_CLK_QSPI1_SEL]->clk, hws[IMX6SX_CLK_PLL2_BUS]->clk);
23140         clk_set_parent(hws[IMX6SX_CLK_QSPI2_SEL]->clk, hws[IMX6SX_CLK_PLL2_BUS]->clk);
23142 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
23143 -               int index = uart_clk_ids[i];
23145 -               uart_clks[i] = &hws[index]->clk;
23146 -       }
23148 -       imx_register_uart_clocks(uart_clks);
23149 +       imx_register_uart_clocks(2);
23151  CLK_OF_DECLARE(imx6sx, "fsl,imx6sx-ccm", imx6sx_clocks_init);
23152 diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
23153 index 22d24a6a05e7..c4e0f1c07192 100644
23154 --- a/drivers/clk/imx/clk-imx7d.c
23155 +++ b/drivers/clk/imx/clk-imx7d.c
23156 @@ -377,23 +377,10 @@ static const char *pll_video_bypass_sel[] = { "pll_video_main", "pll_video_main_
23157  static struct clk_hw **hws;
23158  static struct clk_hw_onecell_data *clk_hw_data;
23160 -static const int uart_clk_ids[] __initconst = {
23161 -       IMX7D_UART1_ROOT_CLK,
23162 -       IMX7D_UART2_ROOT_CLK,
23163 -       IMX7D_UART3_ROOT_CLK,
23164 -       IMX7D_UART4_ROOT_CLK,
23165 -       IMX7D_UART5_ROOT_CLK,
23166 -       IMX7D_UART6_ROOT_CLK,
23167 -       IMX7D_UART7_ROOT_CLK,
23170 -static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
23172  static void __init imx7d_clocks_init(struct device_node *ccm_node)
23174         struct device_node *np;
23175         void __iomem *base;
23176 -       int i;
23178         clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
23179                                           IMX7D_CLK_END), GFP_KERNEL);
23180 @@ -897,14 +884,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
23181         hws[IMX7D_USB1_MAIN_480M_CLK] = imx_clk_hw_fixed_factor("pll_usb1_main_clk", "osc", 20, 1);
23182         hws[IMX7D_USB_MAIN_480M_CLK] = imx_clk_hw_fixed_factor("pll_usb_main_clk", "osc", 20, 1);
23184 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
23185 -               int index = uart_clk_ids[i];
23187 -               uart_clks[i] = &hws[index]->clk;
23188 -       }
23191 -       imx_register_uart_clocks(uart_clks);
23192 +       imx_register_uart_clocks(7);
23195  CLK_OF_DECLARE(imx7d, "fsl,imx7d-ccm", imx7d_clocks_init);
23196 diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c
23197 index 634c0b6636b0..779e09105da7 100644
23198 --- a/drivers/clk/imx/clk-imx7ulp.c
23199 +++ b/drivers/clk/imx/clk-imx7ulp.c
23200 @@ -43,19 +43,6 @@ static const struct clk_div_table ulp_div_table[] = {
23201         { /* sentinel */ },
23202  };
23204 -static const int pcc2_uart_clk_ids[] __initconst = {
23205 -       IMX7ULP_CLK_LPUART4,
23206 -       IMX7ULP_CLK_LPUART5,
23209 -static const int pcc3_uart_clk_ids[] __initconst = {
23210 -       IMX7ULP_CLK_LPUART6,
23211 -       IMX7ULP_CLK_LPUART7,
23214 -static struct clk **pcc2_uart_clks[ARRAY_SIZE(pcc2_uart_clk_ids) + 1] __initdata;
23215 -static struct clk **pcc3_uart_clks[ARRAY_SIZE(pcc3_uart_clk_ids) + 1] __initdata;
23217  static void __init imx7ulp_clk_scg1_init(struct device_node *np)
23219         struct clk_hw_onecell_data *clk_data;
23220 @@ -150,7 +137,6 @@ static void __init imx7ulp_clk_pcc2_init(struct device_node *np)
23221         struct clk_hw_onecell_data *clk_data;
23222         struct clk_hw **hws;
23223         void __iomem *base;
23224 -       int i;
23226         clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_PCC2_END),
23227                            GFP_KERNEL);
23228 @@ -190,13 +176,7 @@ static void __init imx7ulp_clk_pcc2_init(struct device_node *np)
23230         of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
23232 -       for (i = 0; i < ARRAY_SIZE(pcc2_uart_clk_ids); i++) {
23233 -               int index = pcc2_uart_clk_ids[i];
23235 -               pcc2_uart_clks[i] = &hws[index]->clk;
23236 -       }
23238 -       imx_register_uart_clocks(pcc2_uart_clks);
23239 +       imx_register_uart_clocks(2);
23241  CLK_OF_DECLARE(imx7ulp_clk_pcc2, "fsl,imx7ulp-pcc2", imx7ulp_clk_pcc2_init);
23243 @@ -205,7 +185,6 @@ static void __init imx7ulp_clk_pcc3_init(struct device_node *np)
23244         struct clk_hw_onecell_data *clk_data;
23245         struct clk_hw **hws;
23246         void __iomem *base;
23247 -       int i;
23249         clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_PCC3_END),
23250                            GFP_KERNEL);
23251 @@ -244,13 +223,7 @@ static void __init imx7ulp_clk_pcc3_init(struct device_node *np)
23253         of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
23255 -       for (i = 0; i < ARRAY_SIZE(pcc3_uart_clk_ids); i++) {
23256 -               int index = pcc3_uart_clk_ids[i];
23258 -               pcc3_uart_clks[i] = &hws[index]->clk;
23259 -       }
23261 -       imx_register_uart_clocks(pcc3_uart_clks);
23262 +       imx_register_uart_clocks(7);
23264  CLK_OF_DECLARE(imx7ulp_clk_pcc3, "fsl,imx7ulp-pcc3", imx7ulp_clk_pcc3_init);
23266 diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
23267 index 6a01eec36dd0..f1919fafb124 100644
23268 --- a/drivers/clk/imx/clk-imx8mm.c
23269 +++ b/drivers/clk/imx/clk-imx8mm.c
23270 @@ -296,20 +296,12 @@ static const char * const clkout_sels[] = {"audio_pll1_out", "audio_pll2_out", "
23271  static struct clk_hw_onecell_data *clk_hw_data;
23272  static struct clk_hw **hws;
23274 -static const int uart_clk_ids[] = {
23275 -       IMX8MM_CLK_UART1_ROOT,
23276 -       IMX8MM_CLK_UART2_ROOT,
23277 -       IMX8MM_CLK_UART3_ROOT,
23278 -       IMX8MM_CLK_UART4_ROOT,
23280 -static struct clk **uart_hws[ARRAY_SIZE(uart_clk_ids) + 1];
23282  static int imx8mm_clocks_probe(struct platform_device *pdev)
23284         struct device *dev = &pdev->dev;
23285         struct device_node *np = dev->of_node;
23286         void __iomem *base;
23287 -       int ret, i;
23288 +       int ret;
23290         clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
23291                                           IMX8MM_CLK_END), GFP_KERNEL);
23292 @@ -634,13 +626,7 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
23293                 goto unregister_hws;
23294         }
23296 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
23297 -               int index = uart_clk_ids[i];
23299 -               uart_hws[i] = &hws[index]->clk;
23300 -       }
23302 -       imx_register_uart_clocks(uart_hws);
23303 +       imx_register_uart_clocks(4);
23305         return 0;
23307 diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
23308 index 324c5fd0aa04..88f6630cd472 100644
23309 --- a/drivers/clk/imx/clk-imx8mn.c
23310 +++ b/drivers/clk/imx/clk-imx8mn.c
23311 @@ -289,20 +289,12 @@ static const char * const clkout_sels[] = {"audio_pll1_out", "audio_pll2_out", "
23312  static struct clk_hw_onecell_data *clk_hw_data;
23313  static struct clk_hw **hws;
23315 -static const int uart_clk_ids[] = {
23316 -       IMX8MN_CLK_UART1_ROOT,
23317 -       IMX8MN_CLK_UART2_ROOT,
23318 -       IMX8MN_CLK_UART3_ROOT,
23319 -       IMX8MN_CLK_UART4_ROOT,
23321 -static struct clk **uart_hws[ARRAY_SIZE(uart_clk_ids) + 1];
23323  static int imx8mn_clocks_probe(struct platform_device *pdev)
23325         struct device *dev = &pdev->dev;
23326         struct device_node *np = dev->of_node;
23327         void __iomem *base;
23328 -       int ret, i;
23329 +       int ret;
23331         clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
23332                                           IMX8MN_CLK_END), GFP_KERNEL);
23333 @@ -585,13 +577,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
23334                 goto unregister_hws;
23335         }
23337 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
23338 -               int index = uart_clk_ids[i];
23340 -               uart_hws[i] = &hws[index]->clk;
23341 -       }
23343 -       imx_register_uart_clocks(uart_hws);
23344 +       imx_register_uart_clocks(4);
23346         return 0;
23348 diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
23349 index 2f4e1d674e1c..3e6557e7d559 100644
23350 --- a/drivers/clk/imx/clk-imx8mp.c
23351 +++ b/drivers/clk/imx/clk-imx8mp.c
23352 @@ -414,20 +414,11 @@ static const char * const imx8mp_dram_core_sels[] = {"dram_pll_out", "dram_alt_r
23353  static struct clk_hw **hws;
23354  static struct clk_hw_onecell_data *clk_hw_data;
23356 -static const int uart_clk_ids[] = {
23357 -       IMX8MP_CLK_UART1_ROOT,
23358 -       IMX8MP_CLK_UART2_ROOT,
23359 -       IMX8MP_CLK_UART3_ROOT,
23360 -       IMX8MP_CLK_UART4_ROOT,
23362 -static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1];
23364  static int imx8mp_clocks_probe(struct platform_device *pdev)
23366         struct device *dev = &pdev->dev;
23367         struct device_node *np;
23368         void __iomem *anatop_base, *ccm_base;
23369 -       int i;
23371         np = of_find_compatible_node(NULL, NULL, "fsl,imx8mp-anatop");
23372         anatop_base = of_iomap(np, 0);
23373 @@ -737,13 +728,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
23375         of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
23377 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
23378 -               int index = uart_clk_ids[i];
23380 -               uart_clks[i] = &hws[index]->clk;
23381 -       }
23383 -       imx_register_uart_clocks(uart_clks);
23384 +       imx_register_uart_clocks(4);
23386         return 0;
23388 diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
23389 index 4dd4ae9d022b..3e1a10d3f55c 100644
23390 --- a/drivers/clk/imx/clk-imx8mq.c
23391 +++ b/drivers/clk/imx/clk-imx8mq.c
23392 @@ -281,20 +281,12 @@ static const char * const pllout_monitor_sels[] = {"osc_25m", "osc_27m", "dummy"
23393  static struct clk_hw_onecell_data *clk_hw_data;
23394  static struct clk_hw **hws;
23396 -static const int uart_clk_ids[] = {
23397 -       IMX8MQ_CLK_UART1_ROOT,
23398 -       IMX8MQ_CLK_UART2_ROOT,
23399 -       IMX8MQ_CLK_UART3_ROOT,
23400 -       IMX8MQ_CLK_UART4_ROOT,
23402 -static struct clk **uart_hws[ARRAY_SIZE(uart_clk_ids) + 1];
23404  static int imx8mq_clocks_probe(struct platform_device *pdev)
23406         struct device *dev = &pdev->dev;
23407         struct device_node *np = dev->of_node;
23408         void __iomem *base;
23409 -       int err, i;
23410 +       int err;
23412         clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
23413                                           IMX8MQ_CLK_END), GFP_KERNEL);
23414 @@ -629,13 +621,7 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
23415                 goto unregister_hws;
23416         }
23418 -       for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
23419 -               int index = uart_clk_ids[i];
23421 -               uart_hws[i] = &hws[index]->clk;
23422 -       }
23424 -       imx_register_uart_clocks(uart_hws);
23425 +       imx_register_uart_clocks(4);
23427         return 0;
23429 diff --git a/drivers/clk/imx/clk.c b/drivers/clk/imx/clk.c
23430 index 47882c51cb85..7cc669934253 100644
23431 --- a/drivers/clk/imx/clk.c
23432 +++ b/drivers/clk/imx/clk.c
23433 @@ -147,8 +147,10 @@ void imx_cscmr1_fixup(u32 *val)
23436  #ifndef MODULE
23437 -static int imx_keep_uart_clocks;
23438 -static struct clk ** const *imx_uart_clocks;
23440 +static bool imx_keep_uart_clocks;
23441 +static int imx_enabled_uart_clocks;
23442 +static struct clk **imx_uart_clocks;
23444  static int __init imx_keep_uart_clocks_param(char *str)
23446 @@ -161,24 +163,45 @@ __setup_param("earlycon", imx_keep_uart_earlycon,
23447  __setup_param("earlyprintk", imx_keep_uart_earlyprintk,
23448               imx_keep_uart_clocks_param, 0);
23450 -void imx_register_uart_clocks(struct clk ** const clks[])
23451 +void imx_register_uart_clocks(unsigned int clk_count)
23453 +       imx_enabled_uart_clocks = 0;
23455 +/* i.MX boards use device trees now.  For build tests without CONFIG_OF, do nothing */
23456 +#ifdef CONFIG_OF
23457         if (imx_keep_uart_clocks) {
23458                 int i;
23460 -               imx_uart_clocks = clks;
23461 -               for (i = 0; imx_uart_clocks[i]; i++)
23462 -                       clk_prepare_enable(*imx_uart_clocks[i]);
23463 +               imx_uart_clocks = kcalloc(clk_count, sizeof(struct clk *), GFP_KERNEL);
23465 +               if (!of_stdout)
23466 +                       return;
23468 +               for (i = 0; i < clk_count; i++) {
23469 +                       imx_uart_clocks[imx_enabled_uart_clocks] = of_clk_get(of_stdout, i);
23471 +                       /* Stop if there are no more of_stdout references */
23472 +                       if (IS_ERR(imx_uart_clocks[imx_enabled_uart_clocks]))
23473 +                               return;
23475 +                       /* Only enable the clock if it's not NULL */
23476 +                       if (imx_uart_clocks[imx_enabled_uart_clocks])
23477 +                               clk_prepare_enable(imx_uart_clocks[imx_enabled_uart_clocks++]);
23478 +               }
23479         }
23480 +#endif
23483  static int __init imx_clk_disable_uart(void)
23485 -       if (imx_keep_uart_clocks && imx_uart_clocks) {
23486 +       if (imx_keep_uart_clocks && imx_enabled_uart_clocks) {
23487                 int i;
23489 -               for (i = 0; imx_uart_clocks[i]; i++)
23490 -                       clk_disable_unprepare(*imx_uart_clocks[i]);
23491 +               for (i = 0; i < imx_enabled_uart_clocks; i++) {
23492 +                       clk_disable_unprepare(imx_uart_clocks[i]);
23493 +                       clk_put(imx_uart_clocks[i]);
23494 +               }
23495 +               kfree(imx_uart_clocks);
23496         }
23498         return 0;
23499 diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
23500 index 4f04c8287286..7571603bee23 100644
23501 --- a/drivers/clk/imx/clk.h
23502 +++ b/drivers/clk/imx/clk.h
23503 @@ -11,9 +11,9 @@ extern spinlock_t imx_ccm_lock;
23504  void imx_check_clocks(struct clk *clks[], unsigned int count);
23505  void imx_check_clk_hws(struct clk_hw *clks[], unsigned int count);
23506  #ifndef MODULE
23507 -void imx_register_uart_clocks(struct clk ** const clks[]);
23508 +void imx_register_uart_clocks(unsigned int clk_count);
23509  #else
23510 -static inline void imx_register_uart_clocks(struct clk ** const clks[])
23511 +static inline void imx_register_uart_clocks(unsigned int clk_count)
23514  #endif
23515 diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
23516 index f5746f9ea929..32ac6b6b7530 100644
23517 --- a/drivers/clk/mvebu/armada-37xx-periph.c
23518 +++ b/drivers/clk/mvebu/armada-37xx-periph.c
23519 @@ -84,6 +84,7 @@ struct clk_pm_cpu {
23520         void __iomem *reg_div;
23521         u8 shift_div;
23522         struct regmap *nb_pm_base;
23523 +       unsigned long l1_expiration;
23524  };
23526  #define to_clk_double_div(_hw) container_of(_hw, struct clk_double_div, hw)
23527 @@ -440,33 +441,6 @@ static u8 clk_pm_cpu_get_parent(struct clk_hw *hw)
23528         return val;
23531 -static int clk_pm_cpu_set_parent(struct clk_hw *hw, u8 index)
23533 -       struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
23534 -       struct regmap *base = pm_cpu->nb_pm_base;
23535 -       int load_level;
23537 -       /*
23538 -        * We set the clock parent only if the DVFS is available but
23539 -        * not enabled.
23540 -        */
23541 -       if (IS_ERR(base) || armada_3700_pm_dvfs_is_enabled(base))
23542 -               return -EINVAL;
23544 -       /* Set the parent clock for all the load level */
23545 -       for (load_level = 0; load_level < LOAD_LEVEL_NR; load_level++) {
23546 -               unsigned int reg, mask,  val,
23547 -                       offset = ARMADA_37XX_NB_TBG_SEL_OFF;
23549 -               armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);
23551 -               val = index << offset;
23552 -               mask = ARMADA_37XX_NB_TBG_SEL_MASK << offset;
23553 -               regmap_update_bits(base, reg, mask, val);
23554 -       }
23555 -       return 0;
23558  static unsigned long clk_pm_cpu_recalc_rate(struct clk_hw *hw,
23559                                             unsigned long parent_rate)
23561 @@ -514,8 +488,10 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
23564  /*
23565 - * Switching the CPU from the L2 or L3 frequencies (300 and 200 Mhz
23566 - * respectively) to L0 frequency (1.2 Ghz) requires a significant
23567 + * Workaround when base CPU frequnecy is 1000 or 1200 MHz
23568 + *
23569 + * Switching the CPU from the L2 or L3 frequencies (250/300 or 200 MHz
23570 + * respectively) to L0 frequency (1/1.2 GHz) requires a significant
23571   * amount of time to let VDD stabilize to the appropriate
23572   * voltage. This amount of time is large enough that it cannot be
23573   * covered by the hardware countdown register. Due to this, the CPU
23574 @@ -525,26 +501,56 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
23575   * To work around this problem, we prevent switching directly from the
23576   * L2/L3 frequencies to the L0 frequency, and instead switch to the L1
23577   * frequency in-between. The sequence therefore becomes:
23578 - * 1. First switch from L2/L3(200/300MHz) to L1(600MHZ)
23579 + * 1. First switch from L2/L3 (200/250/300 MHz) to L1 (500/600 MHz)
23580   * 2. Sleep 20ms for stabling VDD voltage
23581 - * 3. Then switch from L1(600MHZ) to L0(1200Mhz).
23582 + * 3. Then switch from L1 (500/600 MHz) to L0 (1000/1200 MHz).
23583   */
23584 -static void clk_pm_cpu_set_rate_wa(unsigned long rate, struct regmap *base)
23585 +static void clk_pm_cpu_set_rate_wa(struct clk_pm_cpu *pm_cpu,
23586 +                                  unsigned int new_level, unsigned long rate,
23587 +                                  struct regmap *base)
23589         unsigned int cur_level;
23591 -       if (rate != 1200 * 1000 * 1000)
23592 -               return;
23594         regmap_read(base, ARMADA_37XX_NB_CPU_LOAD, &cur_level);
23595         cur_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
23596 -       if (cur_level <= ARMADA_37XX_DVFS_LOAD_1)
23598 +       if (cur_level == new_level)
23599 +               return;
23601 +       /*
23602 +        * System wants to go to L1 on its own. If we are going from L2/L3,
23603 +        * remember when 20ms will expire. If from L0, set the value so that
23604 +        * next switch to L0 won't have to wait.
23605 +        */
23606 +       if (new_level == ARMADA_37XX_DVFS_LOAD_1) {
23607 +               if (cur_level == ARMADA_37XX_DVFS_LOAD_0)
23608 +                       pm_cpu->l1_expiration = jiffies;
23609 +               else
23610 +                       pm_cpu->l1_expiration = jiffies + msecs_to_jiffies(20);
23611                 return;
23612 +       }
23614 +       /*
23615 +        * If we are setting to L2/L3, just invalidate L1 expiration time,
23616 +        * sleeping is not needed.
23617 +        */
23618 +       if (rate < 1000*1000*1000)
23619 +               goto invalidate_l1_exp;
23621 +       /*
23622 +        * We are going to L0 with rate >= 1GHz. Check whether we have been at
23623 +        * L1 for long enough time. If not, go to L1 for 20ms.
23624 +        */
23625 +       if (pm_cpu->l1_expiration && jiffies >= pm_cpu->l1_expiration)
23626 +               goto invalidate_l1_exp;
23628         regmap_update_bits(base, ARMADA_37XX_NB_CPU_LOAD,
23629                            ARMADA_37XX_NB_CPU_LOAD_MASK,
23630                            ARMADA_37XX_DVFS_LOAD_1);
23631         msleep(20);
23633 +invalidate_l1_exp:
23634 +       pm_cpu->l1_expiration = 0;
23637  static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
23638 @@ -578,7 +584,9 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
23639                         reg = ARMADA_37XX_NB_CPU_LOAD;
23640                         mask = ARMADA_37XX_NB_CPU_LOAD_MASK;
23642 -                       clk_pm_cpu_set_rate_wa(rate, base);
23643 +                       /* Apply workaround when base CPU frequency is 1000 or 1200 MHz */
23644 +                       if (parent_rate >= 1000*1000*1000)
23645 +                               clk_pm_cpu_set_rate_wa(pm_cpu, load_level, rate, base);
23647                         regmap_update_bits(base, reg, mask, load_level);
23649 @@ -592,7 +600,6 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
23651  static const struct clk_ops clk_pm_cpu_ops = {
23652         .get_parent = clk_pm_cpu_get_parent,
23653 -       .set_parent = clk_pm_cpu_set_parent,
23654         .round_rate = clk_pm_cpu_round_rate,
23655         .set_rate = clk_pm_cpu_set_rate,
23656         .recalc_rate = clk_pm_cpu_recalc_rate,
23657 diff --git a/drivers/clk/qcom/a53-pll.c b/drivers/clk/qcom/a53-pll.c
23658 index 45cfc57bff92..af6ac17c7dae 100644
23659 --- a/drivers/clk/qcom/a53-pll.c
23660 +++ b/drivers/clk/qcom/a53-pll.c
23661 @@ -93,6 +93,7 @@ static const struct of_device_id qcom_a53pll_match_table[] = {
23662         { .compatible = "qcom,msm8916-a53pll" },
23663         { }
23664  };
23665 +MODULE_DEVICE_TABLE(of, qcom_a53pll_match_table);
23667  static struct platform_driver qcom_a53pll_driver = {
23668         .probe = qcom_a53pll_probe,
23669 diff --git a/drivers/clk/qcom/a7-pll.c b/drivers/clk/qcom/a7-pll.c
23670 index e171d3caf2cf..c4a53e5db229 100644
23671 --- a/drivers/clk/qcom/a7-pll.c
23672 +++ b/drivers/clk/qcom/a7-pll.c
23673 @@ -86,6 +86,7 @@ static const struct of_device_id qcom_a7pll_match_table[] = {
23674         { .compatible = "qcom,sdx55-a7pll" },
23675         { }
23676  };
23677 +MODULE_DEVICE_TABLE(of, qcom_a7pll_match_table);
23679  static struct platform_driver qcom_a7pll_driver = {
23680         .probe = qcom_a7pll_probe,
23681 diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c
23682 index 30be87fb222a..bef7899ad0d6 100644
23683 --- a/drivers/clk/qcom/apss-ipq-pll.c
23684 +++ b/drivers/clk/qcom/apss-ipq-pll.c
23685 @@ -81,6 +81,7 @@ static const struct of_device_id apss_ipq_pll_match_table[] = {
23686         { .compatible = "qcom,ipq6018-a53pll" },
23687         { }
23688  };
23689 +MODULE_DEVICE_TABLE(of, apss_ipq_pll_match_table);
23691  static struct platform_driver apss_ipq_pll_driver = {
23692         .probe = apss_ipq_pll_probe,
23693 diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c
23694 index 87ee1bad9a9a..4a5d2a914bd6 100644
23695 --- a/drivers/clk/samsung/clk-exynos7.c
23696 +++ b/drivers/clk/samsung/clk-exynos7.c
23697 @@ -537,8 +537,13 @@ static const struct samsung_gate_clock top1_gate_clks[] __initconst = {
23698         GATE(CLK_ACLK_FSYS0_200, "aclk_fsys0_200", "dout_aclk_fsys0_200",
23699                 ENABLE_ACLK_TOP13, 28, CLK_SET_RATE_PARENT |
23700                 CLK_IS_CRITICAL, 0),
23701 +       /*
23702 +        * This clock is required for the CMU_FSYS1 registers access, keep it
23703 +        * enabled permanently until proper runtime PM support is added.
23704 +        */
23705         GATE(CLK_ACLK_FSYS1_200, "aclk_fsys1_200", "dout_aclk_fsys1_200",
23706 -               ENABLE_ACLK_TOP13, 24, CLK_SET_RATE_PARENT, 0),
23707 +               ENABLE_ACLK_TOP13, 24, CLK_SET_RATE_PARENT |
23708 +               CLK_IS_CRITICAL, 0),
23710         GATE(CLK_SCLK_PHY_FSYS1_26M, "sclk_phy_fsys1_26m",
23711                 "dout_sclk_phy_fsys1_26m", ENABLE_SCLK_TOP1_FSYS11,
23712 diff --git a/drivers/clk/socfpga/clk-gate-a10.c b/drivers/clk/socfpga/clk-gate-a10.c
23713 index cd5df9103614..d62778884208 100644
23714 --- a/drivers/clk/socfpga/clk-gate-a10.c
23715 +++ b/drivers/clk/socfpga/clk-gate-a10.c
23716 @@ -146,6 +146,7 @@ static void __init __socfpga_gate_init(struct device_node *node,
23717                 if (IS_ERR(socfpga_clk->sys_mgr_base_addr)) {
23718                         pr_err("%s: failed to find altr,sys-mgr regmap!\n",
23719                                         __func__);
23720 +                       kfree(socfpga_clk);
23721                         return;
23722                 }
23723         }
23724 diff --git a/drivers/clk/uniphier/clk-uniphier-mux.c b/drivers/clk/uniphier/clk-uniphier-mux.c
23725 index 462c84321b2d..1998e9d4cfc0 100644
23726 --- a/drivers/clk/uniphier/clk-uniphier-mux.c
23727 +++ b/drivers/clk/uniphier/clk-uniphier-mux.c
23728 @@ -31,10 +31,10 @@ static int uniphier_clk_mux_set_parent(struct clk_hw *hw, u8 index)
23729  static u8 uniphier_clk_mux_get_parent(struct clk_hw *hw)
23731         struct uniphier_clk_mux *mux = to_uniphier_clk_mux(hw);
23732 -       int num_parents = clk_hw_get_num_parents(hw);
23733 +       unsigned int num_parents = clk_hw_get_num_parents(hw);
23734         int ret;
23735         unsigned int val;
23736 -       u8 i;
23737 +       unsigned int i;
23739         ret = regmap_read(mux->regmap, mux->reg, &val);
23740         if (ret)
23741 diff --git a/drivers/clk/zynqmp/pll.c b/drivers/clk/zynqmp/pll.c
23742 index 92f449ed38e5..abe6afbf3407 100644
23743 --- a/drivers/clk/zynqmp/pll.c
23744 +++ b/drivers/clk/zynqmp/pll.c
23745 @@ -14,10 +14,12 @@
23746   * struct zynqmp_pll - PLL clock
23747   * @hw:                Handle between common and hardware-specific interfaces
23748   * @clk_id:    PLL clock ID
23749 + * @set_pll_mode:      Whether an IOCTL_SET_PLL_FRAC_MODE request be sent to ATF
23750   */
23751  struct zynqmp_pll {
23752         struct clk_hw hw;
23753         u32 clk_id;
23754 +       bool set_pll_mode;
23755  };
23757  #define to_zynqmp_pll(_hw)     container_of(_hw, struct zynqmp_pll, hw)
23758 @@ -81,6 +83,8 @@ static inline void zynqmp_pll_set_mode(struct clk_hw *hw, bool on)
23759         if (ret)
23760                 pr_warn_once("%s() PLL set frac mode failed for %s, ret = %d\n",
23761                              __func__, clk_name, ret);
23762 +       else
23763 +               clk->set_pll_mode = true;
23766  /**
23767 @@ -100,9 +104,7 @@ static long zynqmp_pll_round_rate(struct clk_hw *hw, unsigned long rate,
23768         /* Enable the fractional mode if needed */
23769         rate_div = (rate * FRAC_DIV) / *prate;
23770         f = rate_div % FRAC_DIV;
23771 -       zynqmp_pll_set_mode(hw, !!f);
23773 -       if (zynqmp_pll_get_mode(hw) == PLL_MODE_FRAC) {
23774 +       if (f) {
23775                 if (rate > PS_PLL_VCO_MAX) {
23776                         fbdiv = rate / PS_PLL_VCO_MAX;
23777                         rate = rate / (fbdiv + 1);
23778 @@ -173,10 +175,12 @@ static int zynqmp_pll_set_rate(struct clk_hw *hw, unsigned long rate,
23779         long rate_div, frac, m, f;
23780         int ret;
23782 -       if (zynqmp_pll_get_mode(hw) == PLL_MODE_FRAC) {
23783 -               rate_div = (rate * FRAC_DIV) / parent_rate;
23784 +       rate_div = (rate * FRAC_DIV) / parent_rate;
23785 +       f = rate_div % FRAC_DIV;
23786 +       zynqmp_pll_set_mode(hw, !!f);
23788 +       if (f) {
23789                 m = rate_div / FRAC_DIV;
23790 -               f = rate_div % FRAC_DIV;
23791                 m = clamp_t(u32, m, (PLL_FBDIV_MIN), (PLL_FBDIV_MAX));
23792                 rate = parent_rate * m;
23793                 frac = (parent_rate * f) / FRAC_DIV;
23794 @@ -240,9 +244,15 @@ static int zynqmp_pll_enable(struct clk_hw *hw)
23795         u32 clk_id = clk->clk_id;
23796         int ret;
23798 -       if (zynqmp_pll_is_enabled(hw))
23799 +       /*
23800 +        * Don't skip enabling clock if there is an IOCTL_SET_PLL_FRAC_MODE request
23801 +        * that has been sent to ATF.
23802 +        */
23803 +       if (zynqmp_pll_is_enabled(hw) && (!clk->set_pll_mode))
23804                 return 0;
23806 +       clk->set_pll_mode = false;
23808         ret = zynqmp_pm_clock_enable(clk_id);
23809         if (ret)
23810                 pr_warn_once("%s() clock enable failed for %s, ret = %d\n",
23811 diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
23812 index 42e7e43b8fcd..b1e2b697b21b 100644
23813 --- a/drivers/clocksource/dw_apb_timer_of.c
23814 +++ b/drivers/clocksource/dw_apb_timer_of.c
23815 @@ -52,18 +52,34 @@ static int __init timer_get_base_and_rate(struct device_node *np,
23816                 return 0;
23818         timer_clk = of_clk_get_by_name(np, "timer");
23819 -       if (IS_ERR(timer_clk))
23820 -               return PTR_ERR(timer_clk);
23821 +       if (IS_ERR(timer_clk)) {
23822 +               ret = PTR_ERR(timer_clk);
23823 +               goto out_pclk_disable;
23824 +       }
23826         ret = clk_prepare_enable(timer_clk);
23827         if (ret)
23828 -               return ret;
23829 +               goto out_timer_clk_put;
23831         *rate = clk_get_rate(timer_clk);
23832 -       if (!(*rate))
23833 -               return -EINVAL;
23834 +       if (!(*rate)) {
23835 +               ret = -EINVAL;
23836 +               goto out_timer_clk_disable;
23837 +       }
23839         return 0;
23841 +out_timer_clk_disable:
23842 +       clk_disable_unprepare(timer_clk);
23843 +out_timer_clk_put:
23844 +       clk_put(timer_clk);
23845 +out_pclk_disable:
23846 +       if (!IS_ERR(pclk)) {
23847 +               clk_disable_unprepare(pclk);
23848 +               clk_put(pclk);
23849 +       }
23850 +       iounmap(*base);
23851 +       return ret;
23854  static int __init add_clockevent(struct device_node *event_timer)
23855 diff --git a/drivers/clocksource/ingenic-ost.c b/drivers/clocksource/ingenic-ost.c
23856 index 029efc2731b4..6af2470136bd 100644
23857 --- a/drivers/clocksource/ingenic-ost.c
23858 +++ b/drivers/clocksource/ingenic-ost.c
23859 @@ -88,9 +88,9 @@ static int __init ingenic_ost_probe(struct platform_device *pdev)
23860                 return PTR_ERR(ost->regs);
23862         map = device_node_to_regmap(dev->parent->of_node);
23863 -       if (!map) {
23864 +       if (IS_ERR(map)) {
23865                 dev_err(dev, "regmap not found");
23866 -               return -EINVAL;
23867 +               return PTR_ERR(map);
23868         }
23870         ost->clk = devm_clk_get(dev, "ost");
23871 diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c
23872 index 33b3e8aa2cc5..b6f97960d8ee 100644
23873 --- a/drivers/clocksource/timer-ti-dm-systimer.c
23874 +++ b/drivers/clocksource/timer-ti-dm-systimer.c
23875 @@ -2,6 +2,7 @@
23876  #include <linux/clk.h>
23877  #include <linux/clocksource.h>
23878  #include <linux/clockchips.h>
23879 +#include <linux/cpuhotplug.h>
23880  #include <linux/interrupt.h>
23881  #include <linux/io.h>
23882  #include <linux/iopoll.h>
23883 @@ -449,13 +450,13 @@ static int dmtimer_set_next_event(unsigned long cycles,
23884         struct dmtimer_systimer *t = &clkevt->t;
23885         void __iomem *pend = t->base + t->pend;
23887 -       writel_relaxed(0xffffffff - cycles, t->base + t->counter);
23888         while (readl_relaxed(pend) & WP_TCRR)
23889                 cpu_relax();
23890 +       writel_relaxed(0xffffffff - cycles, t->base + t->counter);
23892 -       writel_relaxed(OMAP_TIMER_CTRL_ST, t->base + t->ctrl);
23893         while (readl_relaxed(pend) & WP_TCLR)
23894                 cpu_relax();
23895 +       writel_relaxed(OMAP_TIMER_CTRL_ST, t->base + t->ctrl);
23897         return 0;
23899 @@ -490,18 +491,18 @@ static int dmtimer_set_periodic(struct clock_event_device *evt)
23900         dmtimer_clockevent_shutdown(evt);
23902         /* Looks like we need to first set the load value separately */
23903 -       writel_relaxed(clkevt->period, t->base + t->load);
23904         while (readl_relaxed(pend) & WP_TLDR)
23905                 cpu_relax();
23906 +       writel_relaxed(clkevt->period, t->base + t->load);
23908 -       writel_relaxed(clkevt->period, t->base + t->counter);
23909         while (readl_relaxed(pend) & WP_TCRR)
23910                 cpu_relax();
23911 +       writel_relaxed(clkevt->period, t->base + t->counter);
23913 -       writel_relaxed(OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
23914 -                      t->base + t->ctrl);
23915         while (readl_relaxed(pend) & WP_TCLR)
23916                 cpu_relax();
23917 +       writel_relaxed(OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
23918 +                      t->base + t->ctrl);
23920         return 0;
23922 @@ -530,17 +531,17 @@ static void omap_clockevent_unidle(struct clock_event_device *evt)
23923         writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);
23926 -static int __init dmtimer_clockevent_init(struct device_node *np)
23927 +static int __init dmtimer_clkevt_init_common(struct dmtimer_clockevent *clkevt,
23928 +                                            struct device_node *np,
23929 +                                            unsigned int features,
23930 +                                            const struct cpumask *cpumask,
23931 +                                            const char *name,
23932 +                                            int rating)
23934 -       struct dmtimer_clockevent *clkevt;
23935         struct clock_event_device *dev;
23936         struct dmtimer_systimer *t;
23937         int error;
23939 -       clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
23940 -       if (!clkevt)
23941 -               return -ENOMEM;
23943         t = &clkevt->t;
23944         dev = &clkevt->dev;
23946 @@ -548,24 +549,23 @@ static int __init dmtimer_clockevent_init(struct device_node *np)
23947          * We mostly use cpuidle_coupled with ARM local timers for runtime,
23948          * so there's probably no use for CLOCK_EVT_FEAT_DYNIRQ here.
23949          */
23950 -       dev->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
23951 -       dev->rating = 300;
23952 +       dev->features = features;
23953 +       dev->rating = rating;
23954         dev->set_next_event = dmtimer_set_next_event;
23955         dev->set_state_shutdown = dmtimer_clockevent_shutdown;
23956         dev->set_state_periodic = dmtimer_set_periodic;
23957         dev->set_state_oneshot = dmtimer_clockevent_shutdown;
23958 +       dev->set_state_oneshot_stopped = dmtimer_clockevent_shutdown;
23959         dev->tick_resume = dmtimer_clockevent_shutdown;
23960 -       dev->cpumask = cpu_possible_mask;
23961 +       dev->cpumask = cpumask;
23963         dev->irq = irq_of_parse_and_map(np, 0);
23964 -       if (!dev->irq) {
23965 -               error = -ENXIO;
23966 -               goto err_out_free;
23967 -       }
23968 +       if (!dev->irq)
23969 +               return -ENXIO;
23971         error = dmtimer_systimer_setup(np, &clkevt->t);
23972         if (error)
23973 -               goto err_out_free;
23974 +               return error;
23976         clkevt->period = 0xffffffff - DIV_ROUND_CLOSEST(t->rate, HZ);
23978 @@ -577,38 +577,132 @@ static int __init dmtimer_clockevent_init(struct device_node *np)
23979         writel_relaxed(OMAP_TIMER_CTRL_POSTED, t->base + t->ifctrl);
23981         error = request_irq(dev->irq, dmtimer_clockevent_interrupt,
23982 -                           IRQF_TIMER, "clockevent", clkevt);
23983 +                           IRQF_TIMER, name, clkevt);
23984         if (error)
23985                 goto err_out_unmap;
23987         writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_ena);
23988         writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);
23990 -       pr_info("TI gptimer clockevent: %s%lu Hz at %pOF\n",
23991 -               of_find_property(np, "ti,timer-alwon", NULL) ?
23992 +       pr_info("TI gptimer %s: %s%lu Hz at %pOF\n",
23993 +               name, of_find_property(np, "ti,timer-alwon", NULL) ?
23994                 "always-on " : "", t->rate, np->parent);
23996 -       clockevents_config_and_register(dev, t->rate,
23997 -                                       3, /* Timer internal resynch latency */
23998 +       return 0;
24000 +err_out_unmap:
24001 +       iounmap(t->base);
24003 +       return error;
24006 +static int __init dmtimer_clockevent_init(struct device_node *np)
24008 +       struct dmtimer_clockevent *clkevt;
24009 +       int error;
24011 +       clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
24012 +       if (!clkevt)
24013 +               return -ENOMEM;
24015 +       error = dmtimer_clkevt_init_common(clkevt, np,
24016 +                                          CLOCK_EVT_FEAT_PERIODIC |
24017 +                                          CLOCK_EVT_FEAT_ONESHOT,
24018 +                                          cpu_possible_mask, "clockevent",
24019 +                                          300);
24020 +       if (error)
24021 +               goto err_out_free;
24023 +       clockevents_config_and_register(&clkevt->dev, clkevt->t.rate,
24024 +                                       3, /* Timer internal resync latency */
24025                                         0xffffffff);
24027         if (of_machine_is_compatible("ti,am33xx") ||
24028             of_machine_is_compatible("ti,am43")) {
24029 -               dev->suspend = omap_clockevent_idle;
24030 -               dev->resume = omap_clockevent_unidle;
24031 +               clkevt->dev.suspend = omap_clockevent_idle;
24032 +               clkevt->dev.resume = omap_clockevent_unidle;
24033         }
24035         return 0;
24037 -err_out_unmap:
24038 -       iounmap(t->base);
24040  err_out_free:
24041         kfree(clkevt);
24043         return error;
24046 +/* Dmtimer as percpu timer. See dra7 ARM architected timer wrap erratum i940 */
24047 +static DEFINE_PER_CPU(struct dmtimer_clockevent, dmtimer_percpu_timer);
24049 +static int __init dmtimer_percpu_timer_init(struct device_node *np, int cpu)
24051 +       struct dmtimer_clockevent *clkevt;
24052 +       int error;
24054 +       if (!cpu_possible(cpu))
24055 +               return -EINVAL;
24057 +       if (!of_property_read_bool(np->parent, "ti,no-reset-on-init") ||
24058 +           !of_property_read_bool(np->parent, "ti,no-idle"))
24059 +               pr_warn("Incomplete dtb for percpu dmtimer %pOF\n", np->parent);
24061 +       clkevt = per_cpu_ptr(&dmtimer_percpu_timer, cpu);
24063 +       error = dmtimer_clkevt_init_common(clkevt, np, CLOCK_EVT_FEAT_ONESHOT,
24064 +                                          cpumask_of(cpu), "percpu-dmtimer",
24065 +                                          500);
24066 +       if (error)
24067 +               return error;
24069 +       return 0;
24072 +/* See TRM for timer internal resynch latency */
24073 +static int omap_dmtimer_starting_cpu(unsigned int cpu)
24075 +       struct dmtimer_clockevent *clkevt = per_cpu_ptr(&dmtimer_percpu_timer, cpu);
24076 +       struct clock_event_device *dev = &clkevt->dev;
24077 +       struct dmtimer_systimer *t = &clkevt->t;
24079 +       clockevents_config_and_register(dev, t->rate, 3, ULONG_MAX);
24080 +       irq_force_affinity(dev->irq, cpumask_of(cpu));
24082 +       return 0;
24085 +static int __init dmtimer_percpu_timer_startup(void)
24087 +       struct dmtimer_clockevent *clkevt = per_cpu_ptr(&dmtimer_percpu_timer, 0);
24088 +       struct dmtimer_systimer *t = &clkevt->t;
24090 +       if (t->sysc) {
24091 +               cpuhp_setup_state(CPUHP_AP_TI_GP_TIMER_STARTING,
24092 +                                 "clockevents/omap/gptimer:starting",
24093 +                                 omap_dmtimer_starting_cpu, NULL);
24094 +       }
24096 +       return 0;
24098 +subsys_initcall(dmtimer_percpu_timer_startup);
24100 +static int __init dmtimer_percpu_quirk_init(struct device_node *np, u32 pa)
24102 +       struct device_node *arm_timer;
24104 +       arm_timer = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
24105 +       if (of_device_is_available(arm_timer)) {
24106 +               pr_warn_once("ARM architected timer wrap issue i940 detected\n");
24107 +               return 0;
24108 +       }
24110 +       if (pa == 0x48034000)           /* dra7 dmtimer3 */
24111 +               return dmtimer_percpu_timer_init(np, 0);
24112 +       else if (pa == 0x48036000)      /* dra7 dmtimer4 */
24113 +               return dmtimer_percpu_timer_init(np, 1);
24115 +       return 0;
24118  /* Clocksource */
24119  static struct dmtimer_clocksource *
24120  to_dmtimer_clocksource(struct clocksource *cs)
24121 @@ -742,6 +836,9 @@ static int __init dmtimer_systimer_init(struct device_node *np)
24122         if (clockevent == pa)
24123                 return dmtimer_clockevent_init(np);
24125 +       if (of_machine_is_compatible("ti,dra7"))
24126 +               return dmtimer_percpu_quirk_init(np, pa);
24128         return 0;
24131 diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
24132 index d1bbc16fba4b..7e7450453714 100644
24133 --- a/drivers/cpufreq/acpi-cpufreq.c
24134 +++ b/drivers/cpufreq/acpi-cpufreq.c
24135 @@ -646,7 +646,11 @@ static u64 get_max_boost_ratio(unsigned int cpu)
24136                 return 0;
24137         }
24139 -       highest_perf = perf_caps.highest_perf;
24140 +       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
24141 +               highest_perf = amd_get_highest_perf();
24142 +       else
24143 +               highest_perf = perf_caps.highest_perf;
24145         nominal_perf = perf_caps.nominal_perf;
24147         if (!highest_perf || !nominal_perf) {
24148 diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
24149 index b4af4094309b..e4782f562e7a 100644
24150 --- a/drivers/cpufreq/armada-37xx-cpufreq.c
24151 +++ b/drivers/cpufreq/armada-37xx-cpufreq.c
24152 @@ -25,6 +25,10 @@
24154  #include "cpufreq-dt.h"
24156 +/* Clk register set */
24157 +#define ARMADA_37XX_CLK_TBG_SEL                0
24158 +#define ARMADA_37XX_CLK_TBG_SEL_CPU_OFF        22
24160  /* Power management in North Bridge register set */
24161  #define ARMADA_37XX_NB_L0L1    0x18
24162  #define ARMADA_37XX_NB_L2L3    0x1C
24163 @@ -69,6 +73,8 @@
24164  #define LOAD_LEVEL_NR  4
24166  #define MIN_VOLT_MV 1000
24167 +#define MIN_VOLT_MV_FOR_L1_1000MHZ 1108
24168 +#define MIN_VOLT_MV_FOR_L1_1200MHZ 1155
24170  /*  AVS value for the corresponding voltage (in mV) */
24171  static int avs_map[] = {
24172 @@ -120,10 +126,15 @@ static struct armada_37xx_dvfs *armada_37xx_cpu_freq_info_get(u32 freq)
24173   * will be configured then the DVFS will be enabled.
24174   */
24175  static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
24176 -                                                struct clk *clk, u8 *divider)
24177 +                                                struct regmap *clk_base, u8 *divider)
24179 +       u32 cpu_tbg_sel;
24180         int load_lvl;
24181 -       struct clk *parent;
24183 +       /* Determine to which TBG clock is CPU connected */
24184 +       regmap_read(clk_base, ARMADA_37XX_CLK_TBG_SEL, &cpu_tbg_sel);
24185 +       cpu_tbg_sel >>= ARMADA_37XX_CLK_TBG_SEL_CPU_OFF;
24186 +       cpu_tbg_sel &= ARMADA_37XX_NB_TBG_SEL_MASK;
24188         for (load_lvl = 0; load_lvl < LOAD_LEVEL_NR; load_lvl++) {
24189                 unsigned int reg, mask, val, offset = 0;
24190 @@ -142,6 +153,11 @@ static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
24191                 mask = (ARMADA_37XX_NB_CLK_SEL_MASK
24192                         << ARMADA_37XX_NB_CLK_SEL_OFF);
24194 +               /* Set TBG index, for all levels we use the same TBG */
24195 +               val = cpu_tbg_sel << ARMADA_37XX_NB_TBG_SEL_OFF;
24196 +               mask = (ARMADA_37XX_NB_TBG_SEL_MASK
24197 +                       << ARMADA_37XX_NB_TBG_SEL_OFF);
24199                 /*
24200                  * Set cpu divider based on the pre-computed array in
24201                  * order to have balanced step.
24202 @@ -160,14 +176,6 @@ static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
24204                 regmap_update_bits(base, reg, mask, val);
24205         }
24207 -       /*
24208 -        * Set cpu clock source, for all the level we keep the same
24209 -        * clock source that the one already configured. For this one
24210 -        * we need to use the clock framework
24211 -        */
24212 -       parent = clk_get_parent(clk);
24213 -       clk_set_parent(clk, parent);
24216  /*
24217 @@ -202,6 +210,8 @@ static u32 armada_37xx_avs_val_match(int target_vm)
24218   * - L2 & L3 voltage should be about 150mv smaller than L0 voltage.
24219   * This function calculates L1 & L2 & L3 AVS values dynamically based
24220   * on L0 voltage and fill all AVS values to the AVS value table.
24221 + * When base CPU frequency is 1000 or 1200 MHz then there is additional
24222 + * minimal avs value for load L1.
24223   */
24224  static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
24225                                                 struct armada_37xx_dvfs *dvfs)
24226 @@ -233,6 +243,19 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
24227                 for (load_level = 1; load_level < LOAD_LEVEL_NR; load_level++)
24228                         dvfs->avs[load_level] = avs_min;
24230 +               /*
24231 +                * Set the avs values for load L0 and L1 when base CPU frequency
24232 +                * is 1000/1200 MHz to its typical initial values according to
24233 +                * the Armada 3700 Hardware Specifications.
24234 +                */
24235 +               if (dvfs->cpu_freq_max >= 1000*1000*1000) {
24236 +                       if (dvfs->cpu_freq_max >= 1200*1000*1000)
24237 +                               avs_min = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1200MHZ);
24238 +                       else
24239 +                               avs_min = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1000MHZ);
24240 +                       dvfs->avs[0] = dvfs->avs[1] = avs_min;
24241 +               }
24243                 return;
24244         }
24246 @@ -252,6 +275,26 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
24247         target_vm = avs_map[l0_vdd_min] - 150;
24248         target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
24249         dvfs->avs[2] = dvfs->avs[3] = armada_37xx_avs_val_match(target_vm);
24251 +       /*
24252 +        * Fix the avs value for load L1 when base CPU frequency is 1000/1200 MHz,
24253 +        * otherwise the CPU gets stuck when switching from load L1 to load L0.
24254 +        * Also ensure that avs value for load L1 is not higher than for L0.
24255 +        */
24256 +       if (dvfs->cpu_freq_max >= 1000*1000*1000) {
24257 +               u32 avs_min_l1;
24259 +               if (dvfs->cpu_freq_max >= 1200*1000*1000)
24260 +                       avs_min_l1 = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1200MHZ);
24261 +               else
24262 +                       avs_min_l1 = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1000MHZ);
24264 +               if (avs_min_l1 > dvfs->avs[0])
24265 +                       avs_min_l1 = dvfs->avs[0];
24267 +               if (dvfs->avs[1] < avs_min_l1)
24268 +                       dvfs->avs[1] = avs_min_l1;
24269 +       }
24272  static void __init armada37xx_cpufreq_avs_setup(struct regmap *base,
24273 @@ -358,11 +401,16 @@ static int __init armada37xx_cpufreq_driver_init(void)
24274         struct platform_device *pdev;
24275         unsigned long freq;
24276         unsigned int cur_frequency, base_frequency;
24277 -       struct regmap *nb_pm_base, *avs_base;
24278 +       struct regmap *nb_clk_base, *nb_pm_base, *avs_base;
24279         struct device *cpu_dev;
24280         int load_lvl, ret;
24281         struct clk *clk, *parent;
24283 +       nb_clk_base =
24284 +               syscon_regmap_lookup_by_compatible("marvell,armada-3700-periph-clock-nb");
24285 +       if (IS_ERR(nb_clk_base))
24286 +               return -ENODEV;
24288         nb_pm_base =
24289                 syscon_regmap_lookup_by_compatible("marvell,armada-3700-nb-pm");
24291 @@ -421,7 +469,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
24292                 return -EINVAL;
24293         }
24295 -       dvfs = armada_37xx_cpu_freq_info_get(cur_frequency);
24296 +       dvfs = armada_37xx_cpu_freq_info_get(base_frequency);
24297         if (!dvfs) {
24298                 clk_put(clk);
24299                 return -EINVAL;
24300 @@ -439,7 +487,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
24301         armada37xx_cpufreq_avs_configure(avs_base, dvfs);
24302         armada37xx_cpufreq_avs_setup(avs_base, dvfs);
24304 -       armada37xx_cpufreq_dvfs_setup(nb_pm_base, clk, dvfs->divider);
24305 +       armada37xx_cpufreq_dvfs_setup(nb_pm_base, nb_clk_base, dvfs->divider);
24306         clk_put(clk);
24308         for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR;
24309 @@ -473,7 +521,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
24310  remove_opp:
24311         /* clean-up the already added opp before leaving */
24312         while (load_lvl-- > ARMADA_37XX_DVFS_LOAD_0) {
24313 -               freq = cur_frequency / dvfs->divider[load_lvl];
24314 +               freq = base_frequency / dvfs->divider[load_lvl];
24315                 dev_pm_opp_remove(cpu_dev, freq);
24316         }
24318 diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
24319 index aa39ff31ec9f..b3eae5ec17b2 100644
24320 --- a/drivers/cpufreq/cpufreq_conservative.c
24321 +++ b/drivers/cpufreq/cpufreq_conservative.c
24322 @@ -28,8 +28,8 @@ struct cs_dbs_tuners {
24323  };
24325  /* Conservative governor macros */
24326 -#define DEF_FREQUENCY_UP_THRESHOLD             (80)
24327 -#define DEF_FREQUENCY_DOWN_THRESHOLD           (20)
24328 +#define DEF_FREQUENCY_UP_THRESHOLD             (63)
24329 +#define DEF_FREQUENCY_DOWN_THRESHOLD           (26)
24330  #define DEF_FREQUENCY_STEP                     (5)
24331  #define DEF_SAMPLING_DOWN_FACTOR               (1)
24332  #define MAX_SAMPLING_DOWN_FACTOR               (10)
24333 @@ -47,9 +47,9 @@ static inline unsigned int get_freq_step(struct cs_dbs_tuners *cs_tuners,
24336  /*
24337 - * Every sampling_rate, we check, if current idle time is less than 20%
24338 + * Every sampling_rate, we check, if current idle time is less than 37%
24339   * (default), then we try to increase frequency. Every sampling_rate *
24340 - * sampling_down_factor, we check, if current idle time is more than 80%
24341 + * sampling_down_factor, we check, if current idle time is more than 74%
24342   * (default), then we try to decrease frequency
24343   *
24344   * Frequency updates happen at minimum steps of 5% (default) of maximum
24345 diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
24346 index ac361a8b1d3b..611d80122336 100644
24347 --- a/drivers/cpufreq/cpufreq_ondemand.c
24348 +++ b/drivers/cpufreq/cpufreq_ondemand.c
24349 @@ -18,10 +18,10 @@
24350  #include "cpufreq_ondemand.h"
24352  /* On-demand governor macros */
24353 -#define DEF_FREQUENCY_UP_THRESHOLD             (80)
24354 -#define DEF_SAMPLING_DOWN_FACTOR               (1)
24355 +#define DEF_FREQUENCY_UP_THRESHOLD             (63)
24356 +#define DEF_SAMPLING_DOWN_FACTOR               (100)
24357  #define MAX_SAMPLING_DOWN_FACTOR               (100000)
24358 -#define MICRO_FREQUENCY_UP_THRESHOLD           (95)
24359 +#define MICRO_FREQUENCY_UP_THRESHOLD           (70)
24360  #define MICRO_FREQUENCY_MIN_SAMPLE_RATE                (10000)
24361  #define MIN_FREQUENCY_UP_THRESHOLD             (1)
24362  #define MAX_FREQUENCY_UP_THRESHOLD             (100)
24363 @@ -127,7 +127,7 @@ static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
24366  /*
24367 - * Every sampling_rate, we check, if current idle time is less than 20%
24368 + * Every sampling_rate, we check, if current idle time is less than 37%
24369   * (default), then we try to increase frequency. Else, we adjust the frequency
24370   * proportional to load.
24371   */
24372 diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
24373 index 5175ae3cac44..34196c107de6 100644
24374 --- a/drivers/cpufreq/intel_pstate.c
24375 +++ b/drivers/cpufreq/intel_pstate.c
24376 @@ -3054,6 +3054,14 @@ static const struct x86_cpu_id hwp_support_ids[] __initconst = {
24377         {}
24378  };
24380 +static bool intel_pstate_hwp_is_enabled(void)
24382 +       u64 value;
24384 +       rdmsrl(MSR_PM_ENABLE, value);
24385 +       return !!(value & 0x1);
24388  static int __init intel_pstate_init(void)
24390         const struct x86_cpu_id *id;
24391 @@ -3072,8 +3080,12 @@ static int __init intel_pstate_init(void)
24392                  * Avoid enabling HWP for processors without EPP support,
24393                  * because that means incomplete HWP implementation which is a
24394                  * corner case and supporting it is generally problematic.
24395 +                *
24396 +                * If HWP is enabled already, though, there is no choice but to
24397 +                * deal with it.
24398                  */
24399 -               if (!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) {
24400 +               if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) ||
24401 +                   intel_pstate_hwp_is_enabled()) {
24402                         hwp_active++;
24403                         hwp_mode_bdw = id->driver_data;
24404                         intel_pstate.attr = hwp_cpufreq_attrs;
24405 diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
24406 index 0844fadc4be8..334f83e56120 100644
24407 --- a/drivers/cpuidle/Kconfig.arm
24408 +++ b/drivers/cpuidle/Kconfig.arm
24409 @@ -107,7 +107,7 @@ config ARM_TEGRA_CPUIDLE
24411  config ARM_QCOM_SPM_CPUIDLE
24412         bool "CPU Idle Driver for Qualcomm Subsystem Power Manager (SPM)"
24413 -       depends on (ARCH_QCOM || COMPILE_TEST) && !ARM64
24414 +       depends on (ARCH_QCOM || COMPILE_TEST) && !ARM64 && MMU
24415         select ARM_CPU_SUSPEND
24416         select CPU_IDLE_MULTIPLE_DRIVERS
24417         select DT_IDLE_STATES
24418 diff --git a/drivers/cpuidle/cpuidle-tegra.c b/drivers/cpuidle/cpuidle-tegra.c
24419 index 191966dc8d02..29c5e83500d3 100644
24420 --- a/drivers/cpuidle/cpuidle-tegra.c
24421 +++ b/drivers/cpuidle/cpuidle-tegra.c
24422 @@ -135,13 +135,13 @@ static int tegra_cpuidle_c7_enter(void)
24424         int err;
24426 -       if (tegra_cpuidle_using_firmware()) {
24427 -               err = call_firmware_op(prepare_idle, TF_PM_MODE_LP2_NOFLUSH_L2);
24428 -               if (err)
24429 -                       return err;
24430 +       err = call_firmware_op(prepare_idle, TF_PM_MODE_LP2_NOFLUSH_L2);
24431 +       if (err && err != -ENOSYS)
24432 +               return err;
24434 -               return call_firmware_op(do_idle, 0);
24435 -       }
24436 +       err = call_firmware_op(do_idle, 0);
24437 +       if (err != -ENOSYS)
24438 +               return err;
24440         return cpu_suspend(0, tegra30_pm_secondary_cpu_suspend);
24442 diff --git a/drivers/crypto/allwinner/Kconfig b/drivers/crypto/allwinner/Kconfig
24443 index 856fb2045656..b8e75210a0e3 100644
24444 --- a/drivers/crypto/allwinner/Kconfig
24445 +++ b/drivers/crypto/allwinner/Kconfig
24446 @@ -71,10 +71,10 @@ config CRYPTO_DEV_SUN8I_CE_DEBUG
24447  config CRYPTO_DEV_SUN8I_CE_HASH
24448         bool "Enable support for hash on sun8i-ce"
24449         depends on CRYPTO_DEV_SUN8I_CE
24450 -       select MD5
24451 -       select SHA1
24452 -       select SHA256
24453 -       select SHA512
24454 +       select CRYPTO_MD5
24455 +       select CRYPTO_SHA1
24456 +       select CRYPTO_SHA256
24457 +       select CRYPTO_SHA512
24458         help
24459           Say y to enable support for hash algorithms.
24461 @@ -132,8 +132,8 @@ config CRYPTO_DEV_SUN8I_SS_PRNG
24462  config CRYPTO_DEV_SUN8I_SS_HASH
24463         bool "Enable support for hash on sun8i-ss"
24464         depends on CRYPTO_DEV_SUN8I_SS
24465 -       select MD5
24466 -       select SHA1
24467 -       select SHA256
24468 +       select CRYPTO_MD5
24469 +       select CRYPTO_SHA1
24470 +       select CRYPTO_SHA256
24471         help
24472           Say y to enable support for hash algorithms.
24473 diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
24474 index c2e6f5ed1d79..dec79fa3ebaf 100644
24475 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
24476 +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
24477 @@ -561,7 +561,7 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
24478                                     sizeof(struct sun4i_cipher_req_ctx) +
24479                                     crypto_skcipher_reqsize(op->fallback_tfm));
24481 -       err = pm_runtime_get_sync(op->ss->dev);
24482 +       err = pm_runtime_resume_and_get(op->ss->dev);
24483         if (err < 0)
24484                 goto error_pm;
24486 diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
24487 index 709905ec4680..02a2d34845f2 100644
24488 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
24489 +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
24490 @@ -459,7 +459,7 @@ static int sun4i_ss_probe(struct platform_device *pdev)
24491          * this info could be useful
24492          */
24494 -       err = pm_runtime_get_sync(ss->dev);
24495 +       err = pm_runtime_resume_and_get(ss->dev);
24496         if (err < 0)
24497                 goto error_pm;
24499 diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
24500 index c1b4585e9bbc..d28292762b32 100644
24501 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
24502 +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
24503 @@ -27,7 +27,7 @@ int sun4i_hash_crainit(struct crypto_tfm *tfm)
24504         algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
24505         op->ss = algt->ss;
24507 -       err = pm_runtime_get_sync(op->ss->dev);
24508 +       err = pm_runtime_resume_and_get(op->ss->dev);
24509         if (err < 0)
24510                 return err;
24512 diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
24513 index 443160a114bb..491fcb7b81b4 100644
24514 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
24515 +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
24516 @@ -29,7 +29,7 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
24517         algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng);
24518         ss = algt->ss;
24520 -       err = pm_runtime_get_sync(ss->dev);
24521 +       err = pm_runtime_resume_and_get(ss->dev);
24522         if (err < 0)
24523                 return err;
24525 diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
24526 index 158422ff5695..00194d1d9ae6 100644
24527 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
24528 +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
24529 @@ -932,7 +932,7 @@ static int sun8i_ce_probe(struct platform_device *pdev)
24530         if (err)
24531                 goto error_alg;
24533 -       err = pm_runtime_get_sync(ce->dev);
24534 +       err = pm_runtime_resume_and_get(ce->dev);
24535         if (err < 0)
24536                 goto error_alg;
24538 diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
24539 index ed2a69f82e1c..7c355bc2fb06 100644
24540 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
24541 +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
24542 @@ -351,7 +351,7 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
24543         op->enginectx.op.prepare_request = NULL;
24544         op->enginectx.op.unprepare_request = NULL;
24546 -       err = pm_runtime_get_sync(op->ss->dev);
24547 +       err = pm_runtime_resume_and_get(op->ss->dev);
24548         if (err < 0) {
24549                 dev_err(op->ss->dev, "pm error %d\n", err);
24550                 goto error_pm;
24551 diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
24552 index e0ddc684798d..80e89066dbd1 100644
24553 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
24554 +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
24555 @@ -753,7 +753,7 @@ static int sun8i_ss_probe(struct platform_device *pdev)
24556         if (err)
24557                 goto error_alg;
24559 -       err = pm_runtime_get_sync(ss->dev);
24560 +       err = pm_runtime_resume_and_get(ss->dev);
24561         if (err < 0)
24562                 goto error_alg;
24564 diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
24565 index 11cbcbc83a7b..64446b86c927 100644
24566 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
24567 +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
24568 @@ -348,8 +348,10 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
24569         bf = (__le32 *)pad;
24571         result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
24572 -       if (!result)
24573 +       if (!result) {
24574 +               kfree(pad);
24575                 return -ENOMEM;
24576 +       }
24578         for (i = 0; i < MAX_SG; i++) {
24579                 rctx->t_dst[i].addr = 0;
24580 @@ -435,11 +437,10 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
24581         dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
24582         dma_unmap_single(ss->dev, addr_res, digestsize, DMA_FROM_DEVICE);
24584 -       kfree(pad);
24586         memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
24587 -       kfree(result);
24588  theend:
24589 +       kfree(pad);
24590 +       kfree(result);
24591         crypto_finalize_hash_request(engine, breq, err);
24592         return 0;
24594 diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
24595 index 08a1473b2145..3191527928e4 100644
24596 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
24597 +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
24598 @@ -103,7 +103,8 @@ int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
24599         dma_iv = dma_map_single(ss->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE);
24600         if (dma_mapping_error(ss->dev, dma_iv)) {
24601                 dev_err(ss->dev, "Cannot DMA MAP IV\n");
24602 -               return -EFAULT;
24603 +               err = -EFAULT;
24604 +               goto err_free;
24605         }
24607         dma_dst = dma_map_single(ss->dev, d, todo, DMA_FROM_DEVICE);
24608 @@ -167,6 +168,7 @@ int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
24609                 memcpy(ctx->seed, d + dlen, ctx->slen);
24610         }
24611         memzero_explicit(d, todo);
24612 +err_free:
24613         kfree(d);
24615         return err;
24616 diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
24617 index cb9b4c4e371e..3e0d1d6922ba 100644
24618 --- a/drivers/crypto/ccp/sev-dev.c
24619 +++ b/drivers/crypto/ccp/sev-dev.c
24620 @@ -150,6 +150,9 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
24622         sev = psp->sev_data;
24624 +       if (data && WARN_ON_ONCE(!virt_addr_valid(data)))
24625 +               return -EINVAL;
24627         /* Get the physical address of the command buffer */
24628         phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0;
24629         phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0;
24630 @@ -987,7 +990,7 @@ int sev_dev_init(struct psp_device *psp)
24631         if (!sev->vdata) {
24632                 ret = -ENODEV;
24633                 dev_err(dev, "sev: missing driver data\n");
24634 -               goto e_err;
24635 +               goto e_sev;
24636         }
24638         psp_set_sev_irq_handler(psp, sev_irq_handler, sev);
24639 @@ -1002,6 +1005,8 @@ int sev_dev_init(struct psp_device *psp)
24641  e_irq:
24642         psp_clear_sev_irq_handler(psp);
24643 +e_sev:
24644 +       devm_kfree(dev, sev);
24645  e_err:
24646         psp->sev_data = NULL;
24648 diff --git a/drivers/crypto/ccp/tee-dev.c b/drivers/crypto/ccp/tee-dev.c
24649 index 5e697a90ea7f..bcb81fef4211 100644
24650 --- a/drivers/crypto/ccp/tee-dev.c
24651 +++ b/drivers/crypto/ccp/tee-dev.c
24652 @@ -36,6 +36,7 @@ static int tee_alloc_ring(struct psp_tee_device *tee, int ring_size)
24653         if (!start_addr)
24654                 return -ENOMEM;
24656 +       memset(start_addr, 0x0, ring_size);
24657         rb_mgr->ring_start = start_addr;
24658         rb_mgr->ring_size = ring_size;
24659         rb_mgr->ring_pa = __psp_pa(start_addr);
24660 @@ -244,41 +245,54 @@ static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id,
24661                           void *buf, size_t len, struct tee_ring_cmd **resp)
24663         struct tee_ring_cmd *cmd;
24664 -       u32 rptr, wptr;
24665         int nloop = 1000, ret = 0;
24666 +       u32 rptr;
24668         *resp = NULL;
24670         mutex_lock(&tee->rb_mgr.mutex);
24672 -       wptr = tee->rb_mgr.wptr;
24674 -       /* Check if ring buffer is full */
24675 +       /* Loop until empty entry found in ring buffer */
24676         do {
24677 +               /* Get pointer to ring buffer command entry */
24678 +               cmd = (struct tee_ring_cmd *)
24679 +                       (tee->rb_mgr.ring_start + tee->rb_mgr.wptr);
24681                 rptr = ioread32(tee->io_regs + tee->vdata->ring_rptr_reg);
24683 -               if (!(wptr + sizeof(struct tee_ring_cmd) == rptr))
24684 +               /* Check if ring buffer is full or command entry is waiting
24685 +                * for response from TEE
24686 +                */
24687 +               if (!(tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr ||
24688 +                     cmd->flag == CMD_WAITING_FOR_RESPONSE))
24689                         break;
24691 -               dev_info(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
24692 -                        rptr, wptr);
24693 +               dev_dbg(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
24694 +                       rptr, tee->rb_mgr.wptr);
24696 -               /* Wait if ring buffer is full */
24697 +               /* Wait if ring buffer is full or TEE is processing data */
24698                 mutex_unlock(&tee->rb_mgr.mutex);
24699                 schedule_timeout_interruptible(msecs_to_jiffies(10));
24700                 mutex_lock(&tee->rb_mgr.mutex);
24702         } while (--nloop);
24704 -       if (!nloop && (wptr + sizeof(struct tee_ring_cmd) == rptr)) {
24705 -               dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
24706 -                       rptr, wptr);
24707 +       if (!nloop &&
24708 +           (tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr ||
24709 +            cmd->flag == CMD_WAITING_FOR_RESPONSE)) {
24710 +               dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u response flag %u\n",
24711 +                       rptr, tee->rb_mgr.wptr, cmd->flag);
24712                 ret = -EBUSY;
24713                 goto unlock;
24714         }
24716 -       /* Pointer to empty data entry in ring buffer */
24717 -       cmd = (struct tee_ring_cmd *)(tee->rb_mgr.ring_start + wptr);
24718 +       /* Do not submit command if PSP got disabled while processing any
24719 +        * command in another thread
24720 +        */
24721 +       if (psp_dead) {
24722 +               ret = -EBUSY;
24723 +               goto unlock;
24724 +       }
24726         /* Write command data into ring buffer */
24727         cmd->cmd_id = cmd_id;
24728 @@ -286,6 +300,9 @@ static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id,
24729         memset(&cmd->buf[0], 0, sizeof(cmd->buf));
24730         memcpy(&cmd->buf[0], buf, len);
24732 +       /* Indicate driver is waiting for response */
24733 +       cmd->flag = CMD_WAITING_FOR_RESPONSE;
24735         /* Update local copy of write pointer */
24736         tee->rb_mgr.wptr += sizeof(struct tee_ring_cmd);
24737         if (tee->rb_mgr.wptr >= tee->rb_mgr.ring_size)
24738 @@ -353,12 +370,16 @@ int psp_tee_process_cmd(enum tee_cmd_id cmd_id, void *buf, size_t len,
24739                 return ret;
24741         ret = tee_wait_cmd_completion(tee, resp, TEE_DEFAULT_TIMEOUT);
24742 -       if (ret)
24743 +       if (ret) {
24744 +               resp->flag = CMD_RESPONSE_TIMEDOUT;
24745                 return ret;
24746 +       }
24748         memcpy(buf, &resp->buf[0], len);
24749         *status = resp->status;
24751 +       resp->flag = CMD_RESPONSE_COPIED;
24753         return 0;
24755  EXPORT_SYMBOL(psp_tee_process_cmd);
24756 diff --git a/drivers/crypto/ccp/tee-dev.h b/drivers/crypto/ccp/tee-dev.h
24757 index f09960112115..49d26158b71e 100644
24758 --- a/drivers/crypto/ccp/tee-dev.h
24759 +++ b/drivers/crypto/ccp/tee-dev.h
24760 @@ -1,6 +1,6 @@
24761  /* SPDX-License-Identifier: MIT */
24762  /*
24763 - * Copyright 2019 Advanced Micro Devices, Inc.
24764 + * Copyright (C) 2019,2021 Advanced Micro Devices, Inc.
24765   *
24766   * Author: Rijo Thomas <Rijo-john.Thomas@amd.com>
24767   * Author: Devaraj Rangasamy <Devaraj.Rangasamy@amd.com>
24768 @@ -18,7 +18,7 @@
24769  #include <linux/mutex.h>
24771  #define TEE_DEFAULT_TIMEOUT            10
24772 -#define MAX_BUFFER_SIZE                        992
24773 +#define MAX_BUFFER_SIZE                        988
24775  /**
24776   * enum tee_ring_cmd_id - TEE interface commands for ring buffer configuration
24777 @@ -81,6 +81,20 @@ enum tee_cmd_state {
24778         TEE_CMD_STATE_COMPLETED,
24779  };
24781 +/**
24782 + * enum cmd_resp_state - TEE command's response status maintained by driver
24783 + * @CMD_RESPONSE_INVALID:      initial state when no command is written to ring
24784 + * @CMD_WAITING_FOR_RESPONSE:  driver waiting for response from TEE
24785 + * @CMD_RESPONSE_TIMEDOUT:     failed to get response from TEE
24786 + * @CMD_RESPONSE_COPIED:       driver has copied response from TEE
24787 + */
24788 +enum cmd_resp_state {
24789 +       CMD_RESPONSE_INVALID,
24790 +       CMD_WAITING_FOR_RESPONSE,
24791 +       CMD_RESPONSE_TIMEDOUT,
24792 +       CMD_RESPONSE_COPIED,
24795  /**
24796   * struct tee_ring_cmd - Structure of the command buffer in TEE ring
24797   * @cmd_id:      refers to &enum tee_cmd_id. Command id for the ring buffer
24798 @@ -91,6 +105,7 @@ enum tee_cmd_state {
24799   * @pdata:       private data (currently unused)
24800   * @res1:        reserved region
24801   * @buf:         TEE command specific buffer
24802 + * @flag:       refers to &enum cmd_resp_state
24803   */
24804  struct tee_ring_cmd {
24805         u32 cmd_id;
24806 @@ -100,6 +115,7 @@ struct tee_ring_cmd {
24807         u64 pdata;
24808         u32 res1[2];
24809         u8 buf[MAX_BUFFER_SIZE];
24810 +       u32 flag;
24812         /* Total size: 1024 bytes */
24813  } __packed;
24814 diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
24815 index f5a336634daa..405ff957b837 100644
24816 --- a/drivers/crypto/chelsio/chcr_algo.c
24817 +++ b/drivers/crypto/chelsio/chcr_algo.c
24818 @@ -769,13 +769,14 @@ static inline void create_wreq(struct chcr_context *ctx,
24819         struct uld_ctx *u_ctx = ULD_CTX(ctx);
24820         unsigned int tx_channel_id, rx_channel_id;
24821         unsigned int txqidx = 0, rxqidx = 0;
24822 -       unsigned int qid, fid;
24823 +       unsigned int qid, fid, portno;
24825         get_qidxs(req, &txqidx, &rxqidx);
24826         qid = u_ctx->lldi.rxq_ids[rxqidx];
24827         fid = u_ctx->lldi.rxq_ids[0];
24828 +       portno = rxqidx / ctx->rxq_perchan;
24829         tx_channel_id = txqidx / ctx->txq_perchan;
24830 -       rx_channel_id = rxqidx / ctx->rxq_perchan;
24831 +       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[portno]);
24834         chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
24835 @@ -806,6 +807,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
24837         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
24838         struct chcr_context *ctx = c_ctx(tfm);
24839 +       struct uld_ctx *u_ctx = ULD_CTX(ctx);
24840         struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
24841         struct sk_buff *skb = NULL;
24842         struct chcr_wr *chcr_req;
24843 @@ -822,6 +824,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
24844         struct adapter *adap = padap(ctx->dev);
24845         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
24847 +       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
24848         nents = sg_nents_xlen(reqctx->dstsg,  wrparam->bytes, CHCR_DST_SG_SIZE,
24849                               reqctx->dst_ofst);
24850         dst_size = get_space_for_phys_dsgl(nents);
24851 @@ -1580,6 +1583,7 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
24852         int error = 0;
24853         unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;
24855 +       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
24856         transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
24857         req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
24858                                 param->sg_len) <= SGE_MAX_WR_LEN;
24859 @@ -2438,6 +2442,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
24861         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
24862         struct chcr_context *ctx = a_ctx(tfm);
24863 +       struct uld_ctx *u_ctx = ULD_CTX(ctx);
24864         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
24865         struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
24866         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
24867 @@ -2457,6 +2462,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
24868         struct adapter *adap = padap(ctx->dev);
24869         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
24871 +       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
24872         if (req->cryptlen == 0)
24873                 return NULL;
24875 @@ -2710,9 +2716,11 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
24876         struct dsgl_walk dsgl_walk;
24877         unsigned int authsize = crypto_aead_authsize(tfm);
24878         struct chcr_context *ctx = a_ctx(tfm);
24879 +       struct uld_ctx *u_ctx = ULD_CTX(ctx);
24880         u32 temp;
24881         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
24883 +       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
24884         dsgl_walk_init(&dsgl_walk, phys_cpl);
24885         dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
24886         temp = req->assoclen + req->cryptlen +
24887 @@ -2752,9 +2760,11 @@ void chcr_add_cipher_dst_ent(struct skcipher_request *req,
24888         struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
24889         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
24890         struct chcr_context *ctx = c_ctx(tfm);
24891 +       struct uld_ctx *u_ctx = ULD_CTX(ctx);
24892         struct dsgl_walk dsgl_walk;
24893         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
24895 +       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
24896         dsgl_walk_init(&dsgl_walk, phys_cpl);
24897         dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
24898                          reqctx->dst_ofst);
24899 @@ -2958,6 +2968,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
24901         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
24902         struct chcr_context *ctx = a_ctx(tfm);
24903 +       struct uld_ctx *u_ctx = ULD_CTX(ctx);
24904         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
24905         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
24906         unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
24907 @@ -2967,6 +2978,8 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
24908         unsigned int tag_offset = 0, auth_offset = 0;
24909         unsigned int assoclen;
24911 +       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
24913         if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
24914                 assoclen = req->assoclen - 8;
24915         else
24916 @@ -3127,6 +3140,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
24918         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
24919         struct chcr_context *ctx = a_ctx(tfm);
24920 +       struct uld_ctx *u_ctx = ULD_CTX(ctx);
24921         struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
24922         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
24923         struct sk_buff *skb = NULL;
24924 @@ -3143,6 +3157,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
24925         struct adapter *adap = padap(ctx->dev);
24926         unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
24928 +       rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
24929         if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
24930                 assoclen = req->assoclen - 8;
24932 diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
24933 index 2eaa516b3231..8adcbb327126 100644
24934 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
24935 +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
24936 @@ -546,7 +546,7 @@ static int sec_skcipher_init(struct crypto_skcipher *tfm)
24937         crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
24938         ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
24939         if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
24940 -               dev_err(SEC_CTX_DEV(ctx), "get error skcipher iv size!\n");
24941 +               pr_err("get error skcipher iv size!\n");
24942                 return -EINVAL;
24943         }
24945 diff --git a/drivers/crypto/keembay/keembay-ocs-aes-core.c b/drivers/crypto/keembay/keembay-ocs-aes-core.c
24946 index b6b25d994af3..2ef312866338 100644
24947 --- a/drivers/crypto/keembay/keembay-ocs-aes-core.c
24948 +++ b/drivers/crypto/keembay/keembay-ocs-aes-core.c
24949 @@ -1649,8 +1649,10 @@ static int kmb_ocs_aes_probe(struct platform_device *pdev)
24951         /* Initialize crypto engine */
24952         aes_dev->engine = crypto_engine_alloc_init(dev, true);
24953 -       if (!aes_dev->engine)
24954 +       if (!aes_dev->engine) {
24955 +               rc = -ENOMEM;
24956                 goto list_del;
24957 +       }
24959         rc = crypto_engine_start(aes_dev->engine);
24960         if (rc) {
24961 diff --git a/drivers/crypto/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/keembay/keembay-ocs-hcu-core.c
24962 index c4b97b4160e9..322c51a6936f 100644
24963 --- a/drivers/crypto/keembay/keembay-ocs-hcu-core.c
24964 +++ b/drivers/crypto/keembay/keembay-ocs-hcu-core.c
24965 @@ -1220,8 +1220,10 @@ static int kmb_ocs_hcu_probe(struct platform_device *pdev)
24967         /* Initialize crypto engine */
24968         hcu_dev->engine = crypto_engine_alloc_init(dev, 1);
24969 -       if (!hcu_dev->engine)
24970 +       if (!hcu_dev->engine) {
24971 +               rc = -ENOMEM;
24972                 goto list_del;
24973 +       }
24975         rc = crypto_engine_start(hcu_dev->engine);
24976         if (rc) {
24977 diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
24978 index a45bdcf3026d..0dd4c6b157de 100644
24979 --- a/drivers/crypto/omap-aes.c
24980 +++ b/drivers/crypto/omap-aes.c
24981 @@ -103,9 +103,8 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd)
24982                 dd->err = 0;
24983         }
24985 -       err = pm_runtime_get_sync(dd->dev);
24986 +       err = pm_runtime_resume_and_get(dd->dev);
24987         if (err < 0) {
24988 -               pm_runtime_put_noidle(dd->dev);
24989                 dev_err(dd->dev, "failed to get sync: %d\n", err);
24990                 return err;
24991         }
24992 @@ -1134,7 +1133,7 @@ static int omap_aes_probe(struct platform_device *pdev)
24993         pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
24995         pm_runtime_enable(dev);
24996 -       err = pm_runtime_get_sync(dev);
24997 +       err = pm_runtime_resume_and_get(dev);
24998         if (err < 0) {
24999                 dev_err(dev, "%s: failed to get_sync(%d)\n",
25000                         __func__, err);
25001 @@ -1303,7 +1302,7 @@ static int omap_aes_suspend(struct device *dev)
25003  static int omap_aes_resume(struct device *dev)
25005 -       pm_runtime_get_sync(dev);
25006 +       pm_runtime_resume_and_get(dev);
25007         return 0;
25009  #endif
25010 diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
25011 index 1d1532e8fb6d..067ca5e17d38 100644
25012 --- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
25013 +++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
25014 @@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
25015         if (ret)
25016                 goto out_err_free_reg;
25018 -       set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
25020         ret = adf_dev_init(accel_dev);
25021         if (ret)
25022                 goto out_err_dev_shutdown;
25024 +       set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
25026         ret = adf_dev_start(accel_dev);
25027         if (ret)
25028                 goto out_err_dev_stop;
25029 diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
25030 index 04742a6d91ca..51ea88c0b17d 100644
25031 --- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
25032 +++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
25033 @@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
25034         if (ret)
25035                 goto out_err_free_reg;
25037 -       set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
25039         ret = adf_dev_init(accel_dev);
25040         if (ret)
25041                 goto out_err_dev_shutdown;
25043 +       set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
25045         ret = adf_dev_start(accel_dev);
25046         if (ret)
25047                 goto out_err_dev_stop;
25048 diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
25049 index c45853463530..e3ad5587be49 100644
25050 --- a/drivers/crypto/qat/qat_common/adf_isr.c
25051 +++ b/drivers/crypto/qat/qat_common/adf_isr.c
25052 @@ -291,19 +291,32 @@ int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
25054         ret = adf_isr_alloc_msix_entry_table(accel_dev);
25055         if (ret)
25056 -               return ret;
25057 -       if (adf_enable_msix(accel_dev))
25058                 goto err_out;
25060 -       if (adf_setup_bh(accel_dev))
25061 -               goto err_out;
25062 +       ret = adf_enable_msix(accel_dev);
25063 +       if (ret)
25064 +               goto err_free_msix_table;
25066 -       if (adf_request_irqs(accel_dev))
25067 -               goto err_out;
25068 +       ret = adf_setup_bh(accel_dev);
25069 +       if (ret)
25070 +               goto err_disable_msix;
25072 +       ret = adf_request_irqs(accel_dev);
25073 +       if (ret)
25074 +               goto err_cleanup_bh;
25076         return 0;
25078 +err_cleanup_bh:
25079 +       adf_cleanup_bh(accel_dev);
25081 +err_disable_msix:
25082 +       adf_disable_msix(&accel_dev->accel_pci_dev);
25084 +err_free_msix_table:
25085 +       adf_isr_free_msix_entry_table(accel_dev);
25087  err_out:
25088 -       adf_isr_resource_free(accel_dev);
25089 -       return -EFAULT;
25090 +       return ret;
25092  EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);
25093 diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
25094 index 888c1e047295..8ba28409fb74 100644
25095 --- a/drivers/crypto/qat/qat_common/adf_transport.c
25096 +++ b/drivers/crypto/qat/qat_common/adf_transport.c
25097 @@ -172,6 +172,7 @@ static int adf_init_ring(struct adf_etr_ring_data *ring)
25098                 dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n");
25099                 dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
25100                                   ring->base_addr, ring->dma_addr);
25101 +               ring->base_addr = NULL;
25102                 return -EFAULT;
25103         }
25105 diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
25106 index 38d316a42ba6..888388acb6bd 100644
25107 --- a/drivers/crypto/qat/qat_common/adf_vf_isr.c
25108 +++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c
25109 @@ -261,17 +261,26 @@ int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
25110                 goto err_out;
25112         if (adf_setup_pf2vf_bh(accel_dev))
25113 -               goto err_out;
25114 +               goto err_disable_msi;
25116         if (adf_setup_bh(accel_dev))
25117 -               goto err_out;
25118 +               goto err_cleanup_pf2vf_bh;
25120         if (adf_request_msi_irq(accel_dev))
25121 -               goto err_out;
25122 +               goto err_cleanup_bh;
25124         return 0;
25126 +err_cleanup_bh:
25127 +       adf_cleanup_bh(accel_dev);
25129 +err_cleanup_pf2vf_bh:
25130 +       adf_cleanup_pf2vf_bh(accel_dev);
25132 +err_disable_msi:
25133 +       adf_disable_msi(accel_dev);
25135  err_out:
25136 -       adf_vf_isr_resource_free(accel_dev);
25137         return -EFAULT;
25139  EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);
25140 diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
25141 index ff78c73c47e3..ea1c6899290d 100644
25142 --- a/drivers/crypto/qat/qat_common/qat_algs.c
25143 +++ b/drivers/crypto/qat/qat_common/qat_algs.c
25144 @@ -719,7 +719,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
25145         struct qat_alg_buf_list *bufl;
25146         struct qat_alg_buf_list *buflout = NULL;
25147         dma_addr_t blp;
25148 -       dma_addr_t bloutp = 0;
25149 +       dma_addr_t bloutp;
25150         struct scatterlist *sg;
25151         size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
25153 @@ -731,6 +731,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
25154         if (unlikely(!bufl))
25155                 return -ENOMEM;
25157 +       for_each_sg(sgl, sg, n, i)
25158 +               bufl->bufers[i].addr = DMA_MAPPING_ERROR;
25160         blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
25161         if (unlikely(dma_mapping_error(dev, blp)))
25162                 goto err_in;
25163 @@ -764,10 +767,14 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
25164                                        dev_to_node(&GET_DEV(inst->accel_dev)));
25165                 if (unlikely(!buflout))
25166                         goto err_in;
25168 +               bufers = buflout->bufers;
25169 +               for_each_sg(sglout, sg, n, i)
25170 +                       bufers[i].addr = DMA_MAPPING_ERROR;
25172                 bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
25173                 if (unlikely(dma_mapping_error(dev, bloutp)))
25174                         goto err_out;
25175 -               bufers = buflout->bufers;
25176                 for_each_sg(sglout, sg, n, i) {
25177                         int y = sg_nctr;
25179 diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
25180 index c972554a755e..29999da716cc 100644
25181 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
25182 +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
25183 @@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
25184         if (ret)
25185                 goto out_err_free_reg;
25187 -       set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
25189         ret = adf_dev_init(accel_dev);
25190         if (ret)
25191                 goto out_err_dev_shutdown;
25193 +       set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
25195         ret = adf_dev_start(accel_dev);
25196         if (ret)
25197                 goto out_err_dev_stop;
25198 diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
25199 index f300b0a5958a..b0f0502a5bb0 100644
25200 --- a/drivers/crypto/sa2ul.c
25201 +++ b/drivers/crypto/sa2ul.c
25202 @@ -1146,8 +1146,10 @@ static int sa_run(struct sa_req *req)
25203                 mapped_sg->sgt.sgl = src;
25204                 mapped_sg->sgt.orig_nents = src_nents;
25205                 ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
25206 -               if (ret)
25207 +               if (ret) {
25208 +                       kfree(rxd);
25209                         return ret;
25210 +               }
25212                 mapped_sg->dir = dir_src;
25213                 mapped_sg->mapped = true;
25214 @@ -1155,8 +1157,10 @@ static int sa_run(struct sa_req *req)
25215                 mapped_sg->sgt.sgl = req->src;
25216                 mapped_sg->sgt.orig_nents = sg_nents;
25217                 ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
25218 -               if (ret)
25219 +               if (ret) {
25220 +                       kfree(rxd);
25221                         return ret;
25222 +               }
25224                 mapped_sg->dir = dir_src;
25225                 mapped_sg->mapped = true;
25226 @@ -2350,7 +2354,7 @@ static int sa_ul_probe(struct platform_device *pdev)
25227         dev_set_drvdata(sa_k3_dev, dev_data);
25229         pm_runtime_enable(dev);
25230 -       ret = pm_runtime_get_sync(dev);
25231 +       ret = pm_runtime_resume_and_get(dev);
25232         if (ret < 0) {
25233                 dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
25234                         ret);
25235 diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
25236 index 2a4793176c71..7389a0536ff0 100644
25237 --- a/drivers/crypto/stm32/stm32-cryp.c
25238 +++ b/drivers/crypto/stm32/stm32-cryp.c
25239 @@ -542,7 +542,7 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
25240         int ret;
25241         u32 cfg, hw_mode;
25243 -       pm_runtime_get_sync(cryp->dev);
25244 +       pm_runtime_resume_and_get(cryp->dev);
25246         /* Disable interrupt */
25247         stm32_cryp_write(cryp, CRYP_IMSCR, 0);
25248 @@ -2043,7 +2043,7 @@ static int stm32_cryp_remove(struct platform_device *pdev)
25249         if (!cryp)
25250                 return -ENODEV;
25252 -       ret = pm_runtime_get_sync(cryp->dev);
25253 +       ret = pm_runtime_resume_and_get(cryp->dev);
25254         if (ret < 0)
25255                 return ret;
25257 diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
25258 index 7ac0573ef663..389de9e3302d 100644
25259 --- a/drivers/crypto/stm32/stm32-hash.c
25260 +++ b/drivers/crypto/stm32/stm32-hash.c
25261 @@ -813,7 +813,7 @@ static void stm32_hash_finish_req(struct ahash_request *req, int err)
25262  static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
25263                               struct stm32_hash_request_ctx *rctx)
25265 -       pm_runtime_get_sync(hdev->dev);
25266 +       pm_runtime_resume_and_get(hdev->dev);
25268         if (!(HASH_FLAGS_INIT & hdev->flags)) {
25269                 stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
25270 @@ -962,7 +962,7 @@ static int stm32_hash_export(struct ahash_request *req, void *out)
25271         u32 *preg;
25272         unsigned int i;
25274 -       pm_runtime_get_sync(hdev->dev);
25275 +       pm_runtime_resume_and_get(hdev->dev);
25277         while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY))
25278                 cpu_relax();
25279 @@ -1000,7 +1000,7 @@ static int stm32_hash_import(struct ahash_request *req, const void *in)
25281         preg = rctx->hw_context;
25283 -       pm_runtime_get_sync(hdev->dev);
25284 +       pm_runtime_resume_and_get(hdev->dev);
25286         stm32_hash_write(hdev, HASH_IMR, *preg++);
25287         stm32_hash_write(hdev, HASH_STR, *preg++);
25288 @@ -1566,7 +1566,7 @@ static int stm32_hash_remove(struct platform_device *pdev)
25289         if (!hdev)
25290                 return -ENODEV;
25292 -       ret = pm_runtime_get_sync(hdev->dev);
25293 +       ret = pm_runtime_resume_and_get(hdev->dev);
25294         if (ret < 0)
25295                 return ret;
25297 diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
25298 index bf3047896e41..59ba59bea0f5 100644
25299 --- a/drivers/devfreq/devfreq.c
25300 +++ b/drivers/devfreq/devfreq.c
25301 @@ -387,7 +387,7 @@ static int devfreq_set_target(struct devfreq *devfreq, unsigned long new_freq,
25302         devfreq->previous_freq = new_freq;
25304         if (devfreq->suspend_freq)
25305 -               devfreq->resume_freq = cur_freq;
25306 +               devfreq->resume_freq = new_freq;
25308         return err;
25310 @@ -821,7 +821,8 @@ struct devfreq *devfreq_add_device(struct device *dev,
25312         if (devfreq->profile->timer < 0
25313                 || devfreq->profile->timer >= DEVFREQ_TIMER_NUM) {
25314 -               goto err_out;
25315 +               mutex_unlock(&devfreq->lock);
25316 +               goto err_dev;
25317         }
25319         if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
25320 diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
25321 index 08d71dafa001..58c8cc8fe0e1 100644
25322 --- a/drivers/dma/dw-edma/dw-edma-core.c
25323 +++ b/drivers/dma/dw-edma/dw-edma-core.c
25324 @@ -937,22 +937,21 @@ int dw_edma_remove(struct dw_edma_chip *chip)
25325         /* Power management */
25326         pm_runtime_disable(dev);
25328 +       /* Deregister eDMA device */
25329 +       dma_async_device_unregister(&dw->wr_edma);
25330         list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
25331                                  vc.chan.device_node) {
25332 -               list_del(&chan->vc.chan.device_node);
25333                 tasklet_kill(&chan->vc.task);
25334 +               list_del(&chan->vc.chan.device_node);
25335         }
25337 +       dma_async_device_unregister(&dw->rd_edma);
25338         list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels,
25339                                  vc.chan.device_node) {
25340 -               list_del(&chan->vc.chan.device_node);
25341                 tasklet_kill(&chan->vc.task);
25342 +               list_del(&chan->vc.chan.device_node);
25343         }
25345 -       /* Deregister eDMA device */
25346 -       dma_async_device_unregister(&dw->wr_edma);
25347 -       dma_async_device_unregister(&dw->rd_edma);
25349         /* Turn debugfs off */
25350         dw_edma_v0_core_debugfs_off();
25352 diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
25353 index 0db9b82ed8cf..1d8a3876b745 100644
25354 --- a/drivers/dma/idxd/cdev.c
25355 +++ b/drivers/dma/idxd/cdev.c
25356 @@ -39,15 +39,15 @@ struct idxd_user_context {
25357         struct iommu_sva *sva;
25358  };
25360 -enum idxd_cdev_cleanup {
25361 -       CDEV_NORMAL = 0,
25362 -       CDEV_FAILED,
25365  static void idxd_cdev_dev_release(struct device *dev)
25367 -       dev_dbg(dev, "releasing cdev device\n");
25368 -       kfree(dev);
25369 +       struct idxd_cdev *idxd_cdev = container_of(dev, struct idxd_cdev, dev);
25370 +       struct idxd_cdev_context *cdev_ctx;
25371 +       struct idxd_wq *wq = idxd_cdev->wq;
25373 +       cdev_ctx = &ictx[wq->idxd->type];
25374 +       ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
25375 +       kfree(idxd_cdev);
25378  static struct device_type idxd_cdev_device_type = {
25379 @@ -62,14 +62,11 @@ static inline struct idxd_cdev *inode_idxd_cdev(struct inode *inode)
25380         return container_of(cdev, struct idxd_cdev, cdev);
25383 -static inline struct idxd_wq *idxd_cdev_wq(struct idxd_cdev *idxd_cdev)
25385 -       return container_of(idxd_cdev, struct idxd_wq, idxd_cdev);
25388  static inline struct idxd_wq *inode_wq(struct inode *inode)
25390 -       return idxd_cdev_wq(inode_idxd_cdev(inode));
25391 +       struct idxd_cdev *idxd_cdev = inode_idxd_cdev(inode);
25393 +       return idxd_cdev->wq;
25396  static int idxd_cdev_open(struct inode *inode, struct file *filp)
25397 @@ -220,11 +217,10 @@ static __poll_t idxd_cdev_poll(struct file *filp,
25398         struct idxd_user_context *ctx = filp->private_data;
25399         struct idxd_wq *wq = ctx->wq;
25400         struct idxd_device *idxd = wq->idxd;
25401 -       struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
25402         unsigned long flags;
25403         __poll_t out = 0;
25405 -       poll_wait(filp, &idxd_cdev->err_queue, wait);
25406 +       poll_wait(filp, &wq->err_queue, wait);
25407         spin_lock_irqsave(&idxd->dev_lock, flags);
25408         if (idxd->sw_err.valid)
25409                 out = EPOLLIN | EPOLLRDNORM;
25410 @@ -246,98 +242,67 @@ int idxd_cdev_get_major(struct idxd_device *idxd)
25411         return MAJOR(ictx[idxd->type].devt);
25414 -static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)
25415 +int idxd_wq_add_cdev(struct idxd_wq *wq)
25417         struct idxd_device *idxd = wq->idxd;
25418 -       struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
25419 -       struct idxd_cdev_context *cdev_ctx;
25420 +       struct idxd_cdev *idxd_cdev;
25421 +       struct cdev *cdev;
25422         struct device *dev;
25423 -       int minor, rc;
25424 +       struct idxd_cdev_context *cdev_ctx;
25425 +       int rc, minor;
25427 -       idxd_cdev->dev = kzalloc(sizeof(*idxd_cdev->dev), GFP_KERNEL);
25428 -       if (!idxd_cdev->dev)
25429 +       idxd_cdev = kzalloc(sizeof(*idxd_cdev), GFP_KERNEL);
25430 +       if (!idxd_cdev)
25431                 return -ENOMEM;
25433 -       dev = idxd_cdev->dev;
25434 -       dev->parent = &idxd->pdev->dev;
25435 -       dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd),
25436 -                    idxd->id, wq->id);
25437 -       dev->bus = idxd_get_bus_type(idxd);
25439 +       idxd_cdev->wq = wq;
25440 +       cdev = &idxd_cdev->cdev;
25441 +       dev = &idxd_cdev->dev;
25442         cdev_ctx = &ictx[wq->idxd->type];
25443         minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
25444         if (minor < 0) {
25445 -               rc = minor;
25446 -               kfree(dev);
25447 -               goto ida_err;
25448 -       }
25450 -       dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);
25451 -       dev->type = &idxd_cdev_device_type;
25452 -       rc = device_register(dev);
25453 -       if (rc < 0) {
25454 -               dev_err(&idxd->pdev->dev, "device register failed\n");
25455 -               goto dev_reg_err;
25456 +               kfree(idxd_cdev);
25457 +               return minor;
25458         }
25459         idxd_cdev->minor = minor;
25461 -       return 0;
25463 - dev_reg_err:
25464 -       ida_simple_remove(&cdev_ctx->minor_ida, MINOR(dev->devt));
25465 -       put_device(dev);
25466 - ida_err:
25467 -       idxd_cdev->dev = NULL;
25468 -       return rc;
25471 -static void idxd_wq_cdev_cleanup(struct idxd_wq *wq,
25472 -                                enum idxd_cdev_cleanup cdev_state)
25474 -       struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
25475 -       struct idxd_cdev_context *cdev_ctx;
25477 -       cdev_ctx = &ictx[wq->idxd->type];
25478 -       if (cdev_state == CDEV_NORMAL)
25479 -               cdev_del(&idxd_cdev->cdev);
25480 -       device_unregister(idxd_cdev->dev);
25481 -       /*
25482 -        * The device_type->release() will be called on the device and free
25483 -        * the allocated struct device. We can just forget it.
25484 -        */
25485 -       ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
25486 -       idxd_cdev->dev = NULL;
25487 -       idxd_cdev->minor = -1;
25490 -int idxd_wq_add_cdev(struct idxd_wq *wq)
25492 -       struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
25493 -       struct cdev *cdev = &idxd_cdev->cdev;
25494 -       struct device *dev;
25495 -       int rc;
25496 +       device_initialize(dev);
25497 +       dev->parent = &wq->conf_dev;
25498 +       dev->bus = idxd_get_bus_type(idxd);
25499 +       dev->type = &idxd_cdev_device_type;
25500 +       dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);
25502 -       rc = idxd_wq_cdev_dev_setup(wq);
25503 +       rc = dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd),
25504 +                         idxd->id, wq->id);
25505         if (rc < 0)
25506 -               return rc;
25507 +               goto err;
25509 -       dev = idxd_cdev->dev;
25510 +       wq->idxd_cdev = idxd_cdev;
25511         cdev_init(cdev, &idxd_cdev_fops);
25512 -       cdev_set_parent(cdev, &dev->kobj);
25513 -       rc = cdev_add(cdev, dev->devt, 1);
25514 +       rc = cdev_device_add(cdev, dev);
25515         if (rc) {
25516                 dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc);
25517 -               idxd_wq_cdev_cleanup(wq, CDEV_FAILED);
25518 -               return rc;
25519 +               goto err;
25520         }
25522 -       init_waitqueue_head(&idxd_cdev->err_queue);
25523         return 0;
25525 + err:
25526 +       put_device(dev);
25527 +       wq->idxd_cdev = NULL;
25528 +       return rc;
25531  void idxd_wq_del_cdev(struct idxd_wq *wq)
25533 -       idxd_wq_cdev_cleanup(wq, CDEV_NORMAL);
25534 +       struct idxd_cdev *idxd_cdev;
25535 +       struct idxd_cdev_context *cdev_ctx;
25537 +       cdev_ctx = &ictx[wq->idxd->type];
25538 +       idxd_cdev = wq->idxd_cdev;
25539 +       wq->idxd_cdev = NULL;
25540 +       cdev_device_del(&idxd_cdev->cdev, &idxd_cdev->dev);
25541 +       put_device(&idxd_cdev->dev);
25544  int idxd_cdev_register(void)
25545 diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
25546 index 31c819544a22..4fef57717049 100644
25547 --- a/drivers/dma/idxd/device.c
25548 +++ b/drivers/dma/idxd/device.c
25549 @@ -19,7 +19,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
25550  /* Interrupt control bits */
25551  void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
25553 -       struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);
25554 +       struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);
25556         pci_msi_mask_irq(data);
25558 @@ -36,7 +36,7 @@ void idxd_mask_msix_vectors(struct idxd_device *idxd)
25560  void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
25562 -       struct irq_data *data = irq_get_irq_data(idxd->msix_entries[vec_id].vector);
25563 +       struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);
25565         pci_msi_unmask_irq(data);
25567 @@ -186,8 +186,6 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
25568                 desc->id = i;
25569                 desc->wq = wq;
25570                 desc->cpu = -1;
25571 -               dma_async_tx_descriptor_init(&desc->txd, &wq->dma_chan);
25572 -               desc->txd.tx_submit = idxd_dma_tx_submit;
25573         }
25575         return 0;
25576 @@ -451,7 +449,8 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
25578         if (idxd_device_is_halted(idxd)) {
25579                 dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
25580 -               *status = IDXD_CMDSTS_HW_ERR;
25581 +               if (status)
25582 +                       *status = IDXD_CMDSTS_HW_ERR;
25583                 return;
25584         }
25586 @@ -521,7 +520,7 @@ void idxd_device_wqs_clear_state(struct idxd_device *idxd)
25587         lockdep_assert_held(&idxd->dev_lock);
25589         for (i = 0; i < idxd->max_wqs; i++) {
25590 -               struct idxd_wq *wq = &idxd->wqs[i];
25591 +               struct idxd_wq *wq = idxd->wqs[i];
25593                 if (wq->state == IDXD_WQ_ENABLED) {
25594                         idxd_wq_disable_cleanup(wq);
25595 @@ -660,7 +659,7 @@ static int idxd_groups_config_write(struct idxd_device *idxd)
25596                 ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));
25598         for (i = 0; i < idxd->max_groups; i++) {
25599 -               struct idxd_group *group = &idxd->groups[i];
25600 +               struct idxd_group *group = idxd->groups[i];
25602                 idxd_group_config_write(group);
25603         }
25604 @@ -739,7 +738,7 @@ static int idxd_wqs_config_write(struct idxd_device *idxd)
25605         int i, rc;
25607         for (i = 0; i < idxd->max_wqs; i++) {
25608 -               struct idxd_wq *wq = &idxd->wqs[i];
25609 +               struct idxd_wq *wq = idxd->wqs[i];
25611                 rc = idxd_wq_config_write(wq);
25612                 if (rc < 0)
25613 @@ -755,7 +754,7 @@ static void idxd_group_flags_setup(struct idxd_device *idxd)
25615         /* TC-A 0 and TC-B 1 should be defaults */
25616         for (i = 0; i < idxd->max_groups; i++) {
25617 -               struct idxd_group *group = &idxd->groups[i];
25618 +               struct idxd_group *group = idxd->groups[i];
25620                 if (group->tc_a == -1)
25621                         group->tc_a = group->grpcfg.flags.tc_a = 0;
25622 @@ -782,12 +781,12 @@ static int idxd_engines_setup(struct idxd_device *idxd)
25623         struct idxd_group *group;
25625         for (i = 0; i < idxd->max_groups; i++) {
25626 -               group = &idxd->groups[i];
25627 +               group = idxd->groups[i];
25628                 group->grpcfg.engines = 0;
25629         }
25631         for (i = 0; i < idxd->max_engines; i++) {
25632 -               eng = &idxd->engines[i];
25633 +               eng = idxd->engines[i];
25634                 group = eng->group;
25636                 if (!group)
25637 @@ -811,13 +810,13 @@ static int idxd_wqs_setup(struct idxd_device *idxd)
25638         struct device *dev = &idxd->pdev->dev;
25640         for (i = 0; i < idxd->max_groups; i++) {
25641 -               group = &idxd->groups[i];
25642 +               group = idxd->groups[i];
25643                 for (j = 0; j < 4; j++)
25644                         group->grpcfg.wqs[j] = 0;
25645         }
25647         for (i = 0; i < idxd->max_wqs; i++) {
25648 -               wq = &idxd->wqs[i];
25649 +               wq = idxd->wqs[i];
25650                 group = wq->group;
25652                 if (!wq->group)
25653 diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
25654 index a15e50126434..77439b645044 100644
25655 --- a/drivers/dma/idxd/dma.c
25656 +++ b/drivers/dma/idxd/dma.c
25657 @@ -14,7 +14,10 @@
25659  static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
25661 -       return container_of(c, struct idxd_wq, dma_chan);
25662 +       struct idxd_dma_chan *idxd_chan;
25664 +       idxd_chan = container_of(c, struct idxd_dma_chan, chan);
25665 +       return idxd_chan->wq;
25668  void idxd_dma_complete_txd(struct idxd_desc *desc,
25669 @@ -135,7 +138,7 @@ static void idxd_dma_issue_pending(struct dma_chan *dma_chan)
25673 -dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
25674 +static dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
25676         struct dma_chan *c = tx->chan;
25677         struct idxd_wq *wq = to_idxd_wq(c);
25678 @@ -156,14 +159,25 @@ dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
25680  static void idxd_dma_release(struct dma_device *device)
25682 +       struct idxd_dma_dev *idxd_dma = container_of(device, struct idxd_dma_dev, dma);
25684 +       kfree(idxd_dma);
25687  int idxd_register_dma_device(struct idxd_device *idxd)
25689 -       struct dma_device *dma = &idxd->dma_dev;
25690 +       struct idxd_dma_dev *idxd_dma;
25691 +       struct dma_device *dma;
25692 +       struct device *dev = &idxd->pdev->dev;
25693 +       int rc;
25695 +       idxd_dma = kzalloc_node(sizeof(*idxd_dma), GFP_KERNEL, dev_to_node(dev));
25696 +       if (!idxd_dma)
25697 +               return -ENOMEM;
25699 +       dma = &idxd_dma->dma;
25700         INIT_LIST_HEAD(&dma->channels);
25701 -       dma->dev = &idxd->pdev->dev;
25702 +       dma->dev = dev;
25704         dma_cap_set(DMA_PRIVATE, dma->cap_mask);
25705         dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
25706 @@ -179,35 +193,72 @@ int idxd_register_dma_device(struct idxd_device *idxd)
25707         dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
25708         dma->device_free_chan_resources = idxd_dma_free_chan_resources;
25710 -       return dma_async_device_register(&idxd->dma_dev);
25711 +       rc = dma_async_device_register(dma);
25712 +       if (rc < 0) {
25713 +               kfree(idxd_dma);
25714 +               return rc;
25715 +       }
25717 +       idxd_dma->idxd = idxd;
25718 +       /*
25719 +        * This pointer is protected by the refs taken by the dma_chan. It will remain valid
25720 +        * as long as there are outstanding channels.
25721 +        */
25722 +       idxd->idxd_dma = idxd_dma;
25723 +       return 0;
25726  void idxd_unregister_dma_device(struct idxd_device *idxd)
25728 -       dma_async_device_unregister(&idxd->dma_dev);
25729 +       dma_async_device_unregister(&idxd->idxd_dma->dma);
25732  int idxd_register_dma_channel(struct idxd_wq *wq)
25734         struct idxd_device *idxd = wq->idxd;
25735 -       struct dma_device *dma = &idxd->dma_dev;
25736 -       struct dma_chan *chan = &wq->dma_chan;
25737 -       int rc;
25738 +       struct dma_device *dma = &idxd->idxd_dma->dma;
25739 +       struct device *dev = &idxd->pdev->dev;
25740 +       struct idxd_dma_chan *idxd_chan;
25741 +       struct dma_chan *chan;
25742 +       int rc, i;
25744 +       idxd_chan = kzalloc_node(sizeof(*idxd_chan), GFP_KERNEL, dev_to_node(dev));
25745 +       if (!idxd_chan)
25746 +               return -ENOMEM;
25748 -       memset(&wq->dma_chan, 0, sizeof(struct dma_chan));
25749 +       chan = &idxd_chan->chan;
25750         chan->device = dma;
25751         list_add_tail(&chan->device_node, &dma->channels);
25753 +       for (i = 0; i < wq->num_descs; i++) {
25754 +               struct idxd_desc *desc = wq->descs[i];
25756 +               dma_async_tx_descriptor_init(&desc->txd, chan);
25757 +               desc->txd.tx_submit = idxd_dma_tx_submit;
25758 +       }
25760         rc = dma_async_device_channel_register(dma, chan);
25761 -       if (rc < 0)
25762 +       if (rc < 0) {
25763 +               kfree(idxd_chan);
25764                 return rc;
25765 +       }
25767 +       wq->idxd_chan = idxd_chan;
25768 +       idxd_chan->wq = wq;
25769 +       get_device(&wq->conf_dev);
25771         return 0;
25774  void idxd_unregister_dma_channel(struct idxd_wq *wq)
25776 -       struct dma_chan *chan = &wq->dma_chan;
25777 +       struct idxd_dma_chan *idxd_chan = wq->idxd_chan;
25778 +       struct dma_chan *chan = &idxd_chan->chan;
25779 +       struct idxd_dma_dev *idxd_dma = wq->idxd->idxd_dma;
25781 -       dma_async_device_channel_unregister(&wq->idxd->dma_dev, chan);
25782 +       dma_async_device_channel_unregister(&idxd_dma->dma, chan);
25783         list_del(&chan->device_node);
25784 +       kfree(wq->idxd_chan);
25785 +       wq->idxd_chan = NULL;
25786 +       put_device(&wq->conf_dev);
25788 diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
25789 index 76014c14f473..89daf746d121 100644
25790 --- a/drivers/dma/idxd/idxd.h
25791 +++ b/drivers/dma/idxd/idxd.h
25792 @@ -8,12 +8,16 @@
25793  #include <linux/percpu-rwsem.h>
25794  #include <linux/wait.h>
25795  #include <linux/cdev.h>
25796 +#include <linux/idr.h>
25797  #include "registers.h"
25799  #define IDXD_DRIVER_VERSION    "1.00"
25801  extern struct kmem_cache *idxd_desc_pool;
25803 +struct idxd_device;
25804 +struct idxd_wq;
25806  #define IDXD_REG_TIMEOUT       50
25807  #define IDXD_DRAIN_TIMEOUT     5000
25809 @@ -33,6 +37,7 @@ struct idxd_device_driver {
25810  struct idxd_irq_entry {
25811         struct idxd_device *idxd;
25812         int id;
25813 +       int vector;
25814         struct llist_head pending_llist;
25815         struct list_head work_list;
25816         /*
25817 @@ -75,10 +80,10 @@ enum idxd_wq_type {
25818  };
25820  struct idxd_cdev {
25821 +       struct idxd_wq *wq;
25822         struct cdev cdev;
25823 -       struct device *dev;
25824 +       struct device dev;
25825         int minor;
25826 -       struct wait_queue_head err_queue;
25827  };
25829  #define IDXD_ALLOCATED_BATCH_SIZE      128U
25830 @@ -96,10 +101,16 @@ enum idxd_complete_type {
25831         IDXD_COMPLETE_DEV_FAIL,
25832  };
25834 +struct idxd_dma_chan {
25835 +       struct dma_chan chan;
25836 +       struct idxd_wq *wq;
25839  struct idxd_wq {
25840         void __iomem *portal;
25841         struct device conf_dev;
25842 -       struct idxd_cdev idxd_cdev;
25843 +       struct idxd_cdev *idxd_cdev;
25844 +       struct wait_queue_head err_queue;
25845         struct idxd_device *idxd;
25846         int id;
25847         enum idxd_wq_type type;
25848 @@ -125,7 +136,7 @@ struct idxd_wq {
25849         int compls_size;
25850         struct idxd_desc **descs;
25851         struct sbitmap_queue sbq;
25852 -       struct dma_chan dma_chan;
25853 +       struct idxd_dma_chan *idxd_chan;
25854         char name[WQ_NAME_SIZE + 1];
25855         u64 max_xfer_bytes;
25856         u32 max_batch_size;
25857 @@ -162,6 +173,11 @@ enum idxd_device_flag {
25858         IDXD_FLAG_PASID_ENABLED,
25859  };
25861 +struct idxd_dma_dev {
25862 +       struct idxd_device *idxd;
25863 +       struct dma_device dma;
25866  struct idxd_device {
25867         enum idxd_type type;
25868         struct device conf_dev;
25869 @@ -178,9 +194,9 @@ struct idxd_device {
25871         spinlock_t dev_lock;    /* spinlock for device */
25872         struct completion *cmd_done;
25873 -       struct idxd_group *groups;
25874 -       struct idxd_wq *wqs;
25875 -       struct idxd_engine *engines;
25876 +       struct idxd_group **groups;
25877 +       struct idxd_wq **wqs;
25878 +       struct idxd_engine **engines;
25880         struct iommu_sva *sva;
25881         unsigned int pasid;
25882 @@ -206,11 +222,10 @@ struct idxd_device {
25884         union sw_err_reg sw_err;
25885         wait_queue_head_t cmd_waitq;
25886 -       struct msix_entry *msix_entries;
25887         int num_wq_irqs;
25888         struct idxd_irq_entry *irq_entries;
25890 -       struct dma_device dma_dev;
25891 +       struct idxd_dma_dev *idxd_dma;
25892         struct workqueue_struct *wq;
25893         struct work_struct work;
25894  };
25895 @@ -242,6 +257,43 @@ extern struct bus_type dsa_bus_type;
25896  extern struct bus_type iax_bus_type;
25898  extern bool support_enqcmd;
25899 +extern struct device_type dsa_device_type;
25900 +extern struct device_type iax_device_type;
25901 +extern struct device_type idxd_wq_device_type;
25902 +extern struct device_type idxd_engine_device_type;
25903 +extern struct device_type idxd_group_device_type;
25905 +static inline bool is_dsa_dev(struct device *dev)
25907 +       return dev->type == &dsa_device_type;
25910 +static inline bool is_iax_dev(struct device *dev)
25912 +       return dev->type == &iax_device_type;
25915 +static inline bool is_idxd_dev(struct device *dev)
25917 +       return is_dsa_dev(dev) || is_iax_dev(dev);
25920 +static inline bool is_idxd_wq_dev(struct device *dev)
25922 +       return dev->type == &idxd_wq_device_type;
25925 +static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
25927 +       if (wq->type == IDXD_WQT_KERNEL && strcmp(wq->name, "dmaengine") == 0)
25928 +               return true;
25929 +       return false;
25932 +static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
25934 +       return wq->type == IDXD_WQT_USER;
25937  static inline bool wq_dedicated(struct idxd_wq *wq)
25939 @@ -279,18 +331,6 @@ static inline int idxd_get_wq_portal_full_offset(int wq_id,
25940         return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
25943 -static inline void idxd_set_type(struct idxd_device *idxd)
25945 -       struct pci_dev *pdev = idxd->pdev;
25947 -       if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
25948 -               idxd->type = IDXD_TYPE_DSA;
25949 -       else if (pdev->device == PCI_DEVICE_ID_INTEL_IAX_SPR0)
25950 -               idxd->type = IDXD_TYPE_IAX;
25951 -       else
25952 -               idxd->type = IDXD_TYPE_UNKNOWN;
25955  static inline void idxd_wq_get(struct idxd_wq *wq)
25957         wq->client_count++;
25958 @@ -306,14 +346,16 @@ static inline int idxd_wq_refcount(struct idxd_wq *wq)
25959         return wq->client_count;
25960  };
25962 +struct ida *idxd_ida(struct idxd_device *idxd);
25963  const char *idxd_get_dev_name(struct idxd_device *idxd);
25964  int idxd_register_bus_type(void);
25965  void idxd_unregister_bus_type(void);
25966 -int idxd_setup_sysfs(struct idxd_device *idxd);
25967 -void idxd_cleanup_sysfs(struct idxd_device *idxd);
25968 +int idxd_register_devices(struct idxd_device *idxd);
25969 +void idxd_unregister_devices(struct idxd_device *idxd);
25970  int idxd_register_driver(void);
25971  void idxd_unregister_driver(void);
25972  struct bus_type *idxd_get_bus_type(struct idxd_device *idxd);
25973 +struct device_type *idxd_get_device_type(struct idxd_device *idxd);
25975  /* device interrupt control */
25976  void idxd_msix_perm_setup(struct idxd_device *idxd);
25977 @@ -363,7 +405,6 @@ void idxd_unregister_dma_channel(struct idxd_wq *wq);
25978  void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
25979  void idxd_dma_complete_txd(struct idxd_desc *desc,
25980                            enum idxd_complete_type comp_type);
25981 -dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx);
25983  /* cdev */
25984  int idxd_cdev_register(void);
25985 diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
25986 index 6584b0ec07d5..07cf7977a045 100644
25987 --- a/drivers/dma/idxd/init.c
25988 +++ b/drivers/dma/idxd/init.c
25989 @@ -34,8 +34,7 @@ MODULE_PARM_DESC(sva, "Toggle SVA support on/off");
25991  bool support_enqcmd;
25993 -static struct idr idxd_idrs[IDXD_TYPE_MAX];
25994 -static DEFINE_MUTEX(idxd_idr_lock);
25995 +static struct ida idxd_idas[IDXD_TYPE_MAX];
25997  static struct pci_device_id idxd_pci_tbl[] = {
25998         /* DSA ver 1.0 platforms */
25999 @@ -52,6 +51,11 @@ static char *idxd_name[] = {
26000         "iax"
26001  };
26003 +struct ida *idxd_ida(struct idxd_device *idxd)
26005 +       return &idxd_idas[idxd->type];
26008  const char *idxd_get_dev_name(struct idxd_device *idxd)
26010         return idxd_name[idxd->type];
26011 @@ -61,7 +65,6 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
26013         struct pci_dev *pdev = idxd->pdev;
26014         struct device *dev = &pdev->dev;
26015 -       struct msix_entry *msix;
26016         struct idxd_irq_entry *irq_entry;
26017         int i, msixcnt;
26018         int rc = 0;
26019 @@ -69,23 +72,13 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
26020         msixcnt = pci_msix_vec_count(pdev);
26021         if (msixcnt < 0) {
26022                 dev_err(dev, "Not MSI-X interrupt capable.\n");
26023 -               goto err_no_irq;
26024 -       }
26026 -       idxd->msix_entries = devm_kzalloc(dev, sizeof(struct msix_entry) *
26027 -                       msixcnt, GFP_KERNEL);
26028 -       if (!idxd->msix_entries) {
26029 -               rc = -ENOMEM;
26030 -               goto err_no_irq;
26031 +               return -ENOSPC;
26032         }
26034 -       for (i = 0; i < msixcnt; i++)
26035 -               idxd->msix_entries[i].entry = i;
26037 -       rc = pci_enable_msix_exact(pdev, idxd->msix_entries, msixcnt);
26038 -       if (rc) {
26039 -               dev_err(dev, "Failed enabling %d MSIX entries.\n", msixcnt);
26040 -               goto err_no_irq;
26041 +       rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
26042 +       if (rc != msixcnt) {
26043 +               dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc);
26044 +               return -ENOSPC;
26045         }
26046         dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);
26048 @@ -93,119 +86,236 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
26049          * We implement 1 completion list per MSI-X entry except for
26050          * entry 0, which is for errors and others.
26051          */
26052 -       idxd->irq_entries = devm_kcalloc(dev, msixcnt,
26053 -                                        sizeof(struct idxd_irq_entry),
26054 -                                        GFP_KERNEL);
26055 +       idxd->irq_entries = kcalloc_node(msixcnt, sizeof(struct idxd_irq_entry),
26056 +                                        GFP_KERNEL, dev_to_node(dev));
26057         if (!idxd->irq_entries) {
26058                 rc = -ENOMEM;
26059 -               goto err_no_irq;
26060 +               goto err_irq_entries;
26061         }
26063         for (i = 0; i < msixcnt; i++) {
26064                 idxd->irq_entries[i].id = i;
26065                 idxd->irq_entries[i].idxd = idxd;
26066 +               idxd->irq_entries[i].vector = pci_irq_vector(pdev, i);
26067                 spin_lock_init(&idxd->irq_entries[i].list_lock);
26068         }
26070 -       msix = &idxd->msix_entries[0];
26071         irq_entry = &idxd->irq_entries[0];
26072 -       rc = devm_request_threaded_irq(dev, msix->vector, idxd_irq_handler,
26073 -                                      idxd_misc_thread, 0, "idxd-misc",
26074 -                                      irq_entry);
26075 +       rc = request_threaded_irq(irq_entry->vector, idxd_irq_handler, idxd_misc_thread,
26076 +                                 0, "idxd-misc", irq_entry);
26077         if (rc < 0) {
26078                 dev_err(dev, "Failed to allocate misc interrupt.\n");
26079 -               goto err_no_irq;
26080 +               goto err_misc_irq;
26081         }
26083 -       dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n",
26084 -               msix->vector);
26085 +       dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", irq_entry->vector);
26087         /* first MSI-X entry is not for wq interrupts */
26088         idxd->num_wq_irqs = msixcnt - 1;
26090         for (i = 1; i < msixcnt; i++) {
26091 -               msix = &idxd->msix_entries[i];
26092                 irq_entry = &idxd->irq_entries[i];
26094                 init_llist_head(&idxd->irq_entries[i].pending_llist);
26095                 INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
26096 -               rc = devm_request_threaded_irq(dev, msix->vector,
26097 -                                              idxd_irq_handler,
26098 -                                              idxd_wq_thread, 0,
26099 -                                              "idxd-portal", irq_entry);
26100 +               rc = request_threaded_irq(irq_entry->vector, idxd_irq_handler,
26101 +                                         idxd_wq_thread, 0, "idxd-portal", irq_entry);
26102                 if (rc < 0) {
26103 -                       dev_err(dev, "Failed to allocate irq %d.\n",
26104 -                               msix->vector);
26105 -                       goto err_no_irq;
26106 +                       dev_err(dev, "Failed to allocate irq %d.\n", irq_entry->vector);
26107 +                       goto err_wq_irqs;
26108                 }
26109 -               dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n",
26110 -                       i, msix->vector);
26111 +               dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", i, irq_entry->vector);
26112         }
26114         idxd_unmask_error_interrupts(idxd);
26115         idxd_msix_perm_setup(idxd);
26116         return 0;
26118 - err_no_irq:
26119 + err_wq_irqs:
26120 +       while (--i >= 0) {
26121 +               irq_entry = &idxd->irq_entries[i];
26122 +               free_irq(irq_entry->vector, irq_entry);
26123 +       }
26124 + err_misc_irq:
26125         /* Disable error interrupt generation */
26126         idxd_mask_error_interrupts(idxd);
26127 -       pci_disable_msix(pdev);
26128 + err_irq_entries:
26129 +       pci_free_irq_vectors(pdev);
26130         dev_err(dev, "No usable interrupts\n");
26131         return rc;
26134 -static int idxd_setup_internals(struct idxd_device *idxd)
26135 +static int idxd_setup_wqs(struct idxd_device *idxd)
26137         struct device *dev = &idxd->pdev->dev;
26138 -       int i;
26140 -       init_waitqueue_head(&idxd->cmd_waitq);
26141 -       idxd->groups = devm_kcalloc(dev, idxd->max_groups,
26142 -                                   sizeof(struct idxd_group), GFP_KERNEL);
26143 -       if (!idxd->groups)
26144 -               return -ENOMEM;
26146 -       for (i = 0; i < idxd->max_groups; i++) {
26147 -               idxd->groups[i].idxd = idxd;
26148 -               idxd->groups[i].id = i;
26149 -               idxd->groups[i].tc_a = -1;
26150 -               idxd->groups[i].tc_b = -1;
26151 -       }
26152 +       struct idxd_wq *wq;
26153 +       int i, rc;
26155 -       idxd->wqs = devm_kcalloc(dev, idxd->max_wqs, sizeof(struct idxd_wq),
26156 -                                GFP_KERNEL);
26157 +       idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
26158 +                                GFP_KERNEL, dev_to_node(dev));
26159         if (!idxd->wqs)
26160                 return -ENOMEM;
26162 -       idxd->engines = devm_kcalloc(dev, idxd->max_engines,
26163 -                                    sizeof(struct idxd_engine), GFP_KERNEL);
26164 -       if (!idxd->engines)
26165 -               return -ENOMEM;
26167         for (i = 0; i < idxd->max_wqs; i++) {
26168 -               struct idxd_wq *wq = &idxd->wqs[i];
26169 +               wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
26170 +               if (!wq) {
26171 +                       rc = -ENOMEM;
26172 +                       goto err;
26173 +               }
26175                 wq->id = i;
26176                 wq->idxd = idxd;
26177 +               device_initialize(&wq->conf_dev);
26178 +               wq->conf_dev.parent = &idxd->conf_dev;
26179 +               wq->conf_dev.bus = idxd_get_bus_type(idxd);
26180 +               wq->conf_dev.type = &idxd_wq_device_type;
26181 +               rc = dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
26182 +               if (rc < 0) {
26183 +                       put_device(&wq->conf_dev);
26184 +                       goto err;
26185 +               }
26187                 mutex_init(&wq->wq_lock);
26188 -               wq->idxd_cdev.minor = -1;
26189 +               init_waitqueue_head(&wq->err_queue);
26190                 wq->max_xfer_bytes = idxd->max_xfer_bytes;
26191                 wq->max_batch_size = idxd->max_batch_size;
26192 -               wq->wqcfg = devm_kzalloc(dev, idxd->wqcfg_size, GFP_KERNEL);
26193 -               if (!wq->wqcfg)
26194 -                       return -ENOMEM;
26195 +               wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
26196 +               if (!wq->wqcfg) {
26197 +                       put_device(&wq->conf_dev);
26198 +                       rc = -ENOMEM;
26199 +                       goto err;
26200 +               }
26201 +               idxd->wqs[i] = wq;
26202         }
26204 +       return 0;
26206 + err:
26207 +       while (--i >= 0)
26208 +               put_device(&idxd->wqs[i]->conf_dev);
26209 +       return rc;
26212 +static int idxd_setup_engines(struct idxd_device *idxd)
26214 +       struct idxd_engine *engine;
26215 +       struct device *dev = &idxd->pdev->dev;
26216 +       int i, rc;
26218 +       idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
26219 +                                    GFP_KERNEL, dev_to_node(dev));
26220 +       if (!idxd->engines)
26221 +               return -ENOMEM;
26223         for (i = 0; i < idxd->max_engines; i++) {
26224 -               idxd->engines[i].idxd = idxd;
26225 -               idxd->engines[i].id = i;
26226 +               engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev));
26227 +               if (!engine) {
26228 +                       rc = -ENOMEM;
26229 +                       goto err;
26230 +               }
26232 +               engine->id = i;
26233 +               engine->idxd = idxd;
26234 +               device_initialize(&engine->conf_dev);
26235 +               engine->conf_dev.parent = &idxd->conf_dev;
26236 +               engine->conf_dev.type = &idxd_engine_device_type;
26237 +               rc = dev_set_name(&engine->conf_dev, "engine%d.%d", idxd->id, engine->id);
26238 +               if (rc < 0) {
26239 +                       put_device(&engine->conf_dev);
26240 +                       goto err;
26241 +               }
26243 +               idxd->engines[i] = engine;
26244         }
26246 -       idxd->wq = create_workqueue(dev_name(dev));
26247 -       if (!idxd->wq)
26248 +       return 0;
26250 + err:
26251 +       while (--i >= 0)
26252 +               put_device(&idxd->engines[i]->conf_dev);
26253 +       return rc;
26256 +static int idxd_setup_groups(struct idxd_device *idxd)
26258 +       struct device *dev = &idxd->pdev->dev;
26259 +       struct idxd_group *group;
26260 +       int i, rc;
26262 +       idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *),
26263 +                                   GFP_KERNEL, dev_to_node(dev));
26264 +       if (!idxd->groups)
26265                 return -ENOMEM;
26267 +       for (i = 0; i < idxd->max_groups; i++) {
26268 +               group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev));
26269 +               if (!group) {
26270 +                       rc = -ENOMEM;
26271 +                       goto err;
26272 +               }
26274 +               group->id = i;
26275 +               group->idxd = idxd;
26276 +               device_initialize(&group->conf_dev);
26277 +               group->conf_dev.parent = &idxd->conf_dev;
26278 +               group->conf_dev.bus = idxd_get_bus_type(idxd);
26279 +               group->conf_dev.type = &idxd_group_device_type;
26280 +               rc = dev_set_name(&group->conf_dev, "group%d.%d", idxd->id, group->id);
26281 +               if (rc < 0) {
26282 +                       put_device(&group->conf_dev);
26283 +                       goto err;
26284 +               }
26286 +               idxd->groups[i] = group;
26287 +               group->tc_a = -1;
26288 +               group->tc_b = -1;
26289 +       }
26291 +       return 0;
26293 + err:
26294 +       while (--i >= 0)
26295 +               put_device(&idxd->groups[i]->conf_dev);
26296 +       return rc;
26299 +static int idxd_setup_internals(struct idxd_device *idxd)
26301 +       struct device *dev = &idxd->pdev->dev;
26302 +       int rc, i;
26304 +       init_waitqueue_head(&idxd->cmd_waitq);
26306 +       rc = idxd_setup_wqs(idxd);
26307 +       if (rc < 0)
26308 +               return rc;
26310 +       rc = idxd_setup_engines(idxd);
26311 +       if (rc < 0)
26312 +               goto err_engine;
26314 +       rc = idxd_setup_groups(idxd);
26315 +       if (rc < 0)
26316 +               goto err_group;
26318 +       idxd->wq = create_workqueue(dev_name(dev));
26319 +       if (!idxd->wq) {
26320 +               rc = -ENOMEM;
26321 +               goto err_wkq_create;
26322 +       }
26324         return 0;
26326 + err_wkq_create:
26327 +       for (i = 0; i < idxd->max_groups; i++)
26328 +               put_device(&idxd->groups[i]->conf_dev);
26329 + err_group:
26330 +       for (i = 0; i < idxd->max_engines; i++)
26331 +               put_device(&idxd->engines[i]->conf_dev);
26332 + err_engine:
26333 +       for (i = 0; i < idxd->max_wqs; i++)
26334 +               put_device(&idxd->wqs[i]->conf_dev);
26335 +       return rc;
26338  static void idxd_read_table_offsets(struct idxd_device *idxd)
26339 @@ -275,16 +385,44 @@ static void idxd_read_caps(struct idxd_device *idxd)
26340         }
26343 +static inline void idxd_set_type(struct idxd_device *idxd)
26345 +       struct pci_dev *pdev = idxd->pdev;
26347 +       if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
26348 +               idxd->type = IDXD_TYPE_DSA;
26349 +       else if (pdev->device == PCI_DEVICE_ID_INTEL_IAX_SPR0)
26350 +               idxd->type = IDXD_TYPE_IAX;
26351 +       else
26352 +               idxd->type = IDXD_TYPE_UNKNOWN;
26355  static struct idxd_device *idxd_alloc(struct pci_dev *pdev)
26357         struct device *dev = &pdev->dev;
26358         struct idxd_device *idxd;
26359 +       int rc;
26361 -       idxd = devm_kzalloc(dev, sizeof(struct idxd_device), GFP_KERNEL);
26362 +       idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev));
26363         if (!idxd)
26364                 return NULL;
26366         idxd->pdev = pdev;
26367 +       idxd_set_type(idxd);
26368 +       idxd->id = ida_alloc(idxd_ida(idxd), GFP_KERNEL);
26369 +       if (idxd->id < 0)
26370 +               return NULL;
26372 +       device_initialize(&idxd->conf_dev);
26373 +       idxd->conf_dev.parent = dev;
26374 +       idxd->conf_dev.bus = idxd_get_bus_type(idxd);
26375 +       idxd->conf_dev.type = idxd_get_device_type(idxd);
26376 +       rc = dev_set_name(&idxd->conf_dev, "%s%d", idxd_get_dev_name(idxd), idxd->id);
26377 +       if (rc < 0) {
26378 +               put_device(&idxd->conf_dev);
26379 +               return NULL;
26380 +       }
26382         spin_lock_init(&idxd->dev_lock);
26384         return idxd;
26385 @@ -352,31 +490,20 @@ static int idxd_probe(struct idxd_device *idxd)
26387         rc = idxd_setup_internals(idxd);
26388         if (rc)
26389 -               goto err_setup;
26390 +               goto err;
26392         rc = idxd_setup_interrupts(idxd);
26393         if (rc)
26394 -               goto err_setup;
26395 +               goto err;
26397         dev_dbg(dev, "IDXD interrupt setup complete.\n");
26399 -       mutex_lock(&idxd_idr_lock);
26400 -       idxd->id = idr_alloc(&idxd_idrs[idxd->type], idxd, 0, 0, GFP_KERNEL);
26401 -       mutex_unlock(&idxd_idr_lock);
26402 -       if (idxd->id < 0) {
26403 -               rc = -ENOMEM;
26404 -               goto err_idr_fail;
26405 -       }
26407         idxd->major = idxd_cdev_get_major(idxd);
26409         dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
26410         return 0;
26412 - err_idr_fail:
26413 -       idxd_mask_error_interrupts(idxd);
26414 -       idxd_mask_msix_vectors(idxd);
26415 - err_setup:
26416 + err:
26417         if (device_pasid_enabled(idxd))
26418                 idxd_disable_system_pasid(idxd);
26419         return rc;
26420 @@ -396,34 +523,37 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
26421         struct idxd_device *idxd;
26422         int rc;
26424 -       rc = pcim_enable_device(pdev);
26425 +       rc = pci_enable_device(pdev);
26426         if (rc)
26427                 return rc;
26429         dev_dbg(dev, "Alloc IDXD context\n");
26430         idxd = idxd_alloc(pdev);
26431 -       if (!idxd)
26432 -               return -ENOMEM;
26433 +       if (!idxd) {
26434 +               rc = -ENOMEM;
26435 +               goto err_idxd_alloc;
26436 +       }
26438         dev_dbg(dev, "Mapping BARs\n");
26439 -       idxd->reg_base = pcim_iomap(pdev, IDXD_MMIO_BAR, 0);
26440 -       if (!idxd->reg_base)
26441 -               return -ENOMEM;
26442 +       idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
26443 +       if (!idxd->reg_base) {
26444 +               rc = -ENOMEM;
26445 +               goto err_iomap;
26446 +       }
26448         dev_dbg(dev, "Set DMA masks\n");
26449         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
26450         if (rc)
26451                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
26452         if (rc)
26453 -               return rc;
26454 +               goto err;
26456         rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
26457         if (rc)
26458                 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
26459         if (rc)
26460 -               return rc;
26461 +               goto err;
26463 -       idxd_set_type(idxd);
26465         idxd_type_init(idxd);
26467 @@ -435,13 +565,13 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
26468         rc = idxd_probe(idxd);
26469         if (rc) {
26470                 dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
26471 -               return -ENODEV;
26472 +               goto err;
26473         }
26475 -       rc = idxd_setup_sysfs(idxd);
26476 +       rc = idxd_register_devices(idxd);
26477         if (rc) {
26478                 dev_err(dev, "IDXD sysfs setup failed\n");
26479 -               return -ENODEV;
26480 +               goto err;
26481         }
26483         idxd->state = IDXD_DEV_CONF_READY;
26484 @@ -450,6 +580,14 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
26485                  idxd->hw.version);
26487         return 0;
26489 + err:
26490 +       pci_iounmap(pdev, idxd->reg_base);
26491 + err_iomap:
26492 +       put_device(&idxd->conf_dev);
26493 + err_idxd_alloc:
26494 +       pci_disable_device(pdev);
26495 +       return rc;
26498  static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
26499 @@ -495,7 +633,8 @@ static void idxd_shutdown(struct pci_dev *pdev)
26501         for (i = 0; i < msixcnt; i++) {
26502                 irq_entry = &idxd->irq_entries[i];
26503 -               synchronize_irq(idxd->msix_entries[i].vector);
26504 +               synchronize_irq(irq_entry->vector);
26505 +               free_irq(irq_entry->vector, irq_entry);
26506                 if (i == 0)
26507                         continue;
26508                 idxd_flush_pending_llist(irq_entry);
26509 @@ -503,6 +642,9 @@ static void idxd_shutdown(struct pci_dev *pdev)
26510         }
26512         idxd_msix_perm_clear(idxd);
26513 +       pci_free_irq_vectors(pdev);
26514 +       pci_iounmap(pdev, idxd->reg_base);
26515 +       pci_disable_device(pdev);
26516         destroy_workqueue(idxd->wq);
26519 @@ -511,13 +653,10 @@ static void idxd_remove(struct pci_dev *pdev)
26520         struct idxd_device *idxd = pci_get_drvdata(pdev);
26522         dev_dbg(&pdev->dev, "%s called\n", __func__);
26523 -       idxd_cleanup_sysfs(idxd);
26524         idxd_shutdown(pdev);
26525         if (device_pasid_enabled(idxd))
26526                 idxd_disable_system_pasid(idxd);
26527 -       mutex_lock(&idxd_idr_lock);
26528 -       idr_remove(&idxd_idrs[idxd->type], idxd->id);
26529 -       mutex_unlock(&idxd_idr_lock);
26530 +       idxd_unregister_devices(idxd);
26533  static struct pci_driver idxd_pci_driver = {
26534 @@ -547,7 +686,7 @@ static int __init idxd_init_module(void)
26535                 support_enqcmd = true;
26537         for (i = 0; i < IDXD_TYPE_MAX; i++)
26538 -               idr_init(&idxd_idrs[i]);
26539 +               ida_init(&idxd_idas[i]);
26541         err = idxd_register_bus_type();
26542         if (err < 0)
26543 diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
26544 index f1463fc58112..fc0781e3f36d 100644
26545 --- a/drivers/dma/idxd/irq.c
26546 +++ b/drivers/dma/idxd/irq.c
26547 @@ -45,7 +45,7 @@ static void idxd_device_reinit(struct work_struct *work)
26548                 goto out;
26550         for (i = 0; i < idxd->max_wqs; i++) {
26551 -               struct idxd_wq *wq = &idxd->wqs[i];
26552 +               struct idxd_wq *wq = idxd->wqs[i];
26554                 if (wq->state == IDXD_WQ_ENABLED) {
26555                         rc = idxd_wq_enable(wq);
26556 @@ -130,18 +130,18 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
26558                 if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
26559                         int id = idxd->sw_err.wq_idx;
26560 -                       struct idxd_wq *wq = &idxd->wqs[id];
26561 +                       struct idxd_wq *wq = idxd->wqs[id];
26563                         if (wq->type == IDXD_WQT_USER)
26564 -                               wake_up_interruptible(&wq->idxd_cdev.err_queue);
26565 +                               wake_up_interruptible(&wq->err_queue);
26566                 } else {
26567                         int i;
26569                         for (i = 0; i < idxd->max_wqs; i++) {
26570 -                               struct idxd_wq *wq = &idxd->wqs[i];
26571 +                               struct idxd_wq *wq = idxd->wqs[i];
26573                                 if (wq->type == IDXD_WQT_USER)
26574 -                                       wake_up_interruptible(&wq->idxd_cdev.err_queue);
26575 +                                       wake_up_interruptible(&wq->err_queue);
26576                         }
26577                 }
26579 diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
26580 index 18bf4d148989..9586b55abce5 100644
26581 --- a/drivers/dma/idxd/sysfs.c
26582 +++ b/drivers/dma/idxd/sysfs.c
26583 @@ -16,69 +16,6 @@ static char *idxd_wq_type_names[] = {
26584         [IDXD_WQT_USER]         = "user",
26585  };
26587 -static void idxd_conf_device_release(struct device *dev)
26589 -       dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
26592 -static struct device_type idxd_group_device_type = {
26593 -       .name = "group",
26594 -       .release = idxd_conf_device_release,
26597 -static struct device_type idxd_wq_device_type = {
26598 -       .name = "wq",
26599 -       .release = idxd_conf_device_release,
26602 -static struct device_type idxd_engine_device_type = {
26603 -       .name = "engine",
26604 -       .release = idxd_conf_device_release,
26607 -static struct device_type dsa_device_type = {
26608 -       .name = "dsa",
26609 -       .release = idxd_conf_device_release,
26612 -static struct device_type iax_device_type = {
26613 -       .name = "iax",
26614 -       .release = idxd_conf_device_release,
26617 -static inline bool is_dsa_dev(struct device *dev)
26619 -       return dev ? dev->type == &dsa_device_type : false;
26622 -static inline bool is_iax_dev(struct device *dev)
26624 -       return dev ? dev->type == &iax_device_type : false;
26627 -static inline bool is_idxd_dev(struct device *dev)
26629 -       return is_dsa_dev(dev) || is_iax_dev(dev);
26632 -static inline bool is_idxd_wq_dev(struct device *dev)
26634 -       return dev ? dev->type == &idxd_wq_device_type : false;
26637 -static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
26639 -       if (wq->type == IDXD_WQT_KERNEL &&
26640 -           strcmp(wq->name, "dmaengine") == 0)
26641 -               return true;
26642 -       return false;
26645 -static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
26647 -       return wq->type == IDXD_WQT_USER;
26650  static int idxd_config_bus_match(struct device *dev,
26651                                  struct device_driver *drv)
26653 @@ -322,7 +259,7 @@ static int idxd_config_bus_remove(struct device *dev)
26654                 dev_dbg(dev, "%s removing dev %s\n", __func__,
26655                         dev_name(&idxd->conf_dev));
26656                 for (i = 0; i < idxd->max_wqs; i++) {
26657 -                       struct idxd_wq *wq = &idxd->wqs[i];
26658 +                       struct idxd_wq *wq = idxd->wqs[i];
26660                         if (wq->state == IDXD_WQ_DISABLED)
26661                                 continue;
26662 @@ -334,7 +271,7 @@ static int idxd_config_bus_remove(struct device *dev)
26663                 idxd_unregister_dma_device(idxd);
26664                 rc = idxd_device_disable(idxd);
26665                 for (i = 0; i < idxd->max_wqs; i++) {
26666 -                       struct idxd_wq *wq = &idxd->wqs[i];
26667 +                       struct idxd_wq *wq = idxd->wqs[i];
26669                         mutex_lock(&wq->wq_lock);
26670                         idxd_wq_disable_cleanup(wq);
26671 @@ -405,7 +342,7 @@ struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
26672         return idxd_bus_types[idxd->type];
26675 -static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
26676 +struct device_type *idxd_get_device_type(struct idxd_device *idxd)
26678         if (idxd->type == IDXD_TYPE_DSA)
26679                 return &dsa_device_type;
26680 @@ -488,7 +425,7 @@ static ssize_t engine_group_id_store(struct device *dev,
26682         if (prevg)
26683                 prevg->num_engines--;
26684 -       engine->group = &idxd->groups[id];
26685 +       engine->group = idxd->groups[id];
26686         engine->group->num_engines++;
26688         return count;
26689 @@ -512,6 +449,19 @@ static const struct attribute_group *idxd_engine_attribute_groups[] = {
26690         NULL,
26691  };
26693 +static void idxd_conf_engine_release(struct device *dev)
26695 +       struct idxd_engine *engine = container_of(dev, struct idxd_engine, conf_dev);
26697 +       kfree(engine);
26700 +struct device_type idxd_engine_device_type = {
26701 +       .name = "engine",
26702 +       .release = idxd_conf_engine_release,
26703 +       .groups = idxd_engine_attribute_groups,
26706  /* Group attributes */
26708  static void idxd_set_free_tokens(struct idxd_device *idxd)
26709 @@ -519,7 +469,7 @@ static void idxd_set_free_tokens(struct idxd_device *idxd)
26710         int i, tokens;
26712         for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
26713 -               struct idxd_group *g = &idxd->groups[i];
26714 +               struct idxd_group *g = idxd->groups[i];
26716                 tokens += g->tokens_reserved;
26717         }
26718 @@ -674,7 +624,7 @@ static ssize_t group_engines_show(struct device *dev,
26719         struct idxd_device *idxd = group->idxd;
26721         for (i = 0; i < idxd->max_engines; i++) {
26722 -               struct idxd_engine *engine = &idxd->engines[i];
26723 +               struct idxd_engine *engine = idxd->engines[i];
26725                 if (!engine->group)
26726                         continue;
26727 @@ -703,7 +653,7 @@ static ssize_t group_work_queues_show(struct device *dev,
26728         struct idxd_device *idxd = group->idxd;
26730         for (i = 0; i < idxd->max_wqs; i++) {
26731 -               struct idxd_wq *wq = &idxd->wqs[i];
26732 +               struct idxd_wq *wq = idxd->wqs[i];
26734                 if (!wq->group)
26735                         continue;
26736 @@ -824,6 +774,19 @@ static const struct attribute_group *idxd_group_attribute_groups[] = {
26737         NULL,
26738  };
26740 +static void idxd_conf_group_release(struct device *dev)
26742 +       struct idxd_group *group = container_of(dev, struct idxd_group, conf_dev);
26744 +       kfree(group);
26747 +struct device_type idxd_group_device_type = {
26748 +       .name = "group",
26749 +       .release = idxd_conf_group_release,
26750 +       .groups = idxd_group_attribute_groups,
26753  /* IDXD work queue attribs */
26754  static ssize_t wq_clients_show(struct device *dev,
26755                                struct device_attribute *attr, char *buf)
26756 @@ -896,7 +859,7 @@ static ssize_t wq_group_id_store(struct device *dev,
26757                 return count;
26758         }
26760 -       group = &idxd->groups[id];
26761 +       group = idxd->groups[id];
26762         prevg = wq->group;
26764         if (prevg)
26765 @@ -960,7 +923,7 @@ static int total_claimed_wq_size(struct idxd_device *idxd)
26766         int wq_size = 0;
26768         for (i = 0; i < idxd->max_wqs; i++) {
26769 -               struct idxd_wq *wq = &idxd->wqs[i];
26770 +               struct idxd_wq *wq = idxd->wqs[i];
26772                 wq_size += wq->size;
26773         }
26774 @@ -1206,8 +1169,16 @@ static ssize_t wq_cdev_minor_show(struct device *dev,
26775                                   struct device_attribute *attr, char *buf)
26777         struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
26778 +       int minor = -1;
26780 -       return sprintf(buf, "%d\n", wq->idxd_cdev.minor);
26781 +       mutex_lock(&wq->wq_lock);
26782 +       if (wq->idxd_cdev)
26783 +               minor = wq->idxd_cdev->minor;
26784 +       mutex_unlock(&wq->wq_lock);
26786 +       if (minor == -1)
26787 +               return -ENXIO;
26788 +       return sysfs_emit(buf, "%d\n", minor);
26791  static struct device_attribute dev_attr_wq_cdev_minor =
26792 @@ -1356,6 +1327,20 @@ static const struct attribute_group *idxd_wq_attribute_groups[] = {
26793         NULL,
26794  };
26796 +static void idxd_conf_wq_release(struct device *dev)
26798 +       struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
26800 +       kfree(wq->wqcfg);
26801 +       kfree(wq);
26804 +struct device_type idxd_wq_device_type = {
26805 +       .name = "wq",
26806 +       .release = idxd_conf_wq_release,
26807 +       .groups = idxd_wq_attribute_groups,
26810  /* IDXD device attribs */
26811  static ssize_t version_show(struct device *dev, struct device_attribute *attr,
26812                             char *buf)
26813 @@ -1486,7 +1471,7 @@ static ssize_t clients_show(struct device *dev,
26815         spin_lock_irqsave(&idxd->dev_lock, flags);
26816         for (i = 0; i < idxd->max_wqs; i++) {
26817 -               struct idxd_wq *wq = &idxd->wqs[i];
26818 +               struct idxd_wq *wq = idxd->wqs[i];
26820                 count += wq->client_count;
26821         }
26822 @@ -1644,183 +1629,160 @@ static const struct attribute_group *idxd_attribute_groups[] = {
26823         NULL,
26824  };
26826 -static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
26827 +static void idxd_conf_device_release(struct device *dev)
26829 -       struct device *dev = &idxd->pdev->dev;
26830 -       int i, rc;
26831 +       struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);
26833 +       kfree(idxd->groups);
26834 +       kfree(idxd->wqs);
26835 +       kfree(idxd->engines);
26836 +       kfree(idxd->irq_entries);
26837 +       ida_free(idxd_ida(idxd), idxd->id);
26838 +       kfree(idxd);
26841 +struct device_type dsa_device_type = {
26842 +       .name = "dsa",
26843 +       .release = idxd_conf_device_release,
26844 +       .groups = idxd_attribute_groups,
26847 +struct device_type iax_device_type = {
26848 +       .name = "iax",
26849 +       .release = idxd_conf_device_release,
26850 +       .groups = idxd_attribute_groups,
26853 +static int idxd_register_engine_devices(struct idxd_device *idxd)
26855 +       int i, j, rc;
26857         for (i = 0; i < idxd->max_engines; i++) {
26858 -               struct idxd_engine *engine = &idxd->engines[i];
26860 -               engine->conf_dev.parent = &idxd->conf_dev;
26861 -               dev_set_name(&engine->conf_dev, "engine%d.%d",
26862 -                            idxd->id, engine->id);
26863 -               engine->conf_dev.bus = idxd_get_bus_type(idxd);
26864 -               engine->conf_dev.groups = idxd_engine_attribute_groups;
26865 -               engine->conf_dev.type = &idxd_engine_device_type;
26866 -               dev_dbg(dev, "Engine device register: %s\n",
26867 -                       dev_name(&engine->conf_dev));
26868 -               rc = device_register(&engine->conf_dev);
26869 -               if (rc < 0) {
26870 -                       put_device(&engine->conf_dev);
26871 +               struct idxd_engine *engine = idxd->engines[i];
26873 +               rc = device_add(&engine->conf_dev);
26874 +               if (rc < 0)
26875                         goto cleanup;
26876 -               }
26877         }
26879         return 0;
26881  cleanup:
26882 -       while (i--) {
26883 -               struct idxd_engine *engine = &idxd->engines[i];
26884 +       j = i - 1;
26885 +       for (; i < idxd->max_engines; i++)
26886 +               put_device(&idxd->engines[i]->conf_dev);
26888 -               device_unregister(&engine->conf_dev);
26889 -       }
26890 +       while (j--)
26891 +               device_unregister(&idxd->engines[j]->conf_dev);
26892         return rc;
26895 -static int idxd_setup_group_sysfs(struct idxd_device *idxd)
26896 +static int idxd_register_group_devices(struct idxd_device *idxd)
26898 -       struct device *dev = &idxd->pdev->dev;
26899 -       int i, rc;
26900 +       int i, j, rc;
26902         for (i = 0; i < idxd->max_groups; i++) {
26903 -               struct idxd_group *group = &idxd->groups[i];
26905 -               group->conf_dev.parent = &idxd->conf_dev;
26906 -               dev_set_name(&group->conf_dev, "group%d.%d",
26907 -                            idxd->id, group->id);
26908 -               group->conf_dev.bus = idxd_get_bus_type(idxd);
26909 -               group->conf_dev.groups = idxd_group_attribute_groups;
26910 -               group->conf_dev.type = &idxd_group_device_type;
26911 -               dev_dbg(dev, "Group device register: %s\n",
26912 -                       dev_name(&group->conf_dev));
26913 -               rc = device_register(&group->conf_dev);
26914 -               if (rc < 0) {
26915 -                       put_device(&group->conf_dev);
26916 +               struct idxd_group *group = idxd->groups[i];
26918 +               rc = device_add(&group->conf_dev);
26919 +               if (rc < 0)
26920                         goto cleanup;
26921 -               }
26922         }
26924         return 0;
26926  cleanup:
26927 -       while (i--) {
26928 -               struct idxd_group *group = &idxd->groups[i];
26929 +       j = i - 1;
26930 +       for (; i < idxd->max_groups; i++)
26931 +               put_device(&idxd->groups[i]->conf_dev);
26933 -               device_unregister(&group->conf_dev);
26934 -       }
26935 +       while (j--)
26936 +               device_unregister(&idxd->groups[j]->conf_dev);
26937         return rc;
26940 -static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
26941 +static int idxd_register_wq_devices(struct idxd_device *idxd)
26943 -       struct device *dev = &idxd->pdev->dev;
26944 -       int i, rc;
26945 +       int i, rc, j;
26947         for (i = 0; i < idxd->max_wqs; i++) {
26948 -               struct idxd_wq *wq = &idxd->wqs[i];
26950 -               wq->conf_dev.parent = &idxd->conf_dev;
26951 -               dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
26952 -               wq->conf_dev.bus = idxd_get_bus_type(idxd);
26953 -               wq->conf_dev.groups = idxd_wq_attribute_groups;
26954 -               wq->conf_dev.type = &idxd_wq_device_type;
26955 -               dev_dbg(dev, "WQ device register: %s\n",
26956 -                       dev_name(&wq->conf_dev));
26957 -               rc = device_register(&wq->conf_dev);
26958 -               if (rc < 0) {
26959 -                       put_device(&wq->conf_dev);
26960 +               struct idxd_wq *wq = idxd->wqs[i];
26962 +               rc = device_add(&wq->conf_dev);
26963 +               if (rc < 0)
26964                         goto cleanup;
26965 -               }
26966         }
26968         return 0;
26970  cleanup:
26971 -       while (i--) {
26972 -               struct idxd_wq *wq = &idxd->wqs[i];
26973 +       j = i - 1;
26974 +       for (; i < idxd->max_wqs; i++)
26975 +               put_device(&idxd->wqs[i]->conf_dev);
26977 -               device_unregister(&wq->conf_dev);
26978 -       }
26979 +       while (j--)
26980 +               device_unregister(&idxd->wqs[j]->conf_dev);
26981         return rc;
26984 -static int idxd_setup_device_sysfs(struct idxd_device *idxd)
26985 +int idxd_register_devices(struct idxd_device *idxd)
26987         struct device *dev = &idxd->pdev->dev;
26988 -       int rc;
26989 -       char devname[IDXD_NAME_SIZE];
26991 -       sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
26992 -       idxd->conf_dev.parent = dev;
26993 -       dev_set_name(&idxd->conf_dev, "%s", devname);
26994 -       idxd->conf_dev.bus = idxd_get_bus_type(idxd);
26995 -       idxd->conf_dev.groups = idxd_attribute_groups;
26996 -       idxd->conf_dev.type = idxd_get_device_type(idxd);
26998 -       dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
26999 -       rc = device_register(&idxd->conf_dev);
27000 -       if (rc < 0) {
27001 -               put_device(&idxd->conf_dev);
27002 -               return rc;
27003 -       }
27004 +       int rc, i;
27006 -       return 0;
27009 -int idxd_setup_sysfs(struct idxd_device *idxd)
27011 -       struct device *dev = &idxd->pdev->dev;
27012 -       int rc;
27014 -       rc = idxd_setup_device_sysfs(idxd);
27015 -       if (rc < 0) {
27016 -               dev_dbg(dev, "Device sysfs registering failed: %d\n", rc);
27017 +       rc = device_add(&idxd->conf_dev);
27018 +       if (rc < 0)
27019                 return rc;
27020 -       }
27022 -       rc = idxd_setup_wq_sysfs(idxd);
27023 +       rc = idxd_register_wq_devices(idxd);
27024         if (rc < 0) {
27025 -               /* unregister conf dev */
27026 -               dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
27027 -               return rc;
27028 +               dev_dbg(dev, "WQ devices registering failed: %d\n", rc);
27029 +               goto err_wq;
27030         }
27032 -       rc = idxd_setup_group_sysfs(idxd);
27033 +       rc = idxd_register_engine_devices(idxd);
27034         if (rc < 0) {
27035 -               /* unregister conf dev */
27036 -               dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
27037 -               return rc;
27038 +               dev_dbg(dev, "Engine devices registering failed: %d\n", rc);
27039 +               goto err_engine;
27040         }
27042 -       rc = idxd_setup_engine_sysfs(idxd);
27043 +       rc = idxd_register_group_devices(idxd);
27044         if (rc < 0) {
27045 -               /* unregister conf dev */
27046 -               dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
27047 -               return rc;
27048 +               dev_dbg(dev, "Group device registering failed: %d\n", rc);
27049 +               goto err_group;
27050         }
27052         return 0;
27054 + err_group:
27055 +       for (i = 0; i < idxd->max_engines; i++)
27056 +               device_unregister(&idxd->engines[i]->conf_dev);
27057 + err_engine:
27058 +       for (i = 0; i < idxd->max_wqs; i++)
27059 +               device_unregister(&idxd->wqs[i]->conf_dev);
27060 + err_wq:
27061 +       device_del(&idxd->conf_dev);
27062 +       return rc;
27065 -void idxd_cleanup_sysfs(struct idxd_device *idxd)
27066 +void idxd_unregister_devices(struct idxd_device *idxd)
27068         int i;
27070         for (i = 0; i < idxd->max_wqs; i++) {
27071 -               struct idxd_wq *wq = &idxd->wqs[i];
27072 +               struct idxd_wq *wq = idxd->wqs[i];
27074                 device_unregister(&wq->conf_dev);
27075         }
27077         for (i = 0; i < idxd->max_engines; i++) {
27078 -               struct idxd_engine *engine = &idxd->engines[i];
27079 +               struct idxd_engine *engine = idxd->engines[i];
27081                 device_unregister(&engine->conf_dev);
27082         }
27084         for (i = 0; i < idxd->max_groups; i++) {
27085 -               struct idxd_group *group = &idxd->groups[i];
27086 +               struct idxd_group *group = idxd->groups[i];
27088                 device_unregister(&group->conf_dev);
27089         }
27090 diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
27091 index aae82db542a5..76aacbac5869 100644
27092 --- a/drivers/extcon/extcon-arizona.c
27093 +++ b/drivers/extcon/extcon-arizona.c
27094 @@ -601,7 +601,7 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
27095         struct arizona *arizona = info->arizona;
27096         int id_gpio = arizona->pdata.hpdet_id_gpio;
27097         unsigned int report = EXTCON_JACK_HEADPHONE;
27098 -       int ret, reading;
27099 +       int ret, reading, state;
27100         bool mic = false;
27102         mutex_lock(&info->lock);
27103 @@ -614,12 +614,11 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
27104         }
27106         /* If the cable was removed while measuring ignore the result */
27107 -       ret = extcon_get_state(info->edev, EXTCON_MECHANICAL);
27108 -       if (ret < 0) {
27109 -               dev_err(arizona->dev, "Failed to check cable state: %d\n",
27110 -                       ret);
27111 +       state = extcon_get_state(info->edev, EXTCON_MECHANICAL);
27112 +       if (state < 0) {
27113 +               dev_err(arizona->dev, "Failed to check cable state: %d\n", state);
27114                 goto out;
27115 -       } else if (!ret) {
27116 +       } else if (!state) {
27117                 dev_dbg(arizona->dev, "Ignoring HPDET for removed cable\n");
27118                 goto done;
27119         }
27120 @@ -667,7 +666,7 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
27121                 gpio_set_value_cansleep(id_gpio, 0);
27123         /* If we have a mic then reenable MICDET */
27124 -       if (mic || info->mic)
27125 +       if (state && (mic || info->mic))
27126                 arizona_start_mic(info);
27128         if (info->hpdet_active) {
27129 @@ -675,7 +674,9 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
27130                 info->hpdet_active = false;
27131         }
27133 -       info->hpdet_done = true;
27134 +       /* Do not set hp_det done when the cable has been unplugged */
27135 +       if (state)
27136 +               info->hpdet_done = true;
27138  out:
27139         mutex_unlock(&info->lock);
27140 @@ -1759,25 +1760,6 @@ static int arizona_extcon_remove(struct platform_device *pdev)
27141         bool change;
27142         int ret;
27144 -       ret = regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
27145 -                                      ARIZONA_MICD_ENA, 0,
27146 -                                      &change);
27147 -       if (ret < 0) {
27148 -               dev_err(&pdev->dev, "Failed to disable micd on remove: %d\n",
27149 -                       ret);
27150 -       } else if (change) {
27151 -               regulator_disable(info->micvdd);
27152 -               pm_runtime_put(info->dev);
27153 -       }
27155 -       gpiod_put(info->micd_pol_gpio);
27157 -       pm_runtime_disable(&pdev->dev);
27159 -       regmap_update_bits(arizona->regmap,
27160 -                          ARIZONA_MICD_CLAMP_CONTROL,
27161 -                          ARIZONA_MICD_CLAMP_MODE_MASK, 0);
27163         if (info->micd_clamp) {
27164                 jack_irq_rise = ARIZONA_IRQ_MICD_CLAMP_RISE;
27165                 jack_irq_fall = ARIZONA_IRQ_MICD_CLAMP_FALL;
27166 @@ -1793,10 +1775,31 @@ static int arizona_extcon_remove(struct platform_device *pdev)
27167         arizona_free_irq(arizona, jack_irq_rise, info);
27168         arizona_free_irq(arizona, jack_irq_fall, info);
27169         cancel_delayed_work_sync(&info->hpdet_work);
27170 +       cancel_delayed_work_sync(&info->micd_detect_work);
27171 +       cancel_delayed_work_sync(&info->micd_timeout_work);
27173 +       ret = regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
27174 +                                      ARIZONA_MICD_ENA, 0,
27175 +                                      &change);
27176 +       if (ret < 0) {
27177 +               dev_err(&pdev->dev, "Failed to disable micd on remove: %d\n",
27178 +                       ret);
27179 +       } else if (change) {
27180 +               regulator_disable(info->micvdd);
27181 +               pm_runtime_put(info->dev);
27182 +       }
27184 +       regmap_update_bits(arizona->regmap,
27185 +                          ARIZONA_MICD_CLAMP_CONTROL,
27186 +                          ARIZONA_MICD_CLAMP_MODE_MASK, 0);
27187         regmap_update_bits(arizona->regmap, ARIZONA_JACK_DETECT_ANALOGUE,
27188                            ARIZONA_JD1_ENA, 0);
27189         arizona_clk32k_disable(arizona);
27191 +       gpiod_put(info->micd_pol_gpio);
27193 +       pm_runtime_disable(&pdev->dev);
27195         return 0;
27198 diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
27199 index 3f14dffb9669..5dd19dbd67a3 100644
27200 --- a/drivers/firmware/Kconfig
27201 +++ b/drivers/firmware/Kconfig
27202 @@ -237,6 +237,7 @@ config INTEL_STRATIX10_RSU
27203  config QCOM_SCM
27204         bool
27205         depends on ARM || ARM64
27206 +       depends on HAVE_ARM_SMCCC
27207         select RESET_CONTROLLER
27209  config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
27210 diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
27211 index c23466e05e60..d0537573501e 100644
27212 --- a/drivers/firmware/efi/libstub/Makefile
27213 +++ b/drivers/firmware/efi/libstub/Makefile
27214 @@ -13,7 +13,8 @@ cflags-$(CONFIG_X86)          += -m$(BITS) -D__KERNEL__ \
27215                                    -Wno-pointer-sign \
27216                                    $(call cc-disable-warning, address-of-packed-member) \
27217                                    $(call cc-disable-warning, gnu) \
27218 -                                  -fno-asynchronous-unwind-tables
27219 +                                  -fno-asynchronous-unwind-tables \
27220 +                                  $(CLANG_FLAGS)
27222  # arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
27223  # disable the stackleak plugin
27224 diff --git a/drivers/firmware/qcom_scm-smc.c b/drivers/firmware/qcom_scm-smc.c
27225 index 497c13ba98d6..d111833364ba 100644
27226 --- a/drivers/firmware/qcom_scm-smc.c
27227 +++ b/drivers/firmware/qcom_scm-smc.c
27228 @@ -77,8 +77,10 @@ static void __scm_smc_do(const struct arm_smccc_args *smc,
27229         }  while (res->a0 == QCOM_SCM_V2_EBUSY);
27232 -int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
27233 -                struct qcom_scm_res *res, bool atomic)
27235 +int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
27236 +                  enum qcom_scm_convention qcom_convention,
27237 +                  struct qcom_scm_res *res, bool atomic)
27239         int arglen = desc->arginfo & 0xf;
27240         int i;
27241 @@ -87,9 +89,8 @@ int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
27242         size_t alloc_len;
27243         gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
27244         u32 smccc_call_type = atomic ? ARM_SMCCC_FAST_CALL : ARM_SMCCC_STD_CALL;
27245 -       u32 qcom_smccc_convention =
27246 -                       (qcom_scm_convention == SMC_CONVENTION_ARM_32) ?
27247 -                       ARM_SMCCC_SMC_32 : ARM_SMCCC_SMC_64;
27248 +       u32 qcom_smccc_convention = (qcom_convention == SMC_CONVENTION_ARM_32) ?
27249 +                                   ARM_SMCCC_SMC_32 : ARM_SMCCC_SMC_64;
27250         struct arm_smccc_res smc_res;
27251         struct arm_smccc_args smc = {0};
27253 @@ -148,4 +149,5 @@ int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
27254         }
27256         return (long)smc_res.a0 ? qcom_scm_remap_error(smc_res.a0) : 0;
27259 diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
27260 index f57779fc7ee9..9ac84b5d6ce0 100644
27261 --- a/drivers/firmware/qcom_scm.c
27262 +++ b/drivers/firmware/qcom_scm.c
27263 @@ -113,14 +113,10 @@ static void qcom_scm_clk_disable(void)
27264         clk_disable_unprepare(__scm->bus_clk);
27267 -static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
27268 -                                       u32 cmd_id);
27269 +enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
27270 +static DEFINE_SPINLOCK(scm_query_lock);
27272 -enum qcom_scm_convention qcom_scm_convention;
27273 -static bool has_queried __read_mostly;
27274 -static DEFINE_SPINLOCK(query_lock);
27276 -static void __query_convention(void)
27277 +static enum qcom_scm_convention __get_convention(void)
27279         unsigned long flags;
27280         struct qcom_scm_desc desc = {
27281 @@ -133,36 +129,50 @@ static void __query_convention(void)
27282                 .owner = ARM_SMCCC_OWNER_SIP,
27283         };
27284         struct qcom_scm_res res;
27285 +       enum qcom_scm_convention probed_convention;
27286         int ret;
27287 +       bool forced = false;
27289 -       spin_lock_irqsave(&query_lock, flags);
27290 -       if (has_queried)
27291 -               goto out;
27292 +       if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
27293 +               return qcom_scm_convention;
27295 -       qcom_scm_convention = SMC_CONVENTION_ARM_64;
27296 -       // Device isn't required as there is only one argument - no device
27297 -       // needed to dma_map_single to secure world
27298 -       ret = scm_smc_call(NULL, &desc, &res, true);
27299 +       /*
27300 +        * Device isn't required as there is only one argument - no device
27301 +        * needed to dma_map_single to secure world
27302 +        */
27303 +       probed_convention = SMC_CONVENTION_ARM_64;
27304 +       ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
27305         if (!ret && res.result[0] == 1)
27306 -               goto out;
27307 +               goto found;
27309 +       /*
27310 +        * Some SC7180 firmwares didn't implement the
27311 +        * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fallback to forcing ARM_64
27312 +        * calling conventions on these firmwares. Luckily we don't make any
27313 +        * early calls into the firmware on these SoCs so the device pointer
27314 +        * will be valid here to check if the compatible matches.
27315 +        */
27316 +       if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
27317 +               forced = true;
27318 +               goto found;
27319 +       }
27321 -       qcom_scm_convention = SMC_CONVENTION_ARM_32;
27322 -       ret = scm_smc_call(NULL, &desc, &res, true);
27323 +       probed_convention = SMC_CONVENTION_ARM_32;
27324 +       ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
27325         if (!ret && res.result[0] == 1)
27326 -               goto out;
27328 -       qcom_scm_convention = SMC_CONVENTION_LEGACY;
27329 -out:
27330 -       has_queried = true;
27331 -       spin_unlock_irqrestore(&query_lock, flags);
27332 -       pr_info("qcom_scm: convention: %s\n",
27333 -               qcom_scm_convention_names[qcom_scm_convention]);
27335 +               goto found;
27337 +       probed_convention = SMC_CONVENTION_LEGACY;
27338 +found:
27339 +       spin_lock_irqsave(&scm_query_lock, flags);
27340 +       if (probed_convention != qcom_scm_convention) {
27341 +               qcom_scm_convention = probed_convention;
27342 +               pr_info("qcom_scm: convention: %s%s\n",
27343 +                       qcom_scm_convention_names[qcom_scm_convention],
27344 +                       forced ? " (forced)" : "");
27345 +       }
27346 +       spin_unlock_irqrestore(&scm_query_lock, flags);
27348 -static inline enum qcom_scm_convention __get_convention(void)
27350 -       if (unlikely(!has_queried))
27351 -               __query_convention();
27352         return qcom_scm_convention;
27355 @@ -219,8 +229,8 @@ static int qcom_scm_call_atomic(struct device *dev,
27356         }
27359 -static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
27360 -                                       u32 cmd_id)
27361 +static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
27362 +                                        u32 cmd_id)
27364         int ret;
27365         struct qcom_scm_desc desc = {
27366 @@ -247,7 +257,7 @@ static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
27368         ret = qcom_scm_call(dev, &desc, &res);
27370 -       return ret ? : res.result[0];
27371 +       return ret ? false : !!res.result[0];
27374  /**
27375 @@ -585,9 +595,8 @@ bool qcom_scm_pas_supported(u32 peripheral)
27376         };
27377         struct qcom_scm_res res;
27379 -       ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
27380 -                                          QCOM_SCM_PIL_PAS_IS_SUPPORTED);
27381 -       if (ret <= 0)
27382 +       if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
27383 +                                         QCOM_SCM_PIL_PAS_IS_SUPPORTED))
27384                 return false;
27386         ret = qcom_scm_call(__scm->dev, &desc, &res);
27387 @@ -1060,17 +1069,18 @@ EXPORT_SYMBOL(qcom_scm_ice_set_key);
27388   */
27389  bool qcom_scm_hdcp_available(void)
27391 +       bool avail;
27392         int ret = qcom_scm_clk_enable();
27394         if (ret)
27395                 return ret;
27397 -       ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
27398 +       avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
27399                                                 QCOM_SCM_HDCP_INVOKE);
27401         qcom_scm_clk_disable();
27403 -       return ret > 0;
27404 +       return avail;
27406  EXPORT_SYMBOL(qcom_scm_hdcp_available);
27408 @@ -1242,7 +1252,7 @@ static int qcom_scm_probe(struct platform_device *pdev)
27409         __scm = scm;
27410         __scm->dev = &pdev->dev;
27412 -       __query_convention();
27413 +       __get_convention();
27415         /*
27416          * If requested enable "download mode", from this point on warmboot
27417 diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
27418 index 95cd1ac30ab0..632fe3142462 100644
27419 --- a/drivers/firmware/qcom_scm.h
27420 +++ b/drivers/firmware/qcom_scm.h
27421 @@ -61,8 +61,11 @@ struct qcom_scm_res {
27422  };
27424  #define SCM_SMC_FNID(s, c)     ((((s) & 0xFF) << 8) | ((c) & 0xFF))
27425 -extern int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
27426 -                       struct qcom_scm_res *res, bool atomic);
27427 +extern int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
27428 +                         enum qcom_scm_convention qcom_convention,
27429 +                         struct qcom_scm_res *res, bool atomic);
27430 +#define scm_smc_call(dev, desc, res, atomic) \
27431 +       __scm_smc_call((dev), (desc), qcom_scm_convention, (res), (atomic))
27433  #define SCM_LEGACY_FNID(s, c)  (((s) << 10) | ((c) & 0x3ff))
27434  extern int scm_legacy_call_atomic(struct device *dev,
27435 diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
27436 index 7eb9958662dd..83082e2f2e44 100644
27437 --- a/drivers/firmware/xilinx/zynqmp.c
27438 +++ b/drivers/firmware/xilinx/zynqmp.c
27439 @@ -2,7 +2,7 @@
27440  /*
27441   * Xilinx Zynq MPSoC Firmware layer
27442   *
27443 - *  Copyright (C) 2014-2020 Xilinx, Inc.
27444 + *  Copyright (C) 2014-2021 Xilinx, Inc.
27445   *
27446   *  Michal Simek <michal.simek@xilinx.com>
27447   *  Davorin Mista <davorin.mista@aggios.com>
27448 @@ -1280,12 +1280,13 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
27449  static int zynqmp_firmware_remove(struct platform_device *pdev)
27451         struct pm_api_feature_data *feature_data;
27452 +       struct hlist_node *tmp;
27453         int i;
27455         mfd_remove_devices(&pdev->dev);
27456         zynqmp_pm_api_debugfs_exit();
27458 -       hash_for_each(pm_api_features_map, i, feature_data, hentry) {
27459 +       hash_for_each_safe(pm_api_features_map, i, tmp, feature_data, hentry) {
27460                 hash_del(&feature_data->hentry);
27461                 kfree(feature_data);
27462         }
27463 diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
27464 index 04e47e266f26..b44523ea8c91 100644
27465 --- a/drivers/fpga/dfl-pci.c
27466 +++ b/drivers/fpga/dfl-pci.c
27467 @@ -69,14 +69,16 @@ static void cci_pci_free_irq(struct pci_dev *pcidev)
27470  /* PCI Device ID */
27471 -#define PCIE_DEVICE_ID_PF_INT_5_X      0xBCBD
27472 -#define PCIE_DEVICE_ID_PF_INT_6_X      0xBCC0
27473 -#define PCIE_DEVICE_ID_PF_DSC_1_X      0x09C4
27474 -#define PCIE_DEVICE_ID_INTEL_PAC_N3000 0x0B30
27475 +#define PCIE_DEVICE_ID_PF_INT_5_X              0xBCBD
27476 +#define PCIE_DEVICE_ID_PF_INT_6_X              0xBCC0
27477 +#define PCIE_DEVICE_ID_PF_DSC_1_X              0x09C4
27478 +#define PCIE_DEVICE_ID_INTEL_PAC_N3000         0x0B30
27479 +#define PCIE_DEVICE_ID_INTEL_PAC_D5005         0x0B2B
27480  /* VF Device */
27481 -#define PCIE_DEVICE_ID_VF_INT_5_X      0xBCBF
27482 -#define PCIE_DEVICE_ID_VF_INT_6_X      0xBCC1
27483 -#define PCIE_DEVICE_ID_VF_DSC_1_X      0x09C5
27484 +#define PCIE_DEVICE_ID_VF_INT_5_X              0xBCBF
27485 +#define PCIE_DEVICE_ID_VF_INT_6_X              0xBCC1
27486 +#define PCIE_DEVICE_ID_VF_DSC_1_X              0x09C5
27487 +#define PCIE_DEVICE_ID_INTEL_PAC_D5005_VF      0x0B2C
27489  static struct pci_device_id cci_pcie_id_tbl[] = {
27490         {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_5_X),},
27491 @@ -86,6 +88,8 @@ static struct pci_device_id cci_pcie_id_tbl[] = {
27492         {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_DSC_1_X),},
27493         {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_DSC_1_X),},
27494         {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_N3000),},
27495 +       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005),},
27496 +       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_INTEL_PAC_D5005_VF),},
27497         {0,}
27498  };
27499  MODULE_DEVICE_TABLE(pci, cci_pcie_id_tbl);
27500 diff --git a/drivers/fpga/xilinx-spi.c b/drivers/fpga/xilinx-spi.c
27501 index 27defa98092d..fee4d0abf6bf 100644
27502 --- a/drivers/fpga/xilinx-spi.c
27503 +++ b/drivers/fpga/xilinx-spi.c
27504 @@ -233,25 +233,19 @@ static int xilinx_spi_probe(struct spi_device *spi)
27506         /* PROGRAM_B is active low */
27507         conf->prog_b = devm_gpiod_get(&spi->dev, "prog_b", GPIOD_OUT_LOW);
27508 -       if (IS_ERR(conf->prog_b)) {
27509 -               dev_err(&spi->dev, "Failed to get PROGRAM_B gpio: %ld\n",
27510 -                       PTR_ERR(conf->prog_b));
27511 -               return PTR_ERR(conf->prog_b);
27512 -       }
27513 +       if (IS_ERR(conf->prog_b))
27514 +               return dev_err_probe(&spi->dev, PTR_ERR(conf->prog_b),
27515 +                                    "Failed to get PROGRAM_B gpio\n");
27517         conf->init_b = devm_gpiod_get_optional(&spi->dev, "init-b", GPIOD_IN);
27518 -       if (IS_ERR(conf->init_b)) {
27519 -               dev_err(&spi->dev, "Failed to get INIT_B gpio: %ld\n",
27520 -                       PTR_ERR(conf->init_b));
27521 -               return PTR_ERR(conf->init_b);
27522 -       }
27523 +       if (IS_ERR(conf->init_b))
27524 +               return dev_err_probe(&spi->dev, PTR_ERR(conf->init_b),
27525 +                                    "Failed to get INIT_B gpio\n");
27527         conf->done = devm_gpiod_get(&spi->dev, "done", GPIOD_IN);
27528 -       if (IS_ERR(conf->done)) {
27529 -               dev_err(&spi->dev, "Failed to get DONE gpio: %ld\n",
27530 -                       PTR_ERR(conf->done));
27531 -               return PTR_ERR(conf->done);
27532 -       }
27533 +       if (IS_ERR(conf->done))
27534 +               return dev_err_probe(&spi->dev, PTR_ERR(conf->done),
27535 +                                    "Failed to get DONE gpio\n");
27537         mgr = devm_fpga_mgr_create(&spi->dev,
27538                                    "Xilinx Slave Serial FPGA Manager",
27539 diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
27540 index 1aacd2a5a1fd..174839f3772f 100644
27541 --- a/drivers/gpio/gpiolib-acpi.c
27542 +++ b/drivers/gpio/gpiolib-acpi.c
27543 @@ -1438,6 +1438,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
27544                         .no_edge_events_on_boot = true,
27545                 },
27546         },
27547 +       {
27548 +               /*
27549 +                * The Dell Venue 10 Pro 5055, with Bay Trail SoC + TI PMIC uses an
27550 +                * external embedded-controller connected via I2C + an ACPI GPIO
27551 +                * event handler on INT33FC:02 pin 12, causing spurious wakeups.
27552 +                */
27553 +               .matches = {
27554 +                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
27555 +                       DMI_MATCH(DMI_PRODUCT_NAME, "Venue 10 Pro 5055"),
27556 +               },
27557 +               .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
27558 +                       .ignore_wake = "INT33FC:02@12",
27559 +               },
27560 +       },
27561         {
27562                 /*
27563                  * HP X2 10 models with Cherry Trail SoC + TI PMIC use an
27564 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
27565 index 8a5a8ff5d362..5eee251e3335 100644
27566 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
27567 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
27568 @@ -3613,6 +3613,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
27570         dev_info(adev->dev, "amdgpu: finishing device.\n");
27571         flush_delayed_work(&adev->delayed_init_work);
27572 +       ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
27573         adev->shutdown = true;
27575         kfree(adev->pci_state);
27576 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
27577 index f753e04fee99..a2ac44cc2a6d 100644
27578 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
27579 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
27580 @@ -1355,7 +1355,7 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
27581                         }
27582                 }
27583         }
27584 -       return r;
27585 +       return 0;
27588  int amdgpu_display_resume_helper(struct amdgpu_device *adev)
27589 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
27590 index d56f4023ebb3..7e8e46c39dbd 100644
27591 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
27592 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
27593 @@ -533,6 +533,8 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
27595                 if (!ring || !ring->fence_drv.initialized)
27596                         continue;
27597 +               if (!ring->no_scheduler)
27598 +                       drm_sched_fini(&ring->sched);
27599                 r = amdgpu_fence_wait_empty(ring);
27600                 if (r) {
27601                         /* no need to trigger GPU reset as we are unloading */
27602 @@ -541,8 +543,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
27603                 if (ring->fence_drv.irq_src)
27604                         amdgpu_irq_put(adev, ring->fence_drv.irq_src,
27605                                        ring->fence_drv.irq_type);
27606 -               if (!ring->no_scheduler)
27607 -                       drm_sched_fini(&ring->sched);
27609                 del_timer_sync(&ring->fence_drv.fallback_timer);
27610                 for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
27611                         dma_fence_put(ring->fence_drv.fences[j]);
27612 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
27613 index 7645223ea0ef..97c11aa47ad0 100644
27614 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
27615 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
27616 @@ -77,6 +77,8 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
27617                 }
27619                 ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);
27620 +               /* flush the cache before commit the IB */
27621 +               ib->flags = AMDGPU_IB_FLAG_EMIT_MEM_SYNC;
27623                 if (!vm)
27624                         ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
27625 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
27626 index 94b069630db3..b4971e90b98c 100644
27627 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
27628 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
27629 @@ -215,7 +215,11 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
27630         /* Check if we have an idle VMID */
27631         i = 0;
27632         list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
27633 -               fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, ring);
27634 +               /* Don't use per engine and per process VMID at the same time */
27635 +               struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
27636 +                       NULL : ring;
27638 +               fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);
27639                 if (!fences[i])
27640                         break;
27641                 ++i;
27642 @@ -281,7 +285,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
27643         if (updates && (*id)->flushed_updates &&
27644             updates->context == (*id)->flushed_updates->context &&
27645             !dma_fence_is_later(updates, (*id)->flushed_updates))
27646 -           updates = NULL;
27647 +               updates = NULL;
27649         if ((*id)->owner != vm->immediate.fence_context ||
27650             job->vm_pd_addr != (*id)->pd_gpu_addr ||
27651 @@ -290,6 +294,10 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
27652              !dma_fence_is_signaled((*id)->last_flush))) {
27653                 struct dma_fence *tmp;
27655 +               /* Don't use per engine and per process VMID at the same time */
27656 +               if (adev->vm_manager.concurrent_flush)
27657 +                       ring = NULL;
27659                 /* to prevent one context starved by another context */
27660                 (*id)->pd_gpu_addr = 0;
27661                 tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
27662 @@ -365,12 +373,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
27663                 if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
27664                         needs_flush = true;
27666 -               /* Concurrent flushes are only possible starting with Vega10 and
27667 -                * are broken on Navi10 and Navi14.
27668 -                */
27669 -               if (needs_flush && (adev->asic_type < CHIP_VEGA10 ||
27670 -                                   adev->asic_type == CHIP_NAVI10 ||
27671 -                                   adev->asic_type == CHIP_NAVI14))
27672 +               if (needs_flush && !adev->vm_manager.concurrent_flush)
27673                         continue;
27675                 /* Good, we can use this VMID. Remember this submission as
27676 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
27677 index afbbec82a289..9be945d8e72f 100644
27678 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
27679 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
27680 @@ -535,7 +535,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
27681                 for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
27682                         struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
27684 -                       if (!src)
27685 +                       if (!src || !src->funcs || !src->funcs->set)
27686                                 continue;
27687                         for (k = 0; k < src->num_types; k++)
27688                                 amdgpu_irq_update(adev, src, k);
27689 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
27690 index 19c0a3655228..82e9ecf84352 100644
27691 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
27692 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
27693 @@ -519,8 +519,10 @@ static int init_pmu_entry_by_type_and_add(struct amdgpu_pmu_entry *pmu_entry,
27694         pmu_entry->pmu.attr_groups = kmemdup(attr_groups, sizeof(attr_groups),
27695                                                                 GFP_KERNEL);
27697 -       if (!pmu_entry->pmu.attr_groups)
27698 +       if (!pmu_entry->pmu.attr_groups) {
27699 +               ret = -ENOMEM;
27700                 goto err_attr_group;
27701 +       }
27703         snprintf(pmu_name, PMU_NAME_SIZE, "%s_%d", pmu_entry->pmu_file_prefix,
27704                                 adev_to_drm(pmu_entry->adev)->primary->index);
27705 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
27706 index 5efa331e3ee8..383c178cf074 100644
27707 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
27708 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
27709 @@ -942,7 +942,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev,
27710                 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
27712         /* double check that we don't free the table twice */
27713 -       if (!ttm->sg->sgl)
27714 +       if (!ttm->sg || !ttm->sg->sgl)
27715                 return;
27717         /* unmap the pages mapped to the device */
27718 @@ -1162,13 +1162,13 @@ static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
27719         struct amdgpu_ttm_tt *gtt = (void *)ttm;
27720         int r;
27722 -       if (!gtt->bound)
27723 -               return;
27725         /* if the pages have userptr pinning then clear that first */
27726         if (gtt->userptr)
27727                 amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
27729 +       if (!gtt->bound)
27730 +               return;
27732         if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
27733                 return;
27735 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
27736 index e2ed4689118a..c6dbc0801604 100644
27737 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
27738 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
27739 @@ -259,7 +259,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
27740                 if ((adev->asic_type == CHIP_POLARIS10 ||
27741                      adev->asic_type == CHIP_POLARIS11) &&
27742                     (adev->uvd.fw_version < FW_1_66_16))
27743 -                       DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
27744 +                       DRM_ERROR("POLARIS10/11 UVD firmware version %u.%u is too old.\n",
27745                                   version_major, version_minor);
27746         } else {
27747                 unsigned int enc_major, enc_minor, dec_minor;
27748 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
27749 index 326dae31b675..a566bbe26bdd 100644
27750 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
27751 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
27752 @@ -92,13 +92,13 @@ struct amdgpu_prt_cb {
27753  static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
27755         mutex_lock(&vm->eviction_lock);
27756 -       vm->saved_flags = memalloc_nofs_save();
27757 +       vm->saved_flags = memalloc_noreclaim_save();
27760  static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
27762         if (mutex_trylock(&vm->eviction_lock)) {
27763 -               vm->saved_flags = memalloc_nofs_save();
27764 +               vm->saved_flags = memalloc_noreclaim_save();
27765                 return 1;
27766         }
27767         return 0;
27768 @@ -106,7 +106,7 @@ static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
27770  static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
27772 -       memalloc_nofs_restore(vm->saved_flags);
27773 +       memalloc_noreclaim_restore(vm->saved_flags);
27774         mutex_unlock(&vm->eviction_lock);
27777 @@ -3147,6 +3147,12 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
27779         unsigned i;
27781 +       /* Concurrent flushes are only possible starting with Vega10 and
27782 +        * are broken on Navi10 and Navi14.
27783 +        */
27784 +       adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
27785 +                                             adev->asic_type == CHIP_NAVI10 ||
27786 +                                             adev->asic_type == CHIP_NAVI14);
27787         amdgpu_vmid_mgr_init(adev);
27789         adev->vm_manager.fence_context =
27790 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
27791 index 976a12e5a8b9..4e140288159c 100644
27792 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
27793 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
27794 @@ -331,6 +331,7 @@ struct amdgpu_vm_manager {
27795         /* Handling of VMIDs */
27796         struct amdgpu_vmid_mgr                  id_mgr[AMDGPU_MAX_VMHUBS];
27797         unsigned int                            first_kfd_vmid;
27798 +       bool                                    concurrent_flush;
27800         /* Handling of VM fences */
27801         u64                                     fence_context;
27802 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
27803 index 659b385b27b5..4d3a24fdeb9c 100644
27804 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
27805 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
27806 @@ -468,15 +468,22 @@ int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_dev
27811 + * NOTE psp_xgmi_node_info.num_hops layout is as follows:
27812 + * num_hops[7:6] = link type (0 = xGMI2, 1 = xGMI3, 2/3 = reserved)
27813 + * num_hops[5:3] = reserved
27814 + * num_hops[2:0] = number of hops
27815 + */
27816  int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
27817                 struct amdgpu_device *peer_adev)
27819         struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
27820 +       uint8_t num_hops_mask = 0x7;
27821         int i;
27823         for (i = 0 ; i < top->num_nodes; ++i)
27824                 if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
27825 -                       return top->nodes[i].num_hops;
27826 +                       return top->nodes[i].num_hops & num_hops_mask;
27827         return  -EINVAL;
27830 diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
27831 index 2d832fc23119..421d6069c509 100644
27832 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
27833 +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
27834 @@ -59,6 +59,7 @@ MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
27835  MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
27836  MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
27837  MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
27838 +MODULE_FIRMWARE("amdgpu/polaris12_32_mc.bin");
27839  MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
27840  MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
27841  MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");
27842 @@ -243,10 +244,16 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
27843                         chip_name = "polaris10";
27844                 break;
27845         case CHIP_POLARIS12:
27846 -               if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision))
27847 +               if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision)) {
27848                         chip_name = "polaris12_k";
27849 -               else
27850 -                       chip_name = "polaris12";
27851 +               } else {
27852 +                       WREG32(mmMC_SEQ_IO_DEBUG_INDEX, ixMC_IO_DEBUG_UP_159);
27853 +                       /* Polaris12 32bit ASIC needs a special MC firmware */
27854 +                       if (RREG32(mmMC_SEQ_IO_DEBUG_DATA) == 0x05b4dc40)
27855 +                               chip_name = "polaris12_32";
27856 +                       else
27857 +                               chip_name = "polaris12";
27858 +               }
27859                 break;
27860         case CHIP_FIJI:
27861         case CHIP_CARRIZO:
27862 diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
27863 index def583916294..9b844e9fb16f 100644
27864 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
27865 +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
27866 @@ -584,6 +584,10 @@ static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
27867         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
27868                         VCN, inst_idx, mmUVD_VCPU_NONCACHE_SIZE0),
27869                         AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
27871 +       /* VCN global tiling registers */
27872 +       WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
27873 +               UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
27876  static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
27877 diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
27878 index 88626d83e07b..ca8efa5c6978 100644
27879 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
27880 +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
27881 @@ -220,10 +220,8 @@ static int vega10_ih_enable_ring(struct amdgpu_device *adev,
27882         tmp = vega10_ih_rb_cntl(ih, tmp);
27883         if (ih == &adev->irq.ih)
27884                 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
27885 -       if (ih == &adev->irq.ih1) {
27886 -               tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
27887 +       if (ih == &adev->irq.ih1)
27888                 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
27889 -       }
27890         if (amdgpu_sriov_vf(adev)) {
27891                 if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
27892                         dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
27893 @@ -265,7 +263,6 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
27894         u32 ih_chicken;
27895         int ret;
27896         int i;
27897 -       u32 tmp;
27899         /* disable irqs */
27900         ret = vega10_ih_toggle_interrupts(adev, false);
27901 @@ -291,15 +288,6 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
27902                 }
27903         }
27905 -       tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
27906 -       tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
27907 -                           CLIENT18_IS_STORM_CLIENT, 1);
27908 -       WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);
27910 -       tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
27911 -       tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
27912 -       WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);
27914         pci_set_master(adev->pdev);
27916         /* enable interrupts */
27917 @@ -345,11 +333,17 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
27918         u32 wptr, tmp;
27919         struct amdgpu_ih_regs *ih_regs;
27921 -       wptr = le32_to_cpu(*ih->wptr_cpu);
27922 -       ih_regs = &ih->ih_regs;
27923 +       if (ih == &adev->irq.ih) {
27924 +               /* Only ring0 supports writeback. On other rings fall back
27925 +                * to register-based code with overflow checking below.
27926 +                */
27927 +               wptr = le32_to_cpu(*ih->wptr_cpu);
27929 -       if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
27930 -               goto out;
27931 +               if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
27932 +                       goto out;
27933 +       }
27935 +       ih_regs = &ih->ih_regs;
27937         /* Double check that the overflow wasn't already cleared. */
27938         wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
27939 @@ -440,15 +434,11 @@ static int vega10_ih_self_irq(struct amdgpu_device *adev,
27940                               struct amdgpu_irq_src *source,
27941                               struct amdgpu_iv_entry *entry)
27943 -       uint32_t wptr = cpu_to_le32(entry->src_data[0]);
27945         switch (entry->ring_id) {
27946         case 1:
27947 -               *adev->irq.ih1.wptr_cpu = wptr;
27948                 schedule_work(&adev->irq.ih1_work);
27949                 break;
27950         case 2:
27951 -               *adev->irq.ih2.wptr_cpu = wptr;
27952                 schedule_work(&adev->irq.ih2_work);
27953                 break;
27954         default: break;
27955 diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
27956 index 5a3c867d5881..86dcf448e0c2 100644
27957 --- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
27958 +++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
27959 @@ -104,6 +104,8 @@ static int vega20_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
27961         tmp = RREG32(ih_regs->ih_rb_cntl);
27962         tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
27963 +       tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_GPU_TS_ENABLE, 1);
27965         /* enable_intr field is only valid in ring0 */
27966         if (ih == &adev->irq.ih)
27967                 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
27968 @@ -220,10 +222,8 @@ static int vega20_ih_enable_ring(struct amdgpu_device *adev,
27969         tmp = vega20_ih_rb_cntl(ih, tmp);
27970         if (ih == &adev->irq.ih)
27971                 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
27972 -       if (ih == &adev->irq.ih1) {
27973 -               tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
27974 +       if (ih == &adev->irq.ih1)
27975                 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
27976 -       }
27977         if (amdgpu_sriov_vf(adev)) {
27978                 if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
27979                         dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
27980 @@ -297,7 +297,6 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
27981         u32 ih_chicken;
27982         int ret;
27983         int i;
27984 -       u32 tmp;
27986         /* disable irqs */
27987         ret = vega20_ih_toggle_interrupts(adev, false);
27988 @@ -326,15 +325,6 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
27989                 }
27990         }
27992 -       tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
27993 -       tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
27994 -                           CLIENT18_IS_STORM_CLIENT, 1);
27995 -       WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);
27997 -       tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
27998 -       tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
27999 -       WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);
28001         pci_set_master(adev->pdev);
28003         /* enable interrupts */
28004 @@ -380,11 +370,17 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
28005         u32 wptr, tmp;
28006         struct amdgpu_ih_regs *ih_regs;
28008 -       wptr = le32_to_cpu(*ih->wptr_cpu);
28009 -       ih_regs = &ih->ih_regs;
28010 +       if (ih == &adev->irq.ih) {
28011 +               /* Only ring0 supports writeback. On other rings fall back
28012 +                * to register-based code with overflow checking below.
28013 +                */
28014 +               wptr = le32_to_cpu(*ih->wptr_cpu);
28016 -       if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
28017 -               goto out;
28018 +               if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
28019 +                       goto out;
28020 +       }
28022 +       ih_regs = &ih->ih_regs;
28024         /* Double check that the overflow wasn't already cleared. */
28025         wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
28026 @@ -476,15 +472,11 @@ static int vega20_ih_self_irq(struct amdgpu_device *adev,
28027                               struct amdgpu_irq_src *source,
28028                               struct amdgpu_iv_entry *entry)
28030 -       uint32_t wptr = cpu_to_le32(entry->src_data[0]);
28032         switch (entry->ring_id) {
28033         case 1:
28034 -               *adev->irq.ih1.wptr_cpu = wptr;
28035                 schedule_work(&adev->irq.ih1_work);
28036                 break;
28037         case 2:
28038 -               *adev->irq.ih2.wptr_cpu = wptr;
28039                 schedule_work(&adev->irq.ih2_work);
28040                 break;
28041         default: break;
28042 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
28043 index 511712c2e382..673d5e34f213 100644
28044 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
28045 +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
28046 @@ -33,6 +33,11 @@ static int kfd_debugfs_open(struct inode *inode, struct file *file)
28048         return single_open(file, show, NULL);
28050 +static int kfd_debugfs_hang_hws_read(struct seq_file *m, void *data)
28052 +       seq_printf(m, "echo gpu_id > hang_hws\n");
28053 +       return 0;
28056  static ssize_t kfd_debugfs_hang_hws_write(struct file *file,
28057         const char __user *user_buf, size_t size, loff_t *ppos)
28058 @@ -94,7 +99,7 @@ void kfd_debugfs_init(void)
28059         debugfs_create_file("rls", S_IFREG | 0444, debugfs_root,
28060                             kfd_debugfs_rls_by_device, &kfd_debugfs_fops);
28061         debugfs_create_file("hang_hws", S_IFREG | 0200, debugfs_root,
28062 -                           NULL, &kfd_debugfs_hang_hws_fops);
28063 +                           kfd_debugfs_hang_hws_read, &kfd_debugfs_hang_hws_fops);
28066  void kfd_debugfs_fini(void)
28067 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
28068 index 4598a9a58125..a4266c4bca13 100644
28069 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
28070 +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
28071 @@ -1128,6 +1128,9 @@ static int set_sched_resources(struct device_queue_manager *dqm)
28073  static int initialize_cpsch(struct device_queue_manager *dqm)
28075 +       uint64_t num_sdma_queues;
28076 +       uint64_t num_xgmi_sdma_queues;
28078         pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
28080         mutex_init(&dqm->lock_hidden);
28081 @@ -1136,8 +1139,18 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
28082         dqm->active_cp_queue_count = 0;
28083         dqm->gws_queue_count = 0;
28084         dqm->active_runlist = false;
28085 -       dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
28086 -       dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
28088 +       num_sdma_queues = get_num_sdma_queues(dqm);
28089 +       if (num_sdma_queues >= BITS_PER_TYPE(dqm->sdma_bitmap))
28090 +               dqm->sdma_bitmap = ULLONG_MAX;
28091 +       else
28092 +               dqm->sdma_bitmap = (BIT_ULL(num_sdma_queues) - 1);
28094 +       num_xgmi_sdma_queues = get_num_xgmi_sdma_queues(dqm);
28095 +       if (num_xgmi_sdma_queues >= BITS_PER_TYPE(dqm->xgmi_sdma_bitmap))
28096 +               dqm->xgmi_sdma_bitmap = ULLONG_MAX;
28097 +       else
28098 +               dqm->xgmi_sdma_bitmap = (BIT_ULL(num_xgmi_sdma_queues) - 1);
28100         INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
28102 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
28103 index 66bbca61e3ef..9318936aa805 100644
28104 --- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
28105 +++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
28106 @@ -20,6 +20,10 @@
28107   * OTHER DEALINGS IN THE SOFTWARE.
28108   */
28110 +#include <linux/kconfig.h>
28112 +#if IS_REACHABLE(CONFIG_AMD_IOMMU_V2)
28114  #include <linux/printk.h>
28115  #include <linux/device.h>
28116  #include <linux/slab.h>
28117 @@ -355,3 +359,5 @@ int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev)
28119         return 0;
28122 +#endif
28123 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h
28124 index dd23d9fdf6a8..afd420b01a0c 100644
28125 --- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h
28126 +++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h
28127 @@ -23,7 +23,9 @@
28128  #ifndef __KFD_IOMMU_H__
28129  #define __KFD_IOMMU_H__
28131 -#if defined(CONFIG_AMD_IOMMU_V2_MODULE) || defined(CONFIG_AMD_IOMMU_V2)
28132 +#include <linux/kconfig.h>
28134 +#if IS_REACHABLE(CONFIG_AMD_IOMMU_V2)
28136  #define KFD_SUPPORT_IOMMU_V2
28138 @@ -46,6 +48,9 @@ static inline int kfd_iommu_check_device(struct kfd_dev *kfd)
28140  static inline int kfd_iommu_device_init(struct kfd_dev *kfd)
28142 +#if IS_MODULE(CONFIG_AMD_IOMMU_V2)
28143 +       WARN_ONCE(1, "iommu_v2 module is not usable by built-in KFD");
28144 +#endif
28145         return 0;
28148 @@ -73,6 +78,6 @@ static inline int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev)
28149         return 0;
28152 -#endif /* defined(CONFIG_AMD_IOMMU_V2) */
28153 +#endif /* IS_REACHABLE(CONFIG_AMD_IOMMU_V2) */
28155  #endif /* __KFD_IOMMU_H__ */
28156 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
28157 index d699a5cf6c11..b63f55ea8758 100644
28158 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
28159 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
28160 @@ -1191,6 +1191,15 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
28161         if (adev->dm.dc)
28162                 dc_deinit_callbacks(adev->dm.dc);
28163  #endif
28165 +#if defined(CONFIG_DRM_AMD_DC_DCN)
28166 +       if (adev->dm.vblank_workqueue) {
28167 +               adev->dm.vblank_workqueue->dm = NULL;
28168 +               kfree(adev->dm.vblank_workqueue);
28169 +               adev->dm.vblank_workqueue = NULL;
28170 +       }
28171 +#endif
28173         if (adev->dm.dc->ctx->dmub_srv) {
28174                 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
28175                 adev->dm.dc->ctx->dmub_srv = NULL;
28176 @@ -3841,6 +3850,23 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state,
28177         scaling_info->src_rect.x = state->src_x >> 16;
28178         scaling_info->src_rect.y = state->src_y >> 16;
28180 +       /*
28181 +        * For reasons we don't (yet) fully understand a non-zero
28182 +        * src_y coordinate into an NV12 buffer can cause a
28183 +        * system hang. To avoid hangs (and maybe be overly cautious)
28184 +        * let's reject both non-zero src_x and src_y.
28185 +        *
28186 +        * We currently know of only one use-case to reproduce a
28187 +        * scenario with non-zero src_x and src_y for NV12, which
28188 +        * is to gesture the YouTube Android app into full screen
28189 +        * on ChromeOS.
28190 +        */
28191 +       if (state->fb &&
28192 +           state->fb->format->format == DRM_FORMAT_NV12 &&
28193 +           (scaling_info->src_rect.x != 0 ||
28194 +            scaling_info->src_rect.y != 0))
28195 +               return -EINVAL;
28197         scaling_info->src_rect.width = state->src_w >> 16;
28198         if (scaling_info->src_rect.width == 0)
28199                 return -EINVAL;
28200 @@ -5863,6 +5889,15 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
28202         } while (stream == NULL && requested_bpc >= 6);
28204 +       if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
28205 +               DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
28207 +               aconnector->force_yuv420_output = true;
28208 +               stream = create_validate_stream_for_sink(aconnector, drm_mode,
28209 +                                               dm_state, old_stream);
28210 +               aconnector->force_yuv420_output = false;
28211 +       }
28213         return stream;
28216 @@ -7417,10 +7452,6 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
28217         int x, y;
28218         int xorigin = 0, yorigin = 0;
28220 -       position->enable = false;
28221 -       position->x = 0;
28222 -       position->y = 0;
28224         if (!crtc || !plane->state->fb)
28225                 return 0;
28227 @@ -7467,7 +7498,7 @@ static void handle_cursor_update(struct drm_plane *plane,
28228         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
28229         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
28230         uint64_t address = afb ? afb->address : 0;
28231 -       struct dc_cursor_position position;
28232 +       struct dc_cursor_position position = {0};
28233         struct dc_cursor_attributes attributes;
28234         int ret;
28236 @@ -9264,7 +9295,8 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
28238         new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
28239         new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
28240 -       if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
28241 +       if (!new_cursor_state || !new_primary_state ||
28242 +           !new_cursor_state->fb || !new_primary_state->fb) {
28243                 return 0;
28244         }
28246 @@ -9312,6 +9344,53 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
28248  #endif
28250 +static int validate_overlay(struct drm_atomic_state *state)
28252 +       int i;
28253 +       struct drm_plane *plane;
28254 +       struct drm_plane_state *old_plane_state, *new_plane_state;
28255 +       struct drm_plane_state *primary_state, *overlay_state = NULL;
28257 +       /* Check if primary plane is contained inside overlay */
28258 +       for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
28259 +               if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
28260 +                       if (drm_atomic_plane_disabling(plane->state, new_plane_state))
28261 +                               return 0;
28263 +                       overlay_state = new_plane_state;
28264 +                       continue;
28265 +               }
28266 +       }
28268 +       /* check if we're making changes to the overlay plane */
28269 +       if (!overlay_state)
28270 +               return 0;
28272 +       /* check if overlay plane is enabled */
28273 +       if (!overlay_state->crtc)
28274 +               return 0;
28276 +       /* find the primary plane for the CRTC that the overlay is enabled on */
28277 +       primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
28278 +       if (IS_ERR(primary_state))
28279 +               return PTR_ERR(primary_state);
28281 +       /* check if primary plane is enabled */
28282 +       if (!primary_state->crtc)
28283 +               return 0;
28285 +       /* Perform the bounds check to ensure the overlay plane covers the primary */
28286 +       if (primary_state->crtc_x < overlay_state->crtc_x ||
28287 +           primary_state->crtc_y < overlay_state->crtc_y ||
28288 +           primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
28289 +           primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
28290 +               DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
28291 +               return -EINVAL;
28292 +       }
28294 +       return 0;
28297  /**
28298   * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
28299   * @dev: The DRM device
28300 @@ -9383,7 +9462,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
28301         }
28303  #if defined(CONFIG_DRM_AMD_DC_DCN)
28304 -       if (adev->asic_type >= CHIP_NAVI10) {
28305 +       if (dc_resource_is_dsc_encoding_supported(dc)) {
28306                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
28307                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
28308                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
28309 @@ -9486,6 +9565,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
28310                         goto fail;
28311         }
28313 +       ret = validate_overlay(state);
28314 +       if (ret)
28315 +               goto fail;
28317         /* Add new/modified planes */
28318         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
28319                 ret = dm_update_plane_state(dc, state, plane,
28320 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
28321 index 8bfe901cf237..52cc81705280 100644
28322 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
28323 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
28324 @@ -68,18 +68,6 @@ struct common_irq_params {
28325         enum dc_irq_source irq_src;
28326  };
28328 -/**
28329 - * struct irq_list_head - Linked-list for low context IRQ handlers.
28330 - *
28331 - * @head: The list_head within &struct handler_data
28332 - * @work: A work_struct containing the deferred handler work
28333 - */
28334 -struct irq_list_head {
28335 -       struct list_head head;
28336 -       /* In case this interrupt needs post-processing, 'work' will be queued*/
28337 -       struct work_struct work;
28340  /**
28341   * struct dm_compressor_info - Buffer info used by frame buffer compression
28342   * @cpu_addr: MMIO cpu addr
28343 @@ -293,7 +281,7 @@ struct amdgpu_display_manager {
28344          * Note that handlers are called in the same order as they were
28345          * registered (FIFO).
28346          */
28347 -       struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
28348 +       struct list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
28350         /**
28351          * @irq_handler_list_high_tab:
28352 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
28353 index 360952129b6d..29139b34dbe2 100644
28354 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
28355 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
28356 @@ -150,7 +150,7 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
28357   *
28358   * --- to get dp configuration
28359   *
28360 - * cat link_settings
28361 + * cat /sys/kernel/debug/dri/0/DP-x/link_settings
28362   *
28363   * It will list current, verified, reported, preferred dp configuration.
28364   * current -- for current video mode
28365 @@ -163,7 +163,7 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
28366   * echo <lane_count>  <link_rate> > link_settings
28367   *
28368   * for example, to force to  2 lane, 2.7GHz,
28369 - * echo 4 0xa > link_settings
28370 + * echo 4 0xa > /sys/kernel/debug/dri/0/DP-x/link_settings
28371   *
28372   * spread_spectrum could not be changed dynamically.
28373   *
28374 @@ -171,7 +171,7 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
28375   * done. please check link settings after force operation to see if HW get
28376   * programming.
28377   *
28378 - * cat link_settings
28379 + * cat /sys/kernel/debug/dri/0/DP-x/link_settings
28380   *
28381   * check current and preferred settings.
28382   *
28383 @@ -255,7 +255,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
28384         int max_param_num = 2;
28385         uint8_t param_nums = 0;
28386         long param[2];
28387 -       bool valid_input = false;
28388 +       bool valid_input = true;
28390         if (size == 0)
28391                 return -EINVAL;
28392 @@ -282,9 +282,9 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
28393         case LANE_COUNT_ONE:
28394         case LANE_COUNT_TWO:
28395         case LANE_COUNT_FOUR:
28396 -               valid_input = true;
28397                 break;
28398         default:
28399 +               valid_input = false;
28400                 break;
28401         }
28403 @@ -294,9 +294,9 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
28404         case LINK_RATE_RBR2:
28405         case LINK_RATE_HIGH2:
28406         case LINK_RATE_HIGH3:
28407 -               valid_input = true;
28408                 break;
28409         default:
28410 +               valid_input = false;
28411                 break;
28412         }
28414 @@ -310,10 +310,11 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
28415          * spread spectrum will not be changed
28416          */
28417         prefer_link_settings.link_spread = link->cur_link_settings.link_spread;
28418 +       prefer_link_settings.use_link_rate_set = false;
28419         prefer_link_settings.lane_count = param[0];
28420         prefer_link_settings.link_rate = param[1];
28422 -       dc_link_set_preferred_link_settings(dc, &prefer_link_settings, link);
28423 +       dc_link_set_preferred_training_settings(dc, &prefer_link_settings, NULL, link, true);
28425         kfree(wr_buf);
28426         return size;
28427 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
28428 index 0cdbfcd475ec..71a15f68514b 100644
28429 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
28430 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
28431 @@ -644,6 +644,7 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct
28433         /* File created at /sys/class/drm/card0/device/hdcp_srm*/
28434         hdcp_work[0].attr = data_attr;
28435 +       sysfs_bin_attr_init(&hdcp_work[0].attr);
28437         if (sysfs_create_bin_file(&adev->dev->kobj, &hdcp_work[0].attr))
28438                 DRM_WARN("Failed to create device file hdcp_srm");
28439 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
28440 index e0000c180ed1..8ce10d0973c5 100644
28441 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
28442 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
28443 @@ -82,6 +82,7 @@ struct amdgpu_dm_irq_handler_data {
28444         struct amdgpu_display_manager *dm;
28445         /* DAL irq source which registered for this interrupt. */
28446         enum dc_irq_source irq_source;
28447 +       struct work_struct work;
28448  };
28450  #define DM_IRQ_TABLE_LOCK(adev, flags) \
28451 @@ -111,20 +112,10 @@ static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
28452   */
28453  static void dm_irq_work_func(struct work_struct *work)
28455 -       struct irq_list_head *irq_list_head =
28456 -               container_of(work, struct irq_list_head, work);
28457 -       struct list_head *handler_list = &irq_list_head->head;
28458 -       struct amdgpu_dm_irq_handler_data *handler_data;
28460 -       list_for_each_entry(handler_data, handler_list, list) {
28461 -               DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
28462 -                               handler_data->irq_source);
28463 +       struct amdgpu_dm_irq_handler_data *handler_data =
28464 +               container_of(work, struct amdgpu_dm_irq_handler_data, work);
28466 -               DRM_DEBUG_KMS("DM_IRQ: schedule_work: for dal_src=%d\n",
28467 -                       handler_data->irq_source);
28469 -               handler_data->handler(handler_data->handler_arg);
28470 -       }
28471 +       handler_data->handler(handler_data->handler_arg);
28473         /* Call a DAL subcomponent which registered for interrupt notification
28474          * at INTERRUPT_LOW_IRQ_CONTEXT.
28475 @@ -156,7 +147,7 @@ static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
28476                 break;
28477         case INTERRUPT_LOW_IRQ_CONTEXT:
28478         default:
28479 -               hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
28480 +               hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source];
28481                 break;
28482         }
28484 @@ -290,7 +281,8 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
28485                 break;
28486         case INTERRUPT_LOW_IRQ_CONTEXT:
28487         default:
28488 -               hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
28489 +               hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source];
28490 +               INIT_WORK(&handler_data->work, dm_irq_work_func);
28491                 break;
28492         }
28494 @@ -372,7 +364,7 @@ void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
28495  int amdgpu_dm_irq_init(struct amdgpu_device *adev)
28497         int src;
28498 -       struct irq_list_head *lh;
28499 +       struct list_head *lh;
28501         DRM_DEBUG_KMS("DM_IRQ\n");
28503 @@ -381,9 +373,7 @@ int amdgpu_dm_irq_init(struct amdgpu_device *adev)
28504         for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
28505                 /* low context handler list init */
28506                 lh = &adev->dm.irq_handler_list_low_tab[src];
28507 -               INIT_LIST_HEAD(&lh->head);
28508 -               INIT_WORK(&lh->work, dm_irq_work_func);
28510 +               INIT_LIST_HEAD(lh);
28511                 /* high context handler init */
28512                 INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
28513         }
28514 @@ -400,8 +390,11 @@ int amdgpu_dm_irq_init(struct amdgpu_device *adev)
28515  void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
28517         int src;
28518 -       struct irq_list_head *lh;
28519 +       struct list_head *lh;
28520 +       struct list_head *entry, *tmp;
28521 +       struct amdgpu_dm_irq_handler_data *handler;
28522         unsigned long irq_table_flags;
28524         DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
28525         for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
28526                 DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
28527 @@ -410,7 +403,16 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
28528                  * (because no code can schedule a new one). */
28529                 lh = &adev->dm.irq_handler_list_low_tab[src];
28530                 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
28531 -               flush_work(&lh->work);
28533 +               if (!list_empty(lh)) {
28534 +                       list_for_each_safe(entry, tmp, lh) {
28535 +                               handler = list_entry(
28536 +                                       entry,
28537 +                                       struct amdgpu_dm_irq_handler_data,
28538 +                                       list);
28539 +                               flush_work(&handler->work);
28540 +                       }
28541 +               }
28542         }
28545 @@ -420,6 +422,8 @@ int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
28546         struct list_head *hnd_list_h;
28547         struct list_head *hnd_list_l;
28548         unsigned long irq_table_flags;
28549 +       struct list_head *entry, *tmp;
28550 +       struct amdgpu_dm_irq_handler_data *handler;
28552         DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
28554 @@ -430,14 +434,22 @@ int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
28555          * will be disabled from manage_dm_interrupts on disable CRTC.
28556          */
28557         for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
28558 -               hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
28559 +               hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
28560                 hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
28561                 if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
28562                         dc_interrupt_set(adev->dm.dc, src, false);
28564                 DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
28565 -               flush_work(&adev->dm.irq_handler_list_low_tab[src].work);
28567 +               if (!list_empty(hnd_list_l)) {
28568 +                       list_for_each_safe (entry, tmp, hnd_list_l) {
28569 +                               handler = list_entry(
28570 +                                       entry,
28571 +                                       struct amdgpu_dm_irq_handler_data,
28572 +                                       list);
28573 +                               flush_work(&handler->work);
28574 +                       }
28575 +               }
28576                 DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
28577         }
28579 @@ -457,7 +469,7 @@ int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
28581         /* re-enable short pulse interrupts HW interrupt */
28582         for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
28583 -               hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
28584 +               hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
28585                 hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
28586                 if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
28587                         dc_interrupt_set(adev->dm.dc, src, true);
28588 @@ -483,7 +495,7 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
28589          * will be enabled from manage_dm_interrupts on enable CRTC.
28590          */
28591         for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) {
28592 -               hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
28593 +               hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
28594                 hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
28595                 if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
28596                         dc_interrupt_set(adev->dm.dc, src, true);
28597 @@ -500,22 +512,53 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
28598  static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
28599                                         enum dc_irq_source irq_source)
28601 -       unsigned long irq_table_flags;
28602 -       struct work_struct *work = NULL;
28603 +       struct  list_head *handler_list = &adev->dm.irq_handler_list_low_tab[irq_source];
28604 +       struct  amdgpu_dm_irq_handler_data *handler_data;
28605 +       bool    work_queued = false;
28607 -       DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
28608 +       if (list_empty(handler_list))
28609 +               return;
28611 +       list_for_each_entry (handler_data, handler_list, list) {
28612 +               if (!queue_work(system_highpri_wq, &handler_data->work)) {
28613 +                       continue;
28614 +               } else {
28615 +                       work_queued = true;
28616 +                       break;
28617 +               }
28618 +       }
28620 -       if (!list_empty(&adev->dm.irq_handler_list_low_tab[irq_source].head))
28621 -               work = &adev->dm.irq_handler_list_low_tab[irq_source].work;
28622 +       if (!work_queued) {
28623 +               struct  amdgpu_dm_irq_handler_data *handler_data_add;
28624 +               /*get the amdgpu_dm_irq_handler_data of first item pointed by handler_list*/
28625 +               handler_data = container_of(handler_list->next, struct amdgpu_dm_irq_handler_data, list);
28627 -       DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
28628 +               /*allocate a new amdgpu_dm_irq_handler_data*/
28629 +               handler_data_add = kzalloc(sizeof(*handler_data), GFP_KERNEL);
28630 +               if (!handler_data_add) {
28631 +                       DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
28632 +                       return;
28633 +               }
28635 -       if (work) {
28636 -               if (!schedule_work(work))
28637 -                       DRM_INFO("amdgpu_dm_irq_schedule_work FAILED src %d\n",
28638 -                                               irq_source);
28639 -       }
28640 +               /*copy new amdgpu_dm_irq_handler_data members from handler_data*/
28641 +               handler_data_add->handler       = handler_data->handler;
28642 +               handler_data_add->handler_arg   = handler_data->handler_arg;
28643 +               handler_data_add->dm            = handler_data->dm;
28644 +               handler_data_add->irq_source    = irq_source;
28646 +               list_add_tail(&handler_data_add->list, handler_list);
28648 +               INIT_WORK(&handler_data_add->work, dm_irq_work_func);
28650 +               if (queue_work(system_highpri_wq, &handler_data_add->work))
28651 +                       DRM_DEBUG("Queued work for handling interrupt from "
28652 +                                 "display for IRQ source %d\n",
28653 +                                 irq_source);
28654 +               else
28655 +                       DRM_ERROR("Failed to queue work for handling interrupt "
28656 +                                 "from display for IRQ source %d\n",
28657 +                                 irq_source);
28658 +       }
28661  /*
28662 diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
28663 index 995ffbbf64e7..1ee27f2f28f1 100644
28664 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
28665 +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
28666 @@ -217,6 +217,9 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
28667                 if (ASICREV_IS_SIENNA_CICHLID_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
28668                         dcn3_clk_mgr_destroy(clk_mgr);
28669                 }
28670 +               if (ASICREV_IS_DIMGREY_CAVEFISH_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
28671 +                       dcn3_clk_mgr_destroy(clk_mgr);
28672 +               }
28673                 break;
28675         case FAMILY_VGH:
28676 diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
28677 index c7e5a64e06af..81ea5d3a1947 100644
28678 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
28679 +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
28680 @@ -252,6 +252,7 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
28681         bool force_reset = false;
28682         bool update_uclk = false;
28683         bool p_state_change_support;
28684 +       int total_plane_count;
28686         if (dc->work_arounds.skip_clock_update || !clk_mgr->smu_present)
28687                 return;
28688 @@ -292,7 +293,8 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
28689                 clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
28691         clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
28692 -       p_state_change_support = new_clocks->p_state_change_support || (display_count == 0);
28693 +       total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
28694 +       p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
28695         if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
28696                 clk_mgr_base->clks.p_state_change_support = p_state_change_support;
28698 diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
28699 index 8f8a13c7cf73..4781279024a9 100644
28700 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c
28701 +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
28702 @@ -2398,7 +2398,8 @@ static void commit_planes_do_stream_update(struct dc *dc,
28703                                         if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
28704                                                 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
28706 -                                       dc->hwss.optimize_bandwidth(dc, dc->current_state);
28707 +                                       dc->optimized_required = true;
28709                                 } else {
28710                                         if (dc->optimize_seamless_boot_streams == 0)
28711                                                 dc->hwss.prepare_bandwidth(dc, dc->current_state);
28712 @@ -2545,6 +2546,10 @@ static void commit_planes_for_stream(struct dc *dc,
28713                                                 plane_state->triplebuffer_flips = true;
28714                                 }
28715                         }
28716 +                       if (update_type == UPDATE_TYPE_FULL) {
28717 +                               /* force vsync flip when reconfiguring pipes to prevent underflow */
28718 +                               plane_state->flip_immediate = false;
28719 +                       }
28720                 }
28721         }
28723 diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
28724 index bd0101013ec8..440bf0a0e12a 100644
28725 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
28726 +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
28727 @@ -1603,6 +1603,7 @@ static bool dc_link_construct(struct dc_link *link,
28728         link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
28730         DC_LOG_DC("BIOS object table - %s finished successfully.\n", __func__);
28731 +       kfree(info);
28732         return true;
28733  device_tag_fail:
28734         link->link_enc->funcs->destroy(&link->link_enc);
28735 diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
28736 index 4e87e70237e3..874b132fe1d7 100644
28737 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
28738 +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
28739 @@ -283,7 +283,7 @@ struct abm *dce_abm_create(
28740         const struct dce_abm_shift *abm_shift,
28741         const struct dce_abm_mask *abm_mask)
28743 -       struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL);
28744 +       struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_ATOMIC);
28746         if (abm_dce == NULL) {
28747                 BREAK_TO_DEBUGGER();
28748 diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
28749 index 277484cf853e..d4be5954d7aa 100644
28750 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
28751 +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
28752 @@ -99,7 +99,6 @@ struct dce110_aux_registers {
28753         AUX_SF(AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\
28754         AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
28755         AUX_SF(AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\
28756 -       AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\
28757         AUX_SF(AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\
28758         AUX_SF(AUX_SW_DATA, AUX_SW_DATA, mask_sh),\
28759         AUX_SF(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\
28760 diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
28761 index ddc789daf3b1..09d4cb5c97b6 100644
28762 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
28763 +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
28764 @@ -1049,7 +1049,7 @@ struct dmcu *dcn10_dmcu_create(
28765         const struct dce_dmcu_shift *dmcu_shift,
28766         const struct dce_dmcu_mask *dmcu_mask)
28768 -       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
28769 +       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
28771         if (dmcu_dce == NULL) {
28772                 BREAK_TO_DEBUGGER();
28773 @@ -1070,7 +1070,7 @@ struct dmcu *dcn20_dmcu_create(
28774         const struct dce_dmcu_shift *dmcu_shift,
28775         const struct dce_dmcu_mask *dmcu_mask)
28777 -       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
28778 +       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
28780         if (dmcu_dce == NULL) {
28781                 BREAK_TO_DEBUGGER();
28782 @@ -1091,7 +1091,7 @@ struct dmcu *dcn21_dmcu_create(
28783         const struct dce_dmcu_shift *dmcu_shift,
28784         const struct dce_dmcu_mask *dmcu_mask)
28786 -       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
28787 +       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
28789         if (dmcu_dce == NULL) {
28790                 BREAK_TO_DEBUGGER();
28791 diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
28792 index 69e34bef274c..febccb35ddad 100644
28793 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
28794 +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
28795 @@ -81,13 +81,18 @@ static void dmub_psr_get_state(struct dmub_psr *dmub, enum dc_psr_state *state)
28797         struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub;
28798         uint32_t raw_state;
28799 +       enum dmub_status status = DMUB_STATUS_INVALID;
28801         // Send gpint command and wait for ack
28802 -       dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30);
28804 -       dmub_srv_get_gpint_response(srv, &raw_state);
28806 -       *state = convert_psr_state(raw_state);
28807 +       status = dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30);
28809 +       if (status == DMUB_STATUS_OK) {
28810 +               // GPINT was executed, get response
28811 +               dmub_srv_get_gpint_response(srv, &raw_state);
28812 +               *state = convert_psr_state(raw_state);
28813 +       } else
28814 +               // Return invalid state when GPINT times out
28815 +               *state = 0xFF;
28818  /*
28819 diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
28820 index 62cc2651e00c..8774406120fc 100644
28821 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
28822 +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
28823 @@ -112,7 +112,7 @@ struct dccg *dccg2_create(
28824         const struct dccg_shift *dccg_shift,
28825         const struct dccg_mask *dccg_mask)
28827 -       struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL);
28828 +       struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_ATOMIC);
28829         struct dccg *base;
28831         if (dccg_dcn == NULL) {
28832 diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
28833 index bec7059f6d5d..a1318c31bcfa 100644
28834 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
28835 +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
28836 @@ -1,5 +1,5 @@
28837  /*
28838 - * Copyright 2012-17 Advanced Micro Devices, Inc.
28839 + * Copyright 2012-2021 Advanced Micro Devices, Inc.
28840   *
28841   * Permission is hereby granted, free of charge, to any person obtaining a
28842   * copy of this software and associated documentation files (the "Software"),
28843 @@ -181,11 +181,14 @@ void hubp2_vready_at_or_After_vsync(struct hubp *hubp,
28844         else
28845                 Set HUBP_VREADY_AT_OR_AFTER_VSYNC = 0
28846         */
28847 -       if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
28848 -               + pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
28849 -               value = 1;
28850 -       } else
28851 -               value = 0;
28852 +       if (pipe_dest->htotal != 0) {
28853 +               if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
28854 +                       + pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
28855 +                       value = 1;
28856 +               } else
28857 +                       value = 0;
28858 +       }
28860         REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, value);
28863 diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
28864 index 2c2dbfcd8957..bfbc23b76cd5 100644
28865 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
28866 +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
28867 @@ -1104,7 +1104,7 @@ struct dpp *dcn20_dpp_create(
28868         uint32_t inst)
28870         struct dcn20_dpp *dpp =
28871 -               kzalloc(sizeof(struct dcn20_dpp), GFP_KERNEL);
28872 +               kzalloc(sizeof(struct dcn20_dpp), GFP_ATOMIC);
28874         if (!dpp)
28875                 return NULL;
28876 @@ -1122,7 +1122,7 @@ struct input_pixel_processor *dcn20_ipp_create(
28877         struct dc_context *ctx, uint32_t inst)
28879         struct dcn10_ipp *ipp =
28880 -               kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);
28881 +               kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC);
28883         if (!ipp) {
28884                 BREAK_TO_DEBUGGER();
28885 @@ -1139,7 +1139,7 @@ struct output_pixel_processor *dcn20_opp_create(
28886         struct dc_context *ctx, uint32_t inst)
28888         struct dcn20_opp *opp =
28889 -               kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
28890 +               kzalloc(sizeof(struct dcn20_opp), GFP_ATOMIC);
28892         if (!opp) {
28893                 BREAK_TO_DEBUGGER();
28894 @@ -1156,7 +1156,7 @@ struct dce_aux *dcn20_aux_engine_create(
28895         uint32_t inst)
28897         struct aux_engine_dce110 *aux_engine =
28898 -               kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
28899 +               kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC);
28901         if (!aux_engine)
28902                 return NULL;
28903 @@ -1194,7 +1194,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create(
28904         uint32_t inst)
28906         struct dce_i2c_hw *dce_i2c_hw =
28907 -               kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
28908 +               kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC);
28910         if (!dce_i2c_hw)
28911                 return NULL;
28912 @@ -1207,7 +1207,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create(
28913  struct mpc *dcn20_mpc_create(struct dc_context *ctx)
28915         struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc),
28916 -                                         GFP_KERNEL);
28917 +                                         GFP_ATOMIC);
28919         if (!mpc20)
28920                 return NULL;
28921 @@ -1225,7 +1225,7 @@ struct hubbub *dcn20_hubbub_create(struct dc_context *ctx)
28923         int i;
28924         struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub),
28925 -                                         GFP_KERNEL);
28926 +                                         GFP_ATOMIC);
28928         if (!hubbub)
28929                 return NULL;
28930 @@ -1253,7 +1253,7 @@ struct timing_generator *dcn20_timing_generator_create(
28931                 uint32_t instance)
28933         struct optc *tgn10 =
28934 -               kzalloc(sizeof(struct optc), GFP_KERNEL);
28935 +               kzalloc(sizeof(struct optc), GFP_ATOMIC);
28937         if (!tgn10)
28938                 return NULL;
28939 @@ -1332,7 +1332,7 @@ static struct clock_source *dcn20_clock_source_create(
28940         bool dp_clk_src)
28942         struct dce110_clk_src *clk_src =
28943 -               kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
28944 +               kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC);
28946         if (!clk_src)
28947                 return NULL;
28948 @@ -1438,7 +1438,7 @@ struct display_stream_compressor *dcn20_dsc_create(
28949         struct dc_context *ctx, uint32_t inst)
28951         struct dcn20_dsc *dsc =
28952 -               kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
28953 +               kzalloc(sizeof(struct dcn20_dsc), GFP_ATOMIC);
28955         if (!dsc) {
28956                 BREAK_TO_DEBUGGER();
28957 @@ -1572,7 +1572,7 @@ struct hubp *dcn20_hubp_create(
28958         uint32_t inst)
28960         struct dcn20_hubp *hubp2 =
28961 -               kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL);
28962 +               kzalloc(sizeof(struct dcn20_hubp), GFP_ATOMIC);
28964         if (!hubp2)
28965                 return NULL;
28966 @@ -3390,7 +3390,7 @@ bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
28968  static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
28970 -       struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
28971 +       struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_ATOMIC);
28973         if (!pp_smu)
28974                 return pp_smu;
28975 @@ -4034,7 +4034,7 @@ struct resource_pool *dcn20_create_resource_pool(
28976                 struct dc *dc)
28978         struct dcn20_resource_pool *pool =
28979 -               kzalloc(sizeof(struct dcn20_resource_pool), GFP_KERNEL);
28980 +               kzalloc(sizeof(struct dcn20_resource_pool), GFP_ATOMIC);
28982         if (!pool)
28983                 return NULL;
28984 diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
28985 index 06dc1e2e8383..07c8d2e2c09c 100644
28986 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
28987 +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
28988 @@ -848,7 +848,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
28990                                         cmd.mall.cursor_copy_src.quad_part = cursor_attr.address.quad_part;
28991                                         cmd.mall.cursor_copy_dst.quad_part =
28992 -                                                       plane->address.grph.cursor_cache_addr.quad_part;
28993 +                                                       (plane->address.grph.cursor_cache_addr.quad_part + 2047) & ~2047;
28994                                         cmd.mall.cursor_width = cursor_attr.width;
28995                                         cmd.mall.cursor_height = cursor_attr.height;
28996                                         cmd.mall.cursor_pitch = cursor_attr.pitch;
28997 @@ -858,8 +858,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
28998                                         dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
29000                                         /* Use copied cursor, and it's okay to not switch back */
29001 -                                       cursor_attr.address.quad_part =
29002 -                                                       plane->address.grph.cursor_cache_addr.quad_part;
29003 +                                       cursor_attr.address.quad_part = cmd.mall.cursor_copy_dst.quad_part;
29004                                         dc_stream_set_cursor_attributes(stream, &cursor_attr);
29005                                 }
29007 diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
29008 index 3e6f76096119..a7598356f37d 100644
29009 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
29010 +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
29011 @@ -143,16 +143,18 @@ static void mpc3_power_on_ogam_lut(
29013         struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
29015 -       if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) {
29016 -               // Force power on
29017 -               REG_UPDATE(MPCC_MEM_PWR_CTRL[mpcc_id], MPCC_OGAM_MEM_PWR_DIS, power_on == true ? 1:0);
29018 -               // Wait for confirmation when powering on
29019 -               if (power_on)
29020 -                       REG_WAIT(MPCC_MEM_PWR_CTRL[mpcc_id], MPCC_OGAM_MEM_PWR_STATE, 0, 10, 10);
29021 -       } else {
29022 -               REG_SET(MPCC_MEM_PWR_CTRL[mpcc_id], 0,
29023 -                               MPCC_OGAM_MEM_PWR_FORCE, power_on == true ? 0 : 1);
29024 -       }
29025 +       /*
29026 +        * Powering on: force memory active so the LUT can be updated.
29027 +        * Powering off: allow entering memory low power mode
29028 +        *
29029 +        * Memory low power mode is controlled during MPC OGAM LUT init.
29030 +        */
29031 +       REG_UPDATE(MPCC_MEM_PWR_CTRL[mpcc_id],
29032 +                  MPCC_OGAM_MEM_PWR_DIS, power_on != 0);
29034 +       /* Wait for memory to be powered on - we won't be able to write to it otherwise. */
29035 +       if (power_on)
29036 +               REG_WAIT(MPCC_MEM_PWR_CTRL[mpcc_id], MPCC_OGAM_MEM_PWR_STATE, 0, 10, 10);
29039  static void mpc3_configure_ogam_lut(
29040 @@ -1427,7 +1429,7 @@ const struct mpc_funcs dcn30_mpc_funcs = {
29041         .acquire_rmu = mpcc3_acquire_rmu,
29042         .program_3dlut = mpc3_program_3dlut,
29043         .release_rmu = mpcc3_release_rmu,
29044 -       .power_on_mpc_mem_pwr = mpc20_power_on_ogam_lut,
29045 +       .power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut,
29046         .get_mpc_out_mux = mpc1_get_mpc_out_mux,
29048  };
29049 diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
29050 index fb7f1dea3c46..71e2d5e02571 100644
29051 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
29052 +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
29053 @@ -181,7 +181,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_0_soc = {
29054                 },
29055         .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
29056         .num_states = 1,
29057 -       .sr_exit_time_us = 12,
29058 +       .sr_exit_time_us = 15.5,
29059         .sr_enter_plus_exit_time_us = 20,
29060         .urgent_latency_us = 4.0,
29061         .urgent_latency_pixel_data_only_us = 4.0,
29062 diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
29063 index 4b659b63f75b..d03b1975e417 100644
29064 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
29065 +++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
29066 @@ -164,7 +164,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_02_soc = {
29068                 .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
29069                 .num_states = 1,
29070 -               .sr_exit_time_us = 12,
29071 +               .sr_exit_time_us = 15.5,
29072                 .sr_enter_plus_exit_time_us = 20,
29073                 .urgent_latency_us = 4.0,
29074                 .urgent_latency_pixel_data_only_us = 4.0,
29075 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
29076 index 0f3f510fd83b..9729cf292e84 100644
29077 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
29078 +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
29079 @@ -3437,6 +3437,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
29080                         mode_lib->vba.DCCEnabledInAnyPlane = true;
29081                 }
29082         }
29083 +       mode_lib->vba.UrgentLatency = mode_lib->vba.UrgentLatencyPixelDataOnly;
29084         for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
29085                 locals->FabricAndDRAMBandwidthPerState[i] = dml_min(
29086                                 mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels
29087 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
29088 index 210c96cd5b03..51098c2c9854 100644
29089 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
29090 +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
29091 @@ -3544,6 +3544,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
29092                         mode_lib->vba.DCCEnabledInAnyPlane = true;
29093                 }
29094         }
29095 +       mode_lib->vba.UrgentLatency = mode_lib->vba.UrgentLatencyPixelDataOnly;
29096         for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
29097                 locals->FabricAndDRAMBandwidthPerState[i] = dml_min(
29098                                 mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels
29099 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
29100 index 72423dc425dc..799bae229e67 100644
29101 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
29102 +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
29103 @@ -293,13 +293,31 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
29104         if (surf_linear) {
29105                 log2_swath_height_l = 0;
29106                 log2_swath_height_c = 0;
29107 -       } else if (!surf_vert) {
29108 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
29109 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
29110         } else {
29111 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
29112 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
29113 +               unsigned int swath_height_l;
29114 +               unsigned int swath_height_c;
29116 +               if (!surf_vert) {
29117 +                       swath_height_l = rq_param->misc.rq_l.blk256_height;
29118 +                       swath_height_c = rq_param->misc.rq_c.blk256_height;
29119 +               } else {
29120 +                       swath_height_l = rq_param->misc.rq_l.blk256_width;
29121 +                       swath_height_c = rq_param->misc.rq_c.blk256_width;
29122 +               }
29124 +               if (swath_height_l > 0)
29125 +                       log2_swath_height_l = dml_log2(swath_height_l);
29127 +               if (req128_l && log2_swath_height_l > 0)
29128 +                       log2_swath_height_l -= 1;
29130 +               if (swath_height_c > 0)
29131 +                       log2_swath_height_c = dml_log2(swath_height_c);
29133 +               if (req128_c && log2_swath_height_c > 0)
29134 +                       log2_swath_height_c -= 1;
29135         }
29137         rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
29138         rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
29140 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
29141 index 9c78446c3a9d..6a6d5970d1d5 100644
29142 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
29143 +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
29144 @@ -293,13 +293,31 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
29145         if (surf_linear) {
29146                 log2_swath_height_l = 0;
29147                 log2_swath_height_c = 0;
29148 -       } else if (!surf_vert) {
29149 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
29150 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
29151         } else {
29152 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
29153 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
29154 +               unsigned int swath_height_l;
29155 +               unsigned int swath_height_c;
29157 +               if (!surf_vert) {
29158 +                       swath_height_l = rq_param->misc.rq_l.blk256_height;
29159 +                       swath_height_c = rq_param->misc.rq_c.blk256_height;
29160 +               } else {
29161 +                       swath_height_l = rq_param->misc.rq_l.blk256_width;
29162 +                       swath_height_c = rq_param->misc.rq_c.blk256_width;
29163 +               }
29165 +               if (swath_height_l > 0)
29166 +                       log2_swath_height_l = dml_log2(swath_height_l);
29168 +               if (req128_l && log2_swath_height_l > 0)
29169 +                       log2_swath_height_l -= 1;
29171 +               if (swath_height_c > 0)
29172 +                       log2_swath_height_c = dml_log2(swath_height_c);
29174 +               if (req128_c && log2_swath_height_c > 0)
29175 +                       log2_swath_height_c -= 1;
29176         }
29178         rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
29179         rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
29181 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
29182 index edd41d358291..dc1c81a6e377 100644
29183 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
29184 +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
29185 @@ -277,13 +277,31 @@ static void handle_det_buf_split(
29186         if (surf_linear) {
29187                 log2_swath_height_l = 0;
29188                 log2_swath_height_c = 0;
29189 -       } else if (!surf_vert) {
29190 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
29191 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
29192         } else {
29193 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
29194 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
29195 +               unsigned int swath_height_l;
29196 +               unsigned int swath_height_c;
29198 +               if (!surf_vert) {
29199 +                       swath_height_l = rq_param->misc.rq_l.blk256_height;
29200 +                       swath_height_c = rq_param->misc.rq_c.blk256_height;
29201 +               } else {
29202 +                       swath_height_l = rq_param->misc.rq_l.blk256_width;
29203 +                       swath_height_c = rq_param->misc.rq_c.blk256_width;
29204 +               }
29206 +               if (swath_height_l > 0)
29207 +                       log2_swath_height_l = dml_log2(swath_height_l);
29209 +               if (req128_l && log2_swath_height_l > 0)
29210 +                       log2_swath_height_l -= 1;
29212 +               if (swath_height_c > 0)
29213 +                       log2_swath_height_c = dml_log2(swath_height_c);
29215 +               if (req128_c && log2_swath_height_c > 0)
29216 +                       log2_swath_height_c -= 1;
29217         }
29219         rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
29220         rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
29222 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
29223 index 0f14f205ebe5..04601a767a8f 100644
29224 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
29225 +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
29226 @@ -237,13 +237,31 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
29227         if (surf_linear) {
29228                 log2_swath_height_l = 0;
29229                 log2_swath_height_c = 0;
29230 -       } else if (!surf_vert) {
29231 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
29232 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
29233         } else {
29234 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
29235 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
29236 +               unsigned int swath_height_l;
29237 +               unsigned int swath_height_c;
29239 +               if (!surf_vert) {
29240 +                       swath_height_l = rq_param->misc.rq_l.blk256_height;
29241 +                       swath_height_c = rq_param->misc.rq_c.blk256_height;
29242 +               } else {
29243 +                       swath_height_l = rq_param->misc.rq_l.blk256_width;
29244 +                       swath_height_c = rq_param->misc.rq_c.blk256_width;
29245 +               }
29247 +               if (swath_height_l > 0)
29248 +                       log2_swath_height_l = dml_log2(swath_height_l);
29250 +               if (req128_l && log2_swath_height_l > 0)
29251 +                       log2_swath_height_l -= 1;
29253 +               if (swath_height_c > 0)
29254 +                       log2_swath_height_c = dml_log2(swath_height_c);
29256 +               if (req128_c && log2_swath_height_c > 0)
29257 +                       log2_swath_height_c -= 1;
29258         }
29260         rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
29261         rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
29263 diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
29264 index 4c3e9cc30167..414da64f5734 100644
29265 --- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
29266 +++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
29267 @@ -344,13 +344,31 @@ static void handle_det_buf_split(
29268         if (surf_linear) {
29269                 log2_swath_height_l = 0;
29270                 log2_swath_height_c = 0;
29271 -       } else if (!surf_vert) {
29272 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
29273 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
29274         } else {
29275 -               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
29276 -               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
29277 +               unsigned int swath_height_l;
29278 +               unsigned int swath_height_c;
29280 +               if (!surf_vert) {
29281 +                       swath_height_l = rq_param->misc.rq_l.blk256_height;
29282 +                       swath_height_c = rq_param->misc.rq_c.blk256_height;
29283 +               } else {
29284 +                       swath_height_l = rq_param->misc.rq_l.blk256_width;
29285 +                       swath_height_c = rq_param->misc.rq_c.blk256_width;
29286 +               }
29288 +               if (swath_height_l > 0)
29289 +                       log2_swath_height_l = dml_log2(swath_height_l);
29291 +               if (req128_l && log2_swath_height_l > 0)
29292 +                       log2_swath_height_l -= 1;
29294 +               if (swath_height_c > 0)
29295 +                       log2_swath_height_c = dml_log2(swath_height_c);
29297 +               if (req128_c && log2_swath_height_c > 0)
29298 +                       log2_swath_height_c -= 1;
29299         }
29301         rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
29302         rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
29304 diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
29305 index 5e384a8a83dc..51855a2624cf 100644
29306 --- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
29307 +++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
29308 @@ -39,7 +39,7 @@
29309  #define HDCP14_KSV_SIZE 5
29310  #define HDCP14_MAX_KSV_FIFO_SIZE 127*HDCP14_KSV_SIZE
29312 -static const bool hdcp_cmd_is_read[] = {
29313 +static const bool hdcp_cmd_is_read[HDCP_MESSAGE_ID_MAX] = {
29314         [HDCP_MESSAGE_ID_READ_BKSV] = true,
29315         [HDCP_MESSAGE_ID_READ_RI_R0] = true,
29316         [HDCP_MESSAGE_ID_READ_PJ] = true,
29317 @@ -75,7 +75,7 @@ static const bool hdcp_cmd_is_read[] = {
29318         [HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = false
29319  };
29321 -static const uint8_t hdcp_i2c_offsets[] = {
29322 +static const uint8_t hdcp_i2c_offsets[HDCP_MESSAGE_ID_MAX] = {
29323         [HDCP_MESSAGE_ID_READ_BKSV] = 0x0,
29324         [HDCP_MESSAGE_ID_READ_RI_R0] = 0x8,
29325         [HDCP_MESSAGE_ID_READ_PJ] = 0xA,
29326 @@ -106,7 +106,8 @@ static const uint8_t hdcp_i2c_offsets[] = {
29327         [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x60,
29328         [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x60,
29329         [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x80,
29330 -       [HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x70
29331 +       [HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x70,
29332 +       [HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = 0x0,
29333  };
29335  struct protection_properties {
29336 @@ -184,7 +185,7 @@ static const struct protection_properties hdmi_14_protection = {
29337         .process_transaction = hdmi_14_process_transaction
29338  };
29340 -static const uint32_t hdcp_dpcd_addrs[] = {
29341 +static const uint32_t hdcp_dpcd_addrs[HDCP_MESSAGE_ID_MAX] = {
29342         [HDCP_MESSAGE_ID_READ_BKSV] = 0x68000,
29343         [HDCP_MESSAGE_ID_READ_RI_R0] = 0x68005,
29344         [HDCP_MESSAGE_ID_READ_PJ] = 0xFFFFFFFF,
29345 diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
29346 index 904ce9b88088..afbe8856468a 100644
29347 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
29348 +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
29349 @@ -791,6 +791,8 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
29350                            TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
29351                         hdcp->connection.is_hdcp2_revoked = 1;
29352                         status = MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED;
29353 +               } else {
29354 +                       status = MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
29355                 }
29356         }
29357         mutex_unlock(&psp->hdcp_context.mutex);
29358 diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
29359 index ed05a30d1139..e2a56a7f3d7a 100644
29360 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
29361 +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
29362 @@ -1526,20 +1526,6 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
29364                 smu10_data->gfx_actual_soft_min_freq = min_freq;
29365                 smu10_data->gfx_actual_soft_max_freq = max_freq;
29367 -               ret = smum_send_msg_to_smc_with_parameter(hwmgr,
29368 -                                       PPSMC_MSG_SetHardMinGfxClk,
29369 -                                       min_freq,
29370 -                                       NULL);
29371 -               if (ret)
29372 -                       return ret;
29374 -               ret = smum_send_msg_to_smc_with_parameter(hwmgr,
29375 -                                       PPSMC_MSG_SetSoftMaxGfxClk,
29376 -                                       max_freq,
29377 -                                       NULL);
29378 -               if (ret)
29379 -                       return ret;
29380         } else if (type == PP_OD_COMMIT_DPM_TABLE) {
29381                 if (size != 0) {
29382                         pr_err("Input parameter number not correct\n");
29383 diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
29384 index 599ec9726601..959143eff651 100644
29385 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
29386 +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
29387 @@ -5160,7 +5160,7 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
29389  out:
29390         smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
29391 -                                               1 << power_profile_mode,
29392 +                                               (!power_profile_mode) ? 0 : 1 << (power_profile_mode - 1),
29393                                                 NULL);
29394         hwmgr->power_profile_mode = power_profile_mode;
29396 diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
29397 index cd905e41080e..ec0037a21331 100644
29398 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
29399 +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
29400 @@ -279,35 +279,25 @@ static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_
29401         if (smu->adev->in_suspend)
29402                 return;
29404 -       /*
29405 -        * mclk, fclk and socclk are interdependent
29406 -        * on each other
29407 -        */
29408         if (clk == SMU_MCLK) {
29409 -               /* reset clock dependency */
29410                 smu->user_dpm_profile.clk_dependency = 0;
29411 -               /* set mclk dependent clocks(fclk and socclk) */
29412                 smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
29413         } else if (clk == SMU_FCLK) {
29414 -               /* give priority to mclk, if mclk dependent clocks are set */
29415 +               /* MCLK takes precedence over FCLK */
29416                 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
29417                         return;
29419 -               /* reset clock dependency */
29420                 smu->user_dpm_profile.clk_dependency = 0;
29421 -               /* set fclk dependent clocks(mclk and socclk) */
29422                 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
29423         } else if (clk == SMU_SOCCLK) {
29424 -               /* give priority to mclk, if mclk dependent clocks are set */
29425 +               /* MCLK takes precedence over SOCCLK */
29426                 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
29427                         return;
29429 -               /* reset clock dependency */
29430                 smu->user_dpm_profile.clk_dependency = 0;
29431 -               /* set socclk dependent clocks(mclk and fclk) */
29432                 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
29433         } else
29434 -               /* add clk dependencies here, if any */
29435 +               /* Add clk dependencies here, if any */
29436                 return;
29439 @@ -331,7 +321,7 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu)
29440                 return;
29442         /* Enable restore flag */
29443 -       smu->user_dpm_profile.flags = SMU_DPM_USER_PROFILE_RESTORE;
29444 +       smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;
29446         /* set the user dpm power limit */
29447         if (smu->user_dpm_profile.power_limit) {
29448 @@ -354,8 +344,8 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu)
29449                                 ret = smu_force_clk_levels(smu, clk_type,
29450                                                 smu->user_dpm_profile.clk_mask[clk_type]);
29451                                 if (ret)
29452 -                                       dev_err(smu->adev->dev, "Failed to set clock type = %d\n",
29453 -                                                       clk_type);
29454 +                                       dev_err(smu->adev->dev,
29455 +                                               "Failed to set clock type = %d\n", clk_type);
29456                         }
29457                 }
29458         }
29459 @@ -1777,7 +1767,7 @@ int smu_force_clk_levels(struct smu_context *smu,
29461         if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
29462                 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
29463 -               if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE) {
29464 +               if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
29465                         smu->user_dpm_profile.clk_mask[clk_type] = mask;
29466                         smu_set_user_clk_dependencies(smu, clk_type);
29467                 }
29468 @@ -2034,7 +2024,7 @@ int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
29469         if (smu->ppt_funcs->set_fan_speed_percent) {
29470                 percent = speed * 100 / smu->fan_max_rpm;
29471                 ret = smu->ppt_funcs->set_fan_speed_percent(smu, percent);
29472 -               if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
29473 +               if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
29474                         smu->user_dpm_profile.fan_speed_percent = percent;
29475         }
29477 @@ -2096,6 +2086,7 @@ int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
29478                 dev_err(smu->adev->dev,
29479                         "New power limit (%d) is over the max allowed %d\n",
29480                         limit, smu->max_power_limit);
29481 +               ret = -EINVAL;
29482                 goto out;
29483         }
29485 @@ -2104,7 +2095,7 @@ int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
29487         if (smu->ppt_funcs->set_power_limit) {
29488                 ret = smu->ppt_funcs->set_power_limit(smu, limit);
29489 -               if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
29490 +               if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
29491                         smu->user_dpm_profile.power_limit = limit;
29492         }
29494 @@ -2285,7 +2276,7 @@ int smu_set_fan_control_mode(struct smu_context *smu, int value)
29496         if (smu->ppt_funcs->set_fan_control_mode) {
29497                 ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
29498 -               if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
29499 +               if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
29500                         smu->user_dpm_profile.fan_mode = value;
29501         }
29503 @@ -2293,7 +2284,7 @@ int smu_set_fan_control_mode(struct smu_context *smu, int value)
29505         /* reset user dpm fan speed */
29506         if (!ret && value != AMD_FAN_CTRL_MANUAL &&
29507 -                       smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
29508 +                       !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
29509                 smu->user_dpm_profile.fan_speed_percent = 0;
29511         return ret;
29512 @@ -2335,7 +2326,7 @@ int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
29513                 if (speed > 100)
29514                         speed = 100;
29515                 ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
29516 -               if (!ret && smu->user_dpm_profile.flags != SMU_DPM_USER_PROFILE_RESTORE)
29517 +               if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
29518                         smu->user_dpm_profile.fan_speed_percent = speed;
29519         }
29521 diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
29522 index 6e641f1513d8..fbff3df72e6c 100644
29523 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
29524 +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
29525 @@ -1110,7 +1110,6 @@ static int navi10_force_clk_levels(struct smu_context *smu,
29526         case SMU_SOCCLK:
29527         case SMU_MCLK:
29528         case SMU_UCLK:
29529 -       case SMU_DCEFCLK:
29530         case SMU_FCLK:
29531                 /* There is only 2 levels for fine grained DPM */
29532                 if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
29533 @@ -1130,6 +1129,10 @@ static int navi10_force_clk_levels(struct smu_context *smu,
29534                 if (ret)
29535                         return size;
29536                 break;
29537 +       case SMU_DCEFCLK:
29538 +               dev_info(smu->adev->dev,"Setting DCEFCLK min/max dpm level is not supported!\n");
29539 +               break;
29541         default:
29542                 break;
29543         }
29544 diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
29545 index af73e1430af5..61438940c26e 100644
29546 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
29547 +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
29548 @@ -1127,7 +1127,6 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
29549         case SMU_SOCCLK:
29550         case SMU_MCLK:
29551         case SMU_UCLK:
29552 -       case SMU_DCEFCLK:
29553         case SMU_FCLK:
29554                 /* There is only 2 levels for fine grained DPM */
29555                 if (sienna_cichlid_is_support_fine_grained_dpm(smu, clk_type)) {
29556 @@ -1147,6 +1146,9 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
29557                 if (ret)
29558                         goto forec_level_out;
29559                 break;
29560 +       case SMU_DCEFCLK:
29561 +               dev_info(smu->adev->dev,"Setting DCEFCLK min/max dpm level is not supported!\n");
29562 +               break;
29563         default:
29564                 break;
29565         }
29566 diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
29567 index 101eaa20db9b..a80f551771b9 100644
29568 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
29569 +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
29570 @@ -1462,7 +1462,6 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
29571                                         long input[], uint32_t size)
29573         int ret = 0;
29574 -       int i;
29575         struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
29577         if (!(smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)) {
29578 @@ -1535,43 +1534,6 @@ static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TAB
29579                         smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
29580                         smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
29581                         smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
29583 -                       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
29584 -                                                                       smu->gfx_actual_hard_min_freq, NULL);
29585 -                       if (ret) {
29586 -                               dev_err(smu->adev->dev, "Restore the default hard min sclk failed!");
29587 -                               return ret;
29588 -                       }
29590 -                       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
29591 -                                                                       smu->gfx_actual_soft_max_freq, NULL);
29592 -                       if (ret) {
29593 -                               dev_err(smu->adev->dev, "Restore the default soft max sclk failed!");
29594 -                               return ret;
29595 -                       }
29597 -                       if (smu->adev->pm.fw_version < 0x43f1b00) {
29598 -                               dev_warn(smu->adev->dev, "CPUSoftMax/CPUSoftMin are not supported, please update SBIOS!\n");
29599 -                               break;
29600 -                       }
29602 -                       for (i = 0; i < smu->cpu_core_num; i++) {
29603 -                               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk,
29604 -                                                                     (i << 20) | smu->cpu_actual_soft_min_freq,
29605 -                                                                     NULL);
29606 -                               if (ret) {
29607 -                                       dev_err(smu->adev->dev, "Set hard min cclk failed!");
29608 -                                       return ret;
29609 -                               }
29611 -                               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk,
29612 -                                                                     (i << 20) | smu->cpu_actual_soft_max_freq,
29613 -                                                                     NULL);
29614 -                               if (ret) {
29615 -                                       dev_err(smu->adev->dev, "Set soft max cclk failed!");
29616 -                                       return ret;
29617 -                               }
29618 -                       }
29619                 }
29620                 break;
29621         case PP_OD_COMMIT_DPM_TABLE:
29622 diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
29623 index 5493388fcb10..dbe6d0caddb7 100644
29624 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
29625 +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
29626 @@ -389,24 +389,6 @@ static int renoir_od_edit_dpm_table(struct smu_context *smu,
29627                 }
29628                 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
29629                 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
29631 -               ret = smu_cmn_send_smc_msg_with_param(smu,
29632 -                                                               SMU_MSG_SetHardMinGfxClk,
29633 -                                                               smu->gfx_actual_hard_min_freq,
29634 -                                                               NULL);
29635 -               if (ret) {
29636 -                       dev_err(smu->adev->dev, "Restore the default hard min sclk failed!");
29637 -                       return ret;
29638 -               }
29640 -               ret = smu_cmn_send_smc_msg_with_param(smu,
29641 -                                                               SMU_MSG_SetSoftMaxGfxClk,
29642 -                                                               smu->gfx_actual_soft_max_freq,
29643 -                                                               NULL);
29644 -               if (ret) {
29645 -                       dev_err(smu->adev->dev, "Restore the default soft max sclk failed!");
29646 -                       return ret;
29647 -               }
29648                 break;
29649         case PP_OD_COMMIT_DPM_TABLE:
29650                 if (size != 0) {
29651 diff --git a/drivers/gpu/drm/arm/display/include/malidp_utils.h b/drivers/gpu/drm/arm/display/include/malidp_utils.h
29652 index 3bc383d5bf73..49a1d7f3539c 100644
29653 --- a/drivers/gpu/drm/arm/display/include/malidp_utils.h
29654 +++ b/drivers/gpu/drm/arm/display/include/malidp_utils.h
29655 @@ -13,9 +13,6 @@
29656  #define has_bit(nr, mask)      (BIT(nr) & (mask))
29657  #define has_bits(bits, mask)   (((bits) & (mask)) == (bits))
29659 -#define dp_for_each_set_bit(bit, mask) \
29660 -       for_each_set_bit((bit), ((unsigned long *)&(mask)), sizeof(mask) * 8)
29662  #define dp_wait_cond(__cond, __tries, __min_range, __max_range)        \
29663  ({                                                     \
29664         int num_tries = __tries;                        \
29665 diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
29666 index 719a79728e24..06c595378dda 100644
29667 --- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
29668 +++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
29669 @@ -46,8 +46,9 @@ void komeda_pipeline_destroy(struct komeda_dev *mdev,
29671         struct komeda_component *c;
29672         int i;
29673 +       unsigned long avail_comps = pipe->avail_comps;
29675 -       dp_for_each_set_bit(i, pipe->avail_comps) {
29676 +       for_each_set_bit(i, &avail_comps, 32) {
29677                 c = komeda_pipeline_get_component(pipe, i);
29678                 komeda_component_destroy(mdev, c);
29679         }
29680 @@ -247,6 +248,7 @@ static void komeda_pipeline_dump(struct komeda_pipeline *pipe)
29682         struct komeda_component *c;
29683         int id;
29684 +       unsigned long avail_comps = pipe->avail_comps;
29686         DRM_INFO("Pipeline-%d: n_layers: %d, n_scalers: %d, output: %s.\n",
29687                  pipe->id, pipe->n_layers, pipe->n_scalers,
29688 @@ -258,7 +260,7 @@ static void komeda_pipeline_dump(struct komeda_pipeline *pipe)
29689                  pipe->of_output_links[1] ?
29690                  pipe->of_output_links[1]->full_name : "none");
29692 -       dp_for_each_set_bit(id, pipe->avail_comps) {
29693 +       for_each_set_bit(id, &avail_comps, 32) {
29694                 c = komeda_pipeline_get_component(pipe, id);
29696                 komeda_component_dump(c);
29697 @@ -270,8 +272,9 @@ static void komeda_component_verify_inputs(struct komeda_component *c)
29698         struct komeda_pipeline *pipe = c->pipeline;
29699         struct komeda_component *input;
29700         int id;
29701 +       unsigned long supported_inputs = c->supported_inputs;
29703 -       dp_for_each_set_bit(id, c->supported_inputs) {
29704 +       for_each_set_bit(id, &supported_inputs, 32) {
29705                 input = komeda_pipeline_get_component(pipe, id);
29706                 if (!input) {
29707                         c->supported_inputs &= ~(BIT(id));
29708 @@ -302,8 +305,9 @@ static void komeda_pipeline_assemble(struct komeda_pipeline *pipe)
29709         struct komeda_component *c;
29710         struct komeda_layer *layer;
29711         int i, id;
29712 +       unsigned long avail_comps = pipe->avail_comps;
29714 -       dp_for_each_set_bit(id, pipe->avail_comps) {
29715 +       for_each_set_bit(id, &avail_comps, 32) {
29716                 c = komeda_pipeline_get_component(pipe, id);
29717                 komeda_component_verify_inputs(c);
29718         }
29719 @@ -355,13 +359,15 @@ void komeda_pipeline_dump_register(struct komeda_pipeline *pipe,
29721         struct komeda_component *c;
29722         u32 id;
29723 +       unsigned long avail_comps;
29725         seq_printf(sf, "\n======== Pipeline-%d ==========\n", pipe->id);
29727         if (pipe->funcs && pipe->funcs->dump_register)
29728                 pipe->funcs->dump_register(pipe, sf);
29730 -       dp_for_each_set_bit(id, pipe->avail_comps) {
29731 +       avail_comps = pipe->avail_comps;
29732 +       for_each_set_bit(id, &avail_comps, 32) {
29733                 c = komeda_pipeline_get_component(pipe, id);
29735                 seq_printf(sf, "\n------%s------\n", c->name);
29736 diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
29737 index 5c085116de3f..e672b9cffee3 100644
29738 --- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
29739 +++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
29740 @@ -1231,14 +1231,15 @@ komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
29741         struct komeda_pipeline_state *old = priv_to_pipe_st(pipe->obj.state);
29742         struct komeda_component_state *c_st;
29743         struct komeda_component *c;
29744 -       u32 disabling_comps, id;
29745 +       u32 id;
29746 +       unsigned long disabling_comps;
29748         WARN_ON(!old);
29750         disabling_comps = (~new->active_comps) & old->active_comps;
29752         /* unbound all disabling component */
29753 -       dp_for_each_set_bit(id, disabling_comps) {
29754 +       for_each_set_bit(id, &disabling_comps, 32) {
29755                 c = komeda_pipeline_get_component(pipe, id);
29756                 c_st = komeda_component_get_state_and_set_user(c,
29757                                 drm_st, NULL, new->crtc);
29758 @@ -1286,7 +1287,8 @@ bool komeda_pipeline_disable(struct komeda_pipeline *pipe,
29759         struct komeda_pipeline_state *old;
29760         struct komeda_component *c;
29761         struct komeda_component_state *c_st;
29762 -       u32 id, disabling_comps = 0;
29763 +       u32 id;
29764 +       unsigned long disabling_comps;
29766         old = komeda_pipeline_get_old_state(pipe, old_state);
29768 @@ -1296,10 +1298,10 @@ bool komeda_pipeline_disable(struct komeda_pipeline *pipe,
29769                 disabling_comps = old->active_comps &
29770                                   pipe->standalone_disabled_comps;
29772 -       DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, disabling_comps: 0x%x.\n",
29773 +       DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, disabling_comps: 0x%lx.\n",
29774                          pipe->id, old->active_comps, disabling_comps);
29776 -       dp_for_each_set_bit(id, disabling_comps) {
29777 +       for_each_set_bit(id, &disabling_comps, 32) {
29778                 c = komeda_pipeline_get_component(pipe, id);
29779                 c_st = priv_to_comp_st(c->obj.state);
29781 @@ -1330,16 +1332,17 @@ void komeda_pipeline_update(struct komeda_pipeline *pipe,
29782         struct komeda_pipeline_state *new = priv_to_pipe_st(pipe->obj.state);
29783         struct komeda_pipeline_state *old;
29784         struct komeda_component *c;
29785 -       u32 id, changed_comps = 0;
29786 +       u32 id;
29787 +       unsigned long changed_comps;
29789         old = komeda_pipeline_get_old_state(pipe, old_state);
29791         changed_comps = new->active_comps | old->active_comps;
29793 -       DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, changed: 0x%x.\n",
29794 +       DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, changed: 0x%lx.\n",
29795                          pipe->id, new->active_comps, changed_comps);
29797 -       dp_for_each_set_bit(id, changed_comps) {
29798 +       for_each_set_bit(id, &changed_comps, 32) {
29799                 c = komeda_pipeline_get_component(pipe, id);
29801                 if (new->active_comps & BIT(c->id))
29802 diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
29803 index ea8164e7a6dc..01837bea18c2 100644
29804 --- a/drivers/gpu/drm/ast/ast_drv.c
29805 +++ b/drivers/gpu/drm/ast/ast_drv.c
29806 @@ -30,6 +30,7 @@
29807  #include <linux/module.h>
29808  #include <linux/pci.h>
29810 +#include <drm/drm_atomic_helper.h>
29811  #include <drm/drm_crtc_helper.h>
29812  #include <drm/drm_drv.h>
29813  #include <drm/drm_fb_helper.h>
29814 @@ -138,6 +139,7 @@ static void ast_pci_remove(struct pci_dev *pdev)
29815         struct drm_device *dev = pci_get_drvdata(pdev);
29817         drm_dev_unregister(dev);
29818 +       drm_atomic_helper_shutdown(dev);
29821  static int ast_drm_freeze(struct drm_device *dev)
29822 diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
29823 index 988b270fea5e..758c69aa7232 100644
29824 --- a/drivers/gpu/drm/ast/ast_mode.c
29825 +++ b/drivers/gpu/drm/ast/ast_mode.c
29826 @@ -688,7 +688,7 @@ ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
29827         unsigned int offset_x, offset_y;
29829         offset_x = AST_MAX_HWC_WIDTH - fb->width;
29830 -       offset_y = AST_MAX_HWC_WIDTH - fb->height;
29831 +       offset_y = AST_MAX_HWC_HEIGHT - fb->height;
29833         if (state->fb != old_state->fb) {
29834                 /* A new cursor image was installed. */
29835 diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
29836 index e4110d6ca7b3..bc60fc4728d7 100644
29837 --- a/drivers/gpu/drm/bridge/Kconfig
29838 +++ b/drivers/gpu/drm/bridge/Kconfig
29839 @@ -67,6 +67,7 @@ config DRM_LONTIUM_LT9611UXC
29840         depends on OF
29841         select DRM_PANEL_BRIDGE
29842         select DRM_KMS_HELPER
29843 +       select DRM_MIPI_DSI
29844         select REGMAP_I2C
29845         help
29846           Driver for Lontium LT9611UXC DSI to HDMI bridge
29847 @@ -151,6 +152,7 @@ config DRM_SII902X
29848         tristate "Silicon Image sii902x RGB/HDMI bridge"
29849         depends on OF
29850         select DRM_KMS_HELPER
29851 +       select DRM_MIPI_DSI
29852         select REGMAP_I2C
29853         select I2C_MUX
29854         select SND_SOC_HDMI_CODEC if SND_SOC
29855 @@ -200,6 +202,7 @@ config DRM_TOSHIBA_TC358767
29856         tristate "Toshiba TC358767 eDP bridge"
29857         depends on OF
29858         select DRM_KMS_HELPER
29859 +       select DRM_MIPI_DSI
29860         select REGMAP_I2C
29861         select DRM_PANEL
29862         help
29863 diff --git a/drivers/gpu/drm/bridge/analogix/Kconfig b/drivers/gpu/drm/bridge/analogix/Kconfig
29864 index 024ea2a570e7..9160fd80dd70 100644
29865 --- a/drivers/gpu/drm/bridge/analogix/Kconfig
29866 +++ b/drivers/gpu/drm/bridge/analogix/Kconfig
29867 @@ -30,6 +30,7 @@ config DRM_ANALOGIX_ANX7625
29868         tristate "Analogix Anx7625 MIPI to DP interface support"
29869         depends on DRM
29870         depends on OF
29871 +       select DRM_MIPI_DSI
29872         help
29873           ANX7625 is an ultra-low power 4K mobile HD transmitter
29874           designed for portable devices. It converts MIPI/DPI to
29875 diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
29876 index 0ddc37551194..c916f4b8907e 100644
29877 --- a/drivers/gpu/drm/bridge/panel.c
29878 +++ b/drivers/gpu/drm/bridge/panel.c
29879 @@ -87,6 +87,18 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
29881  static void panel_bridge_detach(struct drm_bridge *bridge)
29883 +       struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
29884 +       struct drm_connector *connector = &panel_bridge->connector;
29886 +       /*
29887 +        * Cleanup the connector if we know it was initialized.
29888 +        *
29889 +        * FIXME: This wouldn't be needed if the panel_bridge structure was
29890 +        * allocated with drmm_kzalloc(). This might be tricky since the
29891 +        * drm_device pointer can only be retrieved when the bridge is attached.
29892 +        */
29893 +       if (connector->dev)
29894 +               drm_connector_cleanup(connector);
29897  static void panel_bridge_pre_enable(struct drm_bridge *bridge)
29898 diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
29899 index 309afe61afdd..9c75c8815056 100644
29900 --- a/drivers/gpu/drm/drm_dp_mst_topology.c
29901 +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
29902 @@ -1154,6 +1154,7 @@ static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
29904         req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
29905         drm_dp_encode_sideband_req(&req, msg);
29906 +       msg->path_msg = true;
29909  static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
29910 @@ -2824,15 +2825,21 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
29912         req_type = txmsg->msg[0] & 0x7f;
29913         if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
29914 -               req_type == DP_RESOURCE_STATUS_NOTIFY)
29915 +               req_type == DP_RESOURCE_STATUS_NOTIFY ||
29916 +               req_type == DP_CLEAR_PAYLOAD_ID_TABLE)
29917                 hdr->broadcast = 1;
29918         else
29919                 hdr->broadcast = 0;
29920         hdr->path_msg = txmsg->path_msg;
29921 -       hdr->lct = mstb->lct;
29922 -       hdr->lcr = mstb->lct - 1;
29923 -       if (mstb->lct > 1)
29924 -               memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
29925 +       if (hdr->broadcast) {
29926 +               hdr->lct = 1;
29927 +               hdr->lcr = 6;
29928 +       } else {
29929 +               hdr->lct = mstb->lct;
29930 +               hdr->lcr = mstb->lct - 1;
29931 +       }
29933 +       memcpy(hdr->rad, mstb->rad, hdr->lct / 2);
29935         return 0;
29937 diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
29938 index 58f5dc2f6dd5..f6bdec7fa925 100644
29939 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
29940 +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
29941 @@ -84,6 +84,13 @@ static const struct drm_dmi_panel_orientation_data itworks_tw891 = {
29942         .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
29943  };
29945 +static const struct drm_dmi_panel_orientation_data onegx1_pro = {
29946 +       .width = 1200,
29947 +       .height = 1920,
29948 +       .bios_dates = (const char * const []){ "12/17/2020", NULL },
29949 +       .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
29952  static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = {
29953         .width = 720,
29954         .height = 1280,
29955 @@ -211,6 +218,13 @@ static const struct dmi_system_id orientation_data[] = {
29956                   DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
29957                 },
29958                 .driver_data = (void *)&lcd1200x1920_rightside_up,
29959 +       }, {    /* OneGX1 Pro */
29960 +               .matches = {
29961 +                 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SYSTEM_MANUFACTURER"),
29962 +                 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SYSTEM_PRODUCT_NAME"),
29963 +                 DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Default string"),
29964 +               },
29965 +               .driver_data = (void *)&onegx1_pro,
29966         }, {    /* VIOS LTH17 */
29967                 .matches = {
29968                   DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),
29969 diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
29970 index ad59a51eab6d..e7e1ee2aa352 100644
29971 --- a/drivers/gpu/drm/drm_probe_helper.c
29972 +++ b/drivers/gpu/drm/drm_probe_helper.c
29973 @@ -624,6 +624,7 @@ static void output_poll_execute(struct work_struct *work)
29974         struct drm_connector_list_iter conn_iter;
29975         enum drm_connector_status old_status;
29976         bool repoll = false, changed;
29977 +       u64 old_epoch_counter;
29979         if (!dev->mode_config.poll_enabled)
29980                 return;
29981 @@ -660,8 +661,9 @@ static void output_poll_execute(struct work_struct *work)
29983                 repoll = true;
29985 +               old_epoch_counter = connector->epoch_counter;
29986                 connector->status = drm_helper_probe_detect(connector, NULL, false);
29987 -               if (old_status != connector->status) {
29988 +               if (old_epoch_counter != connector->epoch_counter) {
29989                         const char *old, *new;
29991                         /*
29992 @@ -690,6 +692,9 @@ static void output_poll_execute(struct work_struct *work)
29993                                       connector->base.id,
29994                                       connector->name,
29995                                       old, new);
29996 +                       DRM_DEBUG_KMS("[CONNECTOR:%d:%s] epoch counter %llu -> %llu\n",
29997 +                                     connector->base.id, connector->name,
29998 +                                     old_epoch_counter, connector->epoch_counter);
30000                         changed = true;
30001                 }
30002 diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
30003 index 775d89b6c3fc..97a785aa8839 100644
30004 --- a/drivers/gpu/drm/i915/display/intel_dp.c
30005 +++ b/drivers/gpu/drm/i915/display/intel_dp.c
30006 @@ -1174,44 +1174,6 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
30007         return -EINVAL;
30010 -/* Optimize link config in order: max bpp, min lanes, min clock */
30011 -static int
30012 -intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
30013 -                                 struct intel_crtc_state *pipe_config,
30014 -                                 const struct link_config_limits *limits)
30016 -       const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
30017 -       int bpp, clock, lane_count;
30018 -       int mode_rate, link_clock, link_avail;
30020 -       for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
30021 -               int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
30023 -               mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
30024 -                                                  output_bpp);
30026 -               for (lane_count = limits->min_lane_count;
30027 -                    lane_count <= limits->max_lane_count;
30028 -                    lane_count <<= 1) {
30029 -                       for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
30030 -                               link_clock = intel_dp->common_rates[clock];
30031 -                               link_avail = intel_dp_max_data_rate(link_clock,
30032 -                                                                   lane_count);
30034 -                               if (mode_rate <= link_avail) {
30035 -                                       pipe_config->lane_count = lane_count;
30036 -                                       pipe_config->pipe_bpp = bpp;
30037 -                                       pipe_config->port_clock = link_clock;
30039 -                                       return 0;
30040 -                               }
30041 -                       }
30042 -               }
30043 -       }
30045 -       return -EINVAL;
30048  static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
30050         int i, num_bpc;
30051 @@ -1461,22 +1423,11 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
30052             intel_dp_can_bigjoiner(intel_dp))
30053                 pipe_config->bigjoiner = true;
30055 -       if (intel_dp_is_edp(intel_dp))
30056 -               /*
30057 -                * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
30058 -                * section A.1: "It is recommended that the minimum number of
30059 -                * lanes be used, using the minimum link rate allowed for that
30060 -                * lane configuration."
30061 -                *
30062 -                * Note that we fall back to the max clock and lane count for eDP
30063 -                * panels that fail with the fast optimal settings (see
30064 -                * intel_dp->use_max_params), in which case the fast vs. wide
30065 -                * choice doesn't matter.
30066 -                */
30067 -               ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config, &limits);
30068 -       else
30069 -               /* Optimize for slow and wide. */
30070 -               ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
30071 +       /*
30072 +        * Optimize for slow and wide for everything, because there are some
30073 +        * eDP 1.3 and 1.4 panels don't work well with fast and narrow.
30074 +        */
30075 +       ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
30077         /* enable compression if the mode doesn't fit available BW */
30078         drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
30079 @@ -4537,7 +4488,18 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
30080         drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);
30082         for (;;) {
30083 -               u8 esi[DP_DPRX_ESI_LEN] = {};
30084 +               /*
30085 +                * The +2 is because DP_DPRX_ESI_LEN is 14, but we then
30086 +                * pass in "esi+10" to drm_dp_channel_eq_ok(), which
30087 +                * takes a 6-byte array. So we actually need 16 bytes
30088 +                * here.
30089 +                *
30090 +                * Somebody who knows what the limits actually are
30091 +                * should check this, but for now this is at least
30092 +                * harmless and avoids a valid compiler warning about
30093 +                * using more of the array than we have allocated.
30094 +                */
30095 +               u8 esi[DP_DPRX_ESI_LEN+2] = {};
30096                 bool handled;
30097                 int retry;
30099 diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
30100 index f455040fa989..7cbc81da80b7 100644
30101 --- a/drivers/gpu/drm/i915/display/intel_overlay.c
30102 +++ b/drivers/gpu/drm/i915/display/intel_overlay.c
30103 @@ -383,7 +383,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
30104                 i830_overlay_clock_gating(dev_priv, true);
30107 -static void
30108 +__i915_active_call static void
30109  intel_overlay_last_flip_retire(struct i915_active *active)
30111         struct intel_overlay *overlay =
30112 diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
30113 index ec28a6cde49b..0b2434e29d00 100644
30114 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
30115 +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
30116 @@ -189,7 +189,7 @@ compute_partial_view(const struct drm_i915_gem_object *obj,
30117         struct i915_ggtt_view view;
30119         if (i915_gem_object_is_tiled(obj))
30120 -               chunk = roundup(chunk, tile_row_pages(obj));
30121 +               chunk = roundup(chunk, tile_row_pages(obj) ?: 1);
30123         view.type = I915_GGTT_VIEW_PARTIAL;
30124         view.partial.offset = rounddown(page_offset, chunk);
30125 diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
30126 index 755522ced60d..3ae16945bd43 100644
30127 --- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
30128 +++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
30129 @@ -630,7 +630,6 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
30131                 err = pin_pt_dma(vm, pde->pt.base);
30132                 if (err) {
30133 -                       i915_gem_object_put(pde->pt.base);
30134                         free_pd(vm, pde);
30135                         return err;
30136                 }
30137 diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
30138 index 67de2b189598..4b09490c20c0 100644
30139 --- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
30140 +++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
30141 @@ -670,8 +670,8 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
30142                  * banks of memory are paired and unswizzled on the
30143                  * uneven portion, so leave that as unknown.
30144                  */
30145 -               if (intel_uncore_read(uncore, C0DRB3) ==
30146 -                   intel_uncore_read(uncore, C1DRB3)) {
30147 +               if (intel_uncore_read16(uncore, C0DRB3) ==
30148 +                   intel_uncore_read16(uncore, C1DRB3)) {
30149                         swizzle_x = I915_BIT_6_SWIZZLE_9_10;
30150                         swizzle_y = I915_BIT_6_SWIZZLE_9;
30151                 }
30152 diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
30153 index d1d8ee4a5f16..57578bf28d77 100644
30154 --- a/drivers/gpu/drm/i915/gvt/gvt.c
30155 +++ b/drivers/gpu/drm/i915/gvt/gvt.c
30156 @@ -126,7 +126,7 @@ static bool intel_get_gvt_attrs(struct attribute_group ***intel_vgpu_type_groups
30157         return true;
30160 -static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
30161 +static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
30163         int i, j;
30164         struct intel_vgpu_type *type;
30165 @@ -144,7 +144,7 @@ static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
30166                 gvt_vgpu_type_groups[i] = group;
30167         }
30169 -       return true;
30170 +       return 0;
30172  unwind:
30173         for (j = 0; j < i; j++) {
30174 @@ -152,7 +152,7 @@ static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
30175                 kfree(group);
30176         }
30178 -       return false;
30179 +       return -ENOMEM;
30182  static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
30183 @@ -360,7 +360,7 @@ int intel_gvt_init_device(struct drm_i915_private *i915)
30184                 goto out_clean_thread;
30186         ret = intel_gvt_init_vgpu_type_groups(gvt);
30187 -       if (ret == false) {
30188 +       if (ret) {
30189                 gvt_err("failed to init vgpu type groups: %d\n", ret);
30190                 goto out_clean_types;
30191         }
30192 diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
30193 index 3bc616cc1ad2..ea660e541c90 100644
30194 --- a/drivers/gpu/drm/i915/i915_active.c
30195 +++ b/drivers/gpu/drm/i915/i915_active.c
30196 @@ -1156,7 +1156,8 @@ static int auto_active(struct i915_active *ref)
30197         return 0;
30200 -static void auto_retire(struct i915_active *ref)
30201 +__i915_active_call static void
30202 +auto_retire(struct i915_active *ref)
30204         i915_active_put(ref);
30206 diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
30207 index 8e9cb44e66e5..4ecb813c9bc7 100644
30208 --- a/drivers/gpu/drm/i915/i915_drv.c
30209 +++ b/drivers/gpu/drm/i915/i915_drv.c
30210 @@ -1049,6 +1049,8 @@ static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
30211  void i915_driver_shutdown(struct drm_i915_private *i915)
30213         disable_rpm_wakeref_asserts(&i915->runtime_pm);
30214 +       intel_runtime_pm_disable(&i915->runtime_pm);
30215 +       intel_power_domains_disable(i915);
30217         i915_gem_suspend(i915);
30219 @@ -1064,7 +1066,15 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
30220         intel_suspend_encoders(i915);
30221         intel_shutdown_encoders(i915);
30223 +       /*
30224 +        * The only requirement is to reboot with display DC states disabled,
30225 +        * for now leaving all display power wells in the INIT power domain
30226 +        * enabled matching the driver reload sequence.
30227 +        */
30228 +       intel_power_domains_driver_remove(i915);
30229         enable_rpm_wakeref_asserts(&i915->runtime_pm);
30231 +       intel_runtime_pm_driver_release(&i915->runtime_pm);
30234  static bool suspend_to_idle(struct drm_i915_private *dev_priv)
30235 diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
30236 index 4b4d8d034782..4ba20f959a71 100644
30237 --- a/drivers/gpu/drm/i915/intel_pm.c
30238 +++ b/drivers/gpu/drm/i915/intel_pm.c
30239 @@ -2993,7 +2993,7 @@ int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
30241  static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
30242                                    const char *name,
30243 -                                  const u16 wm[8])
30244 +                                  const u16 wm[])
30246         int level, max_level = ilk_wm_max_level(dev_priv);
30248 diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
30249 index 7bb31fbee29d..fd8870edde0e 100644
30250 --- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
30251 +++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
30252 @@ -554,7 +554,7 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
30253                 height = state->src_h >> 16;
30254                 cpp = state->fb->format->cpp[0];
30256 -               if (priv->soc_info->has_osd && plane->type == DRM_PLANE_TYPE_OVERLAY)
30257 +               if (!priv->soc_info->has_osd || plane->type == DRM_PLANE_TYPE_OVERLAY)
30258                         hwdesc = &priv->dma_hwdescs->hwdesc_f0;
30259                 else
30260                         hwdesc = &priv->dma_hwdescs->hwdesc_f1;
30261 @@ -826,6 +826,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
30262         const struct jz_soc_info *soc_info;
30263         struct ingenic_drm *priv;
30264         struct clk *parent_clk;
30265 +       struct drm_plane *primary;
30266         struct drm_bridge *bridge;
30267         struct drm_panel *panel;
30268         struct drm_encoder *encoder;
30269 @@ -940,9 +941,11 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
30270         if (soc_info->has_osd)
30271                 priv->ipu_plane = drm_plane_from_index(drm, 0);
30273 -       drm_plane_helper_add(&priv->f1, &ingenic_drm_plane_helper_funcs);
30274 +       primary = priv->soc_info->has_osd ? &priv->f1 : &priv->f0;
30276 -       ret = drm_universal_plane_init(drm, &priv->f1, 1,
30277 +       drm_plane_helper_add(primary, &ingenic_drm_plane_helper_funcs);
30279 +       ret = drm_universal_plane_init(drm, primary, 1,
30280                                        &ingenic_drm_primary_plane_funcs,
30281                                        priv->soc_info->formats_f1,
30282                                        priv->soc_info->num_formats_f1,
30283 @@ -954,7 +957,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
30285         drm_crtc_helper_add(&priv->crtc, &ingenic_drm_crtc_helper_funcs);
30287 -       ret = drm_crtc_init_with_planes(drm, &priv->crtc, &priv->f1,
30288 +       ret = drm_crtc_init_with_planes(drm, &priv->crtc, primary,
30289                                         NULL, &ingenic_drm_crtc_funcs, NULL);
30290         if (ret) {
30291                 dev_err(dev, "Failed to init CRTC: %i\n", ret);
30292 diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
30293 index 2314c8122992..b3fd3501c412 100644
30294 --- a/drivers/gpu/drm/mcde/mcde_dsi.c
30295 +++ b/drivers/gpu/drm/mcde/mcde_dsi.c
30296 @@ -760,7 +760,7 @@ static void mcde_dsi_start(struct mcde_dsi *d)
30297                 DSI_MCTL_MAIN_DATA_CTL_BTA_EN |
30298                 DSI_MCTL_MAIN_DATA_CTL_READ_EN |
30299                 DSI_MCTL_MAIN_DATA_CTL_REG_TE_EN;
30300 -       if (d->mdsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET)
30301 +       if (!(d->mdsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET))
30302                 val |= DSI_MCTL_MAIN_DATA_CTL_HOST_EOT_GEN;
30303         writel(val, d->regs + DSI_MCTL_MAIN_DATA_CTL);
30305 diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
30306 index 8ee55f9e2954..7fb358167f8d 100644
30307 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
30308 +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
30309 @@ -153,7 +153,7 @@ struct mtk_hdmi_conf {
30310  struct mtk_hdmi {
30311         struct drm_bridge bridge;
30312         struct drm_bridge *next_bridge;
30313 -       struct drm_connector conn;
30314 +       struct drm_connector *curr_conn;/* current connector (only valid when 'enabled') */
30315         struct device *dev;
30316         const struct mtk_hdmi_conf *conf;
30317         struct phy *phy;
30318 @@ -186,11 +186,6 @@ static inline struct mtk_hdmi *hdmi_ctx_from_bridge(struct drm_bridge *b)
30319         return container_of(b, struct mtk_hdmi, bridge);
30322 -static inline struct mtk_hdmi *hdmi_ctx_from_conn(struct drm_connector *c)
30324 -       return container_of(c, struct mtk_hdmi, conn);
30327  static u32 mtk_hdmi_read(struct mtk_hdmi *hdmi, u32 offset)
30329         return readl(hdmi->regs + offset);
30330 @@ -974,7 +969,7 @@ static int mtk_hdmi_setup_avi_infoframe(struct mtk_hdmi *hdmi,
30331         ssize_t err;
30333         err = drm_hdmi_avi_infoframe_from_display_mode(&frame,
30334 -                                                      &hdmi->conn, mode);
30335 +                                                      hdmi->curr_conn, mode);
30336         if (err < 0) {
30337                 dev_err(hdmi->dev,
30338                         "Failed to get AVI infoframe from mode: %zd\n", err);
30339 @@ -1054,7 +1049,7 @@ static int mtk_hdmi_setup_vendor_specific_infoframe(struct mtk_hdmi *hdmi,
30340         ssize_t err;
30342         err = drm_hdmi_vendor_infoframe_from_display_mode(&frame,
30343 -                                                         &hdmi->conn, mode);
30344 +                                                         hdmi->curr_conn, mode);
30345         if (err) {
30346                 dev_err(hdmi->dev,
30347                         "Failed to get vendor infoframe from mode: %zd\n", err);
30348 @@ -1201,48 +1196,16 @@ mtk_hdmi_update_plugged_status(struct mtk_hdmi *hdmi)
30349                connector_status_connected : connector_status_disconnected;
30352 -static enum drm_connector_status hdmi_conn_detect(struct drm_connector *conn,
30353 -                                                 bool force)
30354 +static enum drm_connector_status mtk_hdmi_detect(struct mtk_hdmi *hdmi)
30356 -       struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
30357         return mtk_hdmi_update_plugged_status(hdmi);
30360 -static void hdmi_conn_destroy(struct drm_connector *conn)
30362 -       struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
30364 -       mtk_cec_set_hpd_event(hdmi->cec_dev, NULL, NULL);
30366 -       drm_connector_cleanup(conn);
30369 -static int mtk_hdmi_conn_get_modes(struct drm_connector *conn)
30371 -       struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
30372 -       struct edid *edid;
30373 -       int ret;
30375 -       if (!hdmi->ddc_adpt)
30376 -               return -ENODEV;
30378 -       edid = drm_get_edid(conn, hdmi->ddc_adpt);
30379 -       if (!edid)
30380 -               return -ENODEV;
30382 -       hdmi->dvi_mode = !drm_detect_monitor_audio(edid);
30384 -       drm_connector_update_edid_property(conn, edid);
30386 -       ret = drm_add_edid_modes(conn, edid);
30387 -       kfree(edid);
30388 -       return ret;
30391 -static int mtk_hdmi_conn_mode_valid(struct drm_connector *conn,
30392 -                                   struct drm_display_mode *mode)
30393 +static int mtk_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
30394 +                                     const struct drm_display_info *info,
30395 +                                     const struct drm_display_mode *mode)
30397 -       struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
30398 +       struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
30399         struct drm_bridge *next_bridge;
30401         dev_dbg(hdmi->dev, "xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n",
30402 @@ -1267,74 +1230,57 @@ static int mtk_hdmi_conn_mode_valid(struct drm_connector *conn,
30403         return drm_mode_validate_size(mode, 0x1fff, 0x1fff);
30406 -static struct drm_encoder *mtk_hdmi_conn_best_enc(struct drm_connector *conn)
30408 -       struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
30410 -       return hdmi->bridge.encoder;
30413 -static const struct drm_connector_funcs mtk_hdmi_connector_funcs = {
30414 -       .detect = hdmi_conn_detect,
30415 -       .fill_modes = drm_helper_probe_single_connector_modes,
30416 -       .destroy = hdmi_conn_destroy,
30417 -       .reset = drm_atomic_helper_connector_reset,
30418 -       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
30419 -       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
30422 -static const struct drm_connector_helper_funcs
30423 -               mtk_hdmi_connector_helper_funcs = {
30424 -       .get_modes = mtk_hdmi_conn_get_modes,
30425 -       .mode_valid = mtk_hdmi_conn_mode_valid,
30426 -       .best_encoder = mtk_hdmi_conn_best_enc,
30429  static void mtk_hdmi_hpd_event(bool hpd, struct device *dev)
30431         struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
30433 -       if (hdmi && hdmi->bridge.encoder && hdmi->bridge.encoder->dev)
30434 +       if (hdmi && hdmi->bridge.encoder && hdmi->bridge.encoder->dev) {
30435 +               static enum drm_connector_status status;
30437 +               status = mtk_hdmi_detect(hdmi);
30438                 drm_helper_hpd_irq_event(hdmi->bridge.encoder->dev);
30439 +               drm_bridge_hpd_notify(&hdmi->bridge, status);
30440 +       }
30443  /*
30444   * Bridge callbacks
30445   */
30447 +static enum drm_connector_status mtk_hdmi_bridge_detect(struct drm_bridge *bridge)
30449 +       struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
30451 +       return mtk_hdmi_detect(hdmi);
30454 +static struct edid *mtk_hdmi_bridge_get_edid(struct drm_bridge *bridge,
30455 +                                            struct drm_connector *connector)
30457 +       struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
30458 +       struct edid *edid;
30460 +       if (!hdmi->ddc_adpt)
30461 +               return NULL;
30462 +       edid = drm_get_edid(connector, hdmi->ddc_adpt);
30463 +       if (!edid)
30464 +               return NULL;
30465 +       hdmi->dvi_mode = !drm_detect_monitor_audio(edid);
30466 +       return edid;
30469  static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge,
30470                                   enum drm_bridge_attach_flags flags)
30472         struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
30473         int ret;
30475 -       if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
30476 -               DRM_ERROR("Fix bridge driver to make connector optional!");
30477 +       if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
30478 +               DRM_ERROR("%s: The flag DRM_BRIDGE_ATTACH_NO_CONNECTOR must be supplied\n",
30479 +                         __func__);
30480                 return -EINVAL;
30481         }
30483 -       ret = drm_connector_init_with_ddc(bridge->encoder->dev, &hdmi->conn,
30484 -                                         &mtk_hdmi_connector_funcs,
30485 -                                         DRM_MODE_CONNECTOR_HDMIA,
30486 -                                         hdmi->ddc_adpt);
30487 -       if (ret) {
30488 -               dev_err(hdmi->dev, "Failed to initialize connector: %d\n", ret);
30489 -               return ret;
30490 -       }
30491 -       drm_connector_helper_add(&hdmi->conn, &mtk_hdmi_connector_helper_funcs);
30493 -       hdmi->conn.polled = DRM_CONNECTOR_POLL_HPD;
30494 -       hdmi->conn.interlace_allowed = true;
30495 -       hdmi->conn.doublescan_allowed = false;
30497 -       ret = drm_connector_attach_encoder(&hdmi->conn,
30498 -                                               bridge->encoder);
30499 -       if (ret) {
30500 -               dev_err(hdmi->dev,
30501 -                       "Failed to attach connector to encoder: %d\n", ret);
30502 -               return ret;
30503 -       }
30505         if (hdmi->next_bridge) {
30506                 ret = drm_bridge_attach(bridge->encoder, hdmi->next_bridge,
30507                                         bridge, flags);
30508 @@ -1357,7 +1303,8 @@ static bool mtk_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
30509         return true;
30512 -static void mtk_hdmi_bridge_disable(struct drm_bridge *bridge)
30513 +static void mtk_hdmi_bridge_atomic_disable(struct drm_bridge *bridge,
30514 +                                          struct drm_bridge_state *old_bridge_state)
30516         struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
30518 @@ -1368,10 +1315,13 @@ static void mtk_hdmi_bridge_disable(struct drm_bridge *bridge)
30519         clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]);
30520         clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]);
30522 +       hdmi->curr_conn = NULL;
30524         hdmi->enabled = false;
30527 -static void mtk_hdmi_bridge_post_disable(struct drm_bridge *bridge)
30528 +static void mtk_hdmi_bridge_atomic_post_disable(struct drm_bridge *bridge,
30529 +                                               struct drm_bridge_state *old_state)
30531         struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
30533 @@ -1406,7 +1356,8 @@ static void mtk_hdmi_bridge_mode_set(struct drm_bridge *bridge,
30534         drm_mode_copy(&hdmi->mode, adjusted_mode);
30537 -static void mtk_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
30538 +static void mtk_hdmi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
30539 +                                             struct drm_bridge_state *old_state)
30541         struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
30543 @@ -1426,10 +1377,16 @@ static void mtk_hdmi_send_infoframe(struct mtk_hdmi *hdmi,
30544                 mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode);
30547 -static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge)
30548 +static void mtk_hdmi_bridge_atomic_enable(struct drm_bridge *bridge,
30549 +                                         struct drm_bridge_state *old_state)
30551 +       struct drm_atomic_state *state = old_state->base.state;
30552         struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
30554 +       /* Retrieve the connector through the atomic state. */
30555 +       hdmi->curr_conn = drm_atomic_get_new_connector_for_encoder(state,
30556 +                                                                  bridge->encoder);
30558         mtk_hdmi_output_set_display_mode(hdmi, &hdmi->mode);
30559         clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]);
30560         clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]);
30561 @@ -1440,13 +1397,19 @@ static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge)
30564  static const struct drm_bridge_funcs mtk_hdmi_bridge_funcs = {
30565 +       .mode_valid = mtk_hdmi_bridge_mode_valid,
30566 +       .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
30567 +       .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
30568 +       .atomic_reset = drm_atomic_helper_bridge_reset,
30569         .attach = mtk_hdmi_bridge_attach,
30570         .mode_fixup = mtk_hdmi_bridge_mode_fixup,
30571 -       .disable = mtk_hdmi_bridge_disable,
30572 -       .post_disable = mtk_hdmi_bridge_post_disable,
30573 +       .atomic_disable = mtk_hdmi_bridge_atomic_disable,
30574 +       .atomic_post_disable = mtk_hdmi_bridge_atomic_post_disable,
30575         .mode_set = mtk_hdmi_bridge_mode_set,
30576 -       .pre_enable = mtk_hdmi_bridge_pre_enable,
30577 -       .enable = mtk_hdmi_bridge_enable,
30578 +       .atomic_pre_enable = mtk_hdmi_bridge_atomic_pre_enable,
30579 +       .atomic_enable = mtk_hdmi_bridge_atomic_enable,
30580 +       .detect = mtk_hdmi_bridge_detect,
30581 +       .get_edid = mtk_hdmi_bridge_get_edid,
30582  };
30584  static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
30585 @@ -1662,8 +1625,10 @@ static int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf,
30587         struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
30589 -       memcpy(buf, hdmi->conn.eld, min(sizeof(hdmi->conn.eld), len));
30591 +       if (hdmi->enabled)
30592 +               memcpy(buf, hdmi->curr_conn->eld, min(sizeof(hdmi->curr_conn->eld), len));
30593 +       else
30594 +               memset(buf, 0, len);
30595         return 0;
30598 @@ -1755,6 +1720,9 @@ static int mtk_drm_hdmi_probe(struct platform_device *pdev)
30600         hdmi->bridge.funcs = &mtk_hdmi_bridge_funcs;
30601         hdmi->bridge.of_node = pdev->dev.of_node;
30602 +       hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
30603 +                        | DRM_BRIDGE_OP_HPD;
30604 +       hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
30605         drm_bridge_add(&hdmi->bridge);
30607         ret = mtk_hdmi_clk_enable_audio(hdmi);
30608 diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
30609 index 91cf46f84025..3d55e153fa9c 100644
30610 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
30611 +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
30612 @@ -246,7 +246,7 @@ static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
30615  struct a6xx_gmu_oob_bits {
30616 -       int set, ack, set_new, ack_new;
30617 +       int set, ack, set_new, ack_new, clear, clear_new;
30618         const char *name;
30619  };
30621 @@ -260,6 +260,8 @@ static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
30622                 .ack = 24,
30623                 .set_new = 30,
30624                 .ack_new = 31,
30625 +               .clear = 24,
30626 +               .clear_new = 31,
30627         },
30629         [GMU_OOB_PERFCOUNTER_SET] = {
30630 @@ -268,18 +270,22 @@ static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
30631                 .ack = 25,
30632                 .set_new = 28,
30633                 .ack_new = 30,
30634 +               .clear = 25,
30635 +               .clear_new = 29,
30636         },
30638         [GMU_OOB_BOOT_SLUMBER] = {
30639                 .name = "BOOT_SLUMBER",
30640                 .set = 22,
30641                 .ack = 30,
30642 +               .clear = 30,
30643         },
30645         [GMU_OOB_DCVS_SET] = {
30646                 .name = "GPU_DCVS",
30647                 .set = 23,
30648                 .ack = 31,
30649 +               .clear = 31,
30650         },
30651  };
30653 @@ -335,9 +341,9 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
30654                 return;
30656         if (gmu->legacy)
30657 -               bit = a6xx_gmu_oob_bits[state].ack;
30658 +               bit = a6xx_gmu_oob_bits[state].clear;
30659         else
30660 -               bit = a6xx_gmu_oob_bits[state].ack_new;
30661 +               bit = a6xx_gmu_oob_bits[state].clear_new;
30663         gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit);
30665 diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
30666 index d553f62f4eeb..b4d8e1b01ee4 100644
30667 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
30668 +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
30669 @@ -1153,10 +1153,6 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
30671         struct device_node *phandle;
30673 -       a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem", "gpu_cx");
30674 -       if (IS_ERR(a6xx_gpu->llc_mmio))
30675 -               return;
30677         /*
30678          * There is a different programming path for targets with an mmu500
30679          * attached, so detect if that is the case
30680 @@ -1166,6 +1162,11 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
30681                 of_device_is_compatible(phandle, "arm,mmu-500"));
30682         of_node_put(phandle);
30684 +       if (a6xx_gpu->have_mmu500)
30685 +               a6xx_gpu->llc_mmio = NULL;
30686 +       else
30687 +               a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem", "gpu_cx");
30689         a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU);
30690         a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);
30692 diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
30693 index 189f3533525c..e4444452759c 100644
30694 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
30695 +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
30696 @@ -22,7 +22,7 @@
30697         (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED4))
30699  #define VIG_SM8250_MASK \
30700 -       (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3LITE))
30701 +       (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3LITE))
30703  #define DMA_SDM845_MASK \
30704         (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
30705 diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
30706 index ff2c1d583c79..0392d4dfe270 100644
30707 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
30708 +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
30709 @@ -20,7 +20,7 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
30711         struct mdp5_kms *mdp5_kms = get_kms(encoder);
30712         struct device *dev = encoder->dev->dev;
30713 -       u32 total_lines_x100, vclks_line, cfg;
30714 +       u32 total_lines, vclks_line, cfg;
30715         long vsync_clk_speed;
30716         struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
30717         int pp_id = mixer->pp;
30718 @@ -30,8 +30,8 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
30719                 return -EINVAL;
30720         }
30722 -       total_lines_x100 = mode->vtotal * drm_mode_vrefresh(mode);
30723 -       if (!total_lines_x100) {
30724 +       total_lines = mode->vtotal * drm_mode_vrefresh(mode);
30725 +       if (!total_lines) {
30726                 DRM_DEV_ERROR(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
30727                               __func__, mode->vtotal, drm_mode_vrefresh(mode));
30728                 return -EINVAL;
30729 @@ -43,15 +43,23 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
30730                                                         vsync_clk_speed);
30731                 return -EINVAL;
30732         }
30733 -       vclks_line = vsync_clk_speed * 100 / total_lines_x100;
30734 +       vclks_line = vsync_clk_speed / total_lines;
30736         cfg = MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN
30737                 | MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN;
30738         cfg |= MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(vclks_line);
30740 +       /*
30741 +        * Tearcheck emits a blanking signal every vclks_line * vtotal * 2 ticks on
30742 +        * the vsync_clk equating to roughly half the desired panel refresh rate.
30743 +        * This is only necessary as stability fallback if interrupts from the
30744 +        * panel arrive too late or not at all, but is currently used by default
30745 +        * because these panel interrupts are not wired up yet.
30746 +        */
30747         mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_CONFIG_VSYNC(pp_id), cfg);
30748         mdp5_write(mdp5_kms,
30749 -               REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), 0xfff0);
30750 +               REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), (2 * mode->vtotal));
30752         mdp5_write(mdp5_kms,
30753                 REG_MDP5_PP_VSYNC_INIT_VAL(pp_id), mode->vdisplay);
30754         mdp5_write(mdp5_kms, REG_MDP5_PP_RD_PTR_IRQ(pp_id), mode->vdisplay + 1);
30755 diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
30756 index 82a8673ab8da..d7e4a39a904e 100644
30757 --- a/drivers/gpu/drm/msm/dp/dp_audio.c
30758 +++ b/drivers/gpu/drm/msm/dp/dp_audio.c
30759 @@ -527,6 +527,7 @@ int dp_audio_hw_params(struct device *dev,
30760         dp_audio_setup_acr(audio);
30761         dp_audio_safe_to_exit_level(audio);
30762         dp_audio_enable(audio, true);
30763 +       dp_display_signal_audio_start(dp_display);
30764         dp_display->audio_enabled = true;
30766  end:
30767 diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
30768 index 5a39da6e1eaf..1784e119269b 100644
30769 --- a/drivers/gpu/drm/msm/dp/dp_display.c
30770 +++ b/drivers/gpu/drm/msm/dp/dp_display.c
30771 @@ -178,6 +178,15 @@ static int dp_del_event(struct dp_display_private *dp_priv, u32 event)
30772         return 0;
30775 +void dp_display_signal_audio_start(struct msm_dp *dp_display)
30777 +       struct dp_display_private *dp;
30779 +       dp = container_of(dp_display, struct dp_display_private, dp_display);
30781 +       reinit_completion(&dp->audio_comp);
30784  void dp_display_signal_audio_complete(struct msm_dp *dp_display)
30786         struct dp_display_private *dp;
30787 @@ -586,10 +595,8 @@ static int dp_connect_pending_timeout(struct dp_display_private *dp, u32 data)
30788         mutex_lock(&dp->event_mutex);
30790         state = dp->hpd_state;
30791 -       if (state == ST_CONNECT_PENDING) {
30792 -               dp_display_enable(dp, 0);
30793 +       if (state == ST_CONNECT_PENDING)
30794                 dp->hpd_state = ST_CONNECTED;
30795 -       }
30797         mutex_unlock(&dp->event_mutex);
30799 @@ -651,7 +658,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
30800         dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND);
30802         /* signal the disconnect event early to ensure proper teardown */
30803 -       reinit_completion(&dp->audio_comp);
30804         dp_display_handle_plugged_change(g_dp_display, false);
30806         dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK |
30807 @@ -669,10 +675,8 @@ static int dp_disconnect_pending_timeout(struct dp_display_private *dp, u32 data
30808         mutex_lock(&dp->event_mutex);
30810         state =  dp->hpd_state;
30811 -       if (state == ST_DISCONNECT_PENDING) {
30812 -               dp_display_disable(dp, 0);
30813 +       if (state == ST_DISCONNECT_PENDING)
30814                 dp->hpd_state = ST_DISCONNECTED;
30815 -       }
30817         mutex_unlock(&dp->event_mutex);
30819 @@ -898,7 +902,6 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
30820         /* wait only if audio was enabled */
30821         if (dp_display->audio_enabled) {
30822                 /* signal the disconnect event */
30823 -               reinit_completion(&dp->audio_comp);
30824                 dp_display_handle_plugged_change(dp_display, false);
30825                 if (!wait_for_completion_timeout(&dp->audio_comp,
30826                                 HZ * 5))
30827 @@ -1272,7 +1275,12 @@ static int dp_pm_resume(struct device *dev)
30829         status = dp_catalog_link_is_connected(dp->catalog);
30831 -       if (status)
30832 +       /*
30833 +        * can not declared display is connected unless
30834 +        * HDMI cable is plugged in and sink_count of
30835 +        * dongle become 1
30836 +        */
30837 +       if (status && dp->link->sink_count)
30838                 dp->dp_display.is_connected = true;
30839         else
30840                 dp->dp_display.is_connected = false;
30841 diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
30842 index 6092ba1ed85e..5173c89eedf7 100644
30843 --- a/drivers/gpu/drm/msm/dp/dp_display.h
30844 +++ b/drivers/gpu/drm/msm/dp/dp_display.h
30845 @@ -34,6 +34,7 @@ int dp_display_get_modes(struct msm_dp *dp_display,
30846  int dp_display_request_irq(struct msm_dp *dp_display);
30847  bool dp_display_check_video_test(struct msm_dp *dp_display);
30848  int dp_display_get_test_bpp(struct msm_dp *dp_display);
30849 +void dp_display_signal_audio_start(struct msm_dp *dp_display);
30850  void dp_display_signal_audio_complete(struct msm_dp *dp_display);
30852  #endif /* _DP_DISPLAY_H_ */
30853 diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.c b/drivers/gpu/drm/msm/dp/dp_hpd.c
30854 index 5b8fe32022b5..e1c90fa47411 100644
30855 --- a/drivers/gpu/drm/msm/dp/dp_hpd.c
30856 +++ b/drivers/gpu/drm/msm/dp/dp_hpd.c
30857 @@ -34,8 +34,8 @@ int dp_hpd_connect(struct dp_usbpd *dp_usbpd, bool hpd)
30859         dp_usbpd->hpd_high = hpd;
30861 -       if (!hpd_priv->dp_cb && !hpd_priv->dp_cb->configure
30862 -                               && !hpd_priv->dp_cb->disconnect) {
30863 +       if (!hpd_priv->dp_cb || !hpd_priv->dp_cb->configure
30864 +                               || !hpd_priv->dp_cb->disconnect) {
30865                 pr_err("hpd dp_cb not initialized\n");
30866                 return -EINVAL;
30867         }
30868 diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
30869 index 85ad0babc326..d611cc8e54a4 100644
30870 --- a/drivers/gpu/drm/msm/msm_debugfs.c
30871 +++ b/drivers/gpu/drm/msm/msm_debugfs.c
30872 @@ -111,23 +111,15 @@ static const struct file_operations msm_gpu_fops = {
30873  static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
30875         struct msm_drm_private *priv = dev->dev_private;
30876 -       struct msm_gpu *gpu = priv->gpu;
30877         int ret;
30879 -       ret = mutex_lock_interruptible(&priv->mm_lock);
30880 +       ret = mutex_lock_interruptible(&priv->obj_lock);
30881         if (ret)
30882                 return ret;
30884 -       if (gpu) {
30885 -               seq_printf(m, "Active Objects (%s):\n", gpu->name);
30886 -               msm_gem_describe_objects(&gpu->active_list, m);
30887 -       }
30889 -       seq_printf(m, "Inactive Objects:\n");
30890 -       msm_gem_describe_objects(&priv->inactive_dontneed, m);
30891 -       msm_gem_describe_objects(&priv->inactive_willneed, m);
30892 +       msm_gem_describe_objects(&priv->objects, m);
30894 -       mutex_unlock(&priv->mm_lock);
30895 +       mutex_unlock(&priv->obj_lock);
30897         return 0;
30899 diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
30900 index 196907689c82..18ea1c66de71 100644
30901 --- a/drivers/gpu/drm/msm/msm_drv.c
30902 +++ b/drivers/gpu/drm/msm/msm_drv.c
30903 @@ -446,6 +446,9 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
30905         priv->wq = alloc_ordered_workqueue("msm", 0);
30907 +       INIT_LIST_HEAD(&priv->objects);
30908 +       mutex_init(&priv->obj_lock);
30910         INIT_LIST_HEAD(&priv->inactive_willneed);
30911         INIT_LIST_HEAD(&priv->inactive_dontneed);
30912         mutex_init(&priv->mm_lock);
30913 diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
30914 index 591c47a654e8..6b58e49754cb 100644
30915 --- a/drivers/gpu/drm/msm/msm_drv.h
30916 +++ b/drivers/gpu/drm/msm/msm_drv.h
30917 @@ -174,7 +174,14 @@ struct msm_drm_private {
30918         struct msm_rd_state *hangrd;   /* debugfs to dump hanging submits */
30919         struct msm_perf_state *perf;
30921 -       /*
30922 +       /**
30923 +        * List of all GEM objects (mainly for debugfs, protected by obj_lock
30924 +        * (acquire before per GEM object lock)
30925 +        */
30926 +       struct list_head objects;
30927 +       struct mutex obj_lock;
30929 +       /**
30930          * Lists of inactive GEM objects.  Every bo is either in one of the
30931          * inactive lists (depending on whether or not it is shrinkable) or
30932          * gpu->active_list (for the gpu it is active on[1])
30933 diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
30934 index f091c1e164fa..aeba3eb8ce46 100644
30935 --- a/drivers/gpu/drm/msm/msm_gem.c
30936 +++ b/drivers/gpu/drm/msm/msm_gem.c
30937 @@ -951,7 +951,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
30938         size_t size = 0;
30940         seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
30941 -       list_for_each_entry(msm_obj, list, mm_list) {
30942 +       list_for_each_entry(msm_obj, list, node) {
30943                 struct drm_gem_object *obj = &msm_obj->base;
30944                 seq_puts(m, "   ");
30945                 msm_gem_describe(obj, m);
30946 @@ -970,6 +970,10 @@ void msm_gem_free_object(struct drm_gem_object *obj)
30947         struct drm_device *dev = obj->dev;
30948         struct msm_drm_private *priv = dev->dev_private;
30950 +       mutex_lock(&priv->obj_lock);
30951 +       list_del(&msm_obj->node);
30952 +       mutex_unlock(&priv->obj_lock);
30954         mutex_lock(&priv->mm_lock);
30955         list_del(&msm_obj->mm_list);
30956         mutex_unlock(&priv->mm_lock);
30957 @@ -1157,6 +1161,10 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
30958         list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
30959         mutex_unlock(&priv->mm_lock);
30961 +       mutex_lock(&priv->obj_lock);
30962 +       list_add_tail(&msm_obj->node, &priv->objects);
30963 +       mutex_unlock(&priv->obj_lock);
30965         return obj;
30967  fail:
30968 @@ -1227,6 +1235,10 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
30969         list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
30970         mutex_unlock(&priv->mm_lock);
30972 +       mutex_lock(&priv->obj_lock);
30973 +       list_add_tail(&msm_obj->node, &priv->objects);
30974 +       mutex_unlock(&priv->obj_lock);
30976         return obj;
30978  fail:
30979 diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
30980 index b3a0a880cbab..99d4c0e9465e 100644
30981 --- a/drivers/gpu/drm/msm/msm_gem.h
30982 +++ b/drivers/gpu/drm/msm/msm_gem.h
30983 @@ -55,8 +55,16 @@ struct msm_gem_object {
30984          */
30985         uint8_t vmap_count;
30987 -       /* And object is either:
30988 -        *  inactive - on priv->inactive_list
30989 +       /**
30990 +        * Node in list of all objects (mainly for debugfs, protected by
30991 +        * priv->obj_lock
30992 +        */
30993 +       struct list_head node;
30995 +       /**
30996 +        * An object is either:
30997 +        *  inactive - on priv->inactive_dontneed or priv->inactive_willneed
30998 +        *     (depending on purgability status)
30999          *  active   - on one one of the gpu's active_list..  well, at
31000          *     least for now we don't have (I don't think) hw sync between
31001          *     2d and 3d one devices which have both, meaning we need to
31002 diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
31003 index b31d750c425a..5f1722b040f4 100644
31004 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c
31005 +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
31006 @@ -4327,7 +4327,8 @@ static int omap_dsi_register_te_irq(struct dsi_data *dsi,
31007         irq_set_status_flags(te_irq, IRQ_NOAUTOEN);
31009         err = request_threaded_irq(te_irq, NULL, omap_dsi_te_irq_handler,
31010 -                                  IRQF_TRIGGER_RISING, "TE", dsi);
31011 +                                  IRQF_TRIGGER_RISING | IRQF_ONESHOT,
31012 +                                  "TE", dsi);
31013         if (err) {
31014                 dev_err(dsi->dev, "request irq failed with %d\n", err);
31015                 gpiod_put(dsi->te_gpio);
31016 diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
31017 index b9a0e56f33e2..ef70140c5b09 100644
31018 --- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c
31019 +++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
31020 @@ -898,8 +898,7 @@ static int nt35510_probe(struct mipi_dsi_device *dsi)
31021          */
31022         dsi->hs_rate = 349440000;
31023         dsi->lp_rate = 9600000;
31024 -       dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS |
31025 -               MIPI_DSI_MODE_EOT_PACKET;
31026 +       dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
31028         /*
31029          * Every new incarnation of this display must have a unique
31030 diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
31031 index 4aac0d1573dd..70560cac53a9 100644
31032 --- a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
31033 +++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
31034 @@ -184,9 +184,7 @@ static int s6d16d0_probe(struct mipi_dsi_device *dsi)
31035          * As we only send commands we do not need to be continuously
31036          * clocked.
31037          */
31038 -       dsi->mode_flags =
31039 -               MIPI_DSI_CLOCK_NON_CONTINUOUS |
31040 -               MIPI_DSI_MODE_EOT_PACKET;
31041 +       dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
31043         s6->supply = devm_regulator_get(dev, "vdd1");
31044         if (IS_ERR(s6->supply))
31045 diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
31046 index eec74c10ddda..9c3563c61e8c 100644
31047 --- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
31048 +++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
31049 @@ -97,7 +97,6 @@ static int s6e63m0_dsi_probe(struct mipi_dsi_device *dsi)
31050         dsi->hs_rate = 349440000;
31051         dsi->lp_rate = 9600000;
31052         dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
31053 -               MIPI_DSI_MODE_EOT_PACKET |
31054                 MIPI_DSI_MODE_VIDEO_BURST;
31056         ret = s6e63m0_probe(dev, s6e63m0_dsi_dcs_read, s6e63m0_dsi_dcs_write,
31057 diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
31058 index 4e2dad314c79..e8b1a0e873ea 100644
31059 --- a/drivers/gpu/drm/panel/panel-simple.c
31060 +++ b/drivers/gpu/drm/panel/panel-simple.c
31061 @@ -406,7 +406,7 @@ static int panel_simple_prepare(struct drm_panel *panel)
31062                 if (IS_ERR(p->hpd_gpio)) {
31063                         err = panel_simple_get_hpd_gpio(panel->dev, p, false);
31064                         if (err)
31065 -                               return err;
31066 +                               goto error;
31067                 }
31069                 err = readx_poll_timeout(gpiod_get_value_cansleep, p->hpd_gpio,
31070 @@ -418,13 +418,20 @@ static int panel_simple_prepare(struct drm_panel *panel)
31071                 if (err) {
31072                         dev_err(panel->dev,
31073                                 "error waiting for hpd GPIO: %d\n", err);
31074 -                       return err;
31075 +                       goto error;
31076                 }
31077         }
31079         p->prepared_time = ktime_get();
31081         return 0;
31083 +error:
31084 +       gpiod_set_value_cansleep(p->enable_gpio, 0);
31085 +       regulator_disable(p->supply);
31086 +       p->unprepared_time = ktime_get();
31088 +       return err;
31091  static int panel_simple_enable(struct drm_panel *panel)
31092 diff --git a/drivers/gpu/drm/panel/panel-sony-acx424akp.c b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
31093 index 065efae213f5..95659a4d15e9 100644
31094 --- a/drivers/gpu/drm/panel/panel-sony-acx424akp.c
31095 +++ b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
31096 @@ -449,8 +449,7 @@ static int acx424akp_probe(struct mipi_dsi_device *dsi)
31097                         MIPI_DSI_MODE_VIDEO_BURST;
31098         else
31099                 dsi->mode_flags =
31100 -                       MIPI_DSI_CLOCK_NON_CONTINUOUS |
31101 -                       MIPI_DSI_MODE_EOT_PACKET;
31102 +                       MIPI_DSI_CLOCK_NON_CONTINUOUS;
31104         acx->supply = devm_regulator_get(dev, "vddi");
31105         if (IS_ERR(acx->supply))
31106 diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
31107 index 7c1b3481b785..21e552d1ac71 100644
31108 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
31109 +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
31110 @@ -488,8 +488,14 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
31111                 }
31112                 bo->base.pages = pages;
31113                 bo->base.pages_use_count = 1;
31114 -       } else
31115 +       } else {
31116                 pages = bo->base.pages;
31117 +               if (pages[page_offset]) {
31118 +                       /* Pages are already mapped, bail out. */
31119 +                       mutex_unlock(&bo->base.pages_lock);
31120 +                       goto out;
31121 +               }
31122 +       }
31124         mapping = bo->base.base.filp->f_mapping;
31125         mapping_set_unevictable(mapping);
31126 @@ -522,6 +528,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
31128         dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
31130 +out:
31131         panfrost_gem_mapping_put(bomapping);
31133         return 0;
31134 @@ -593,6 +600,8 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
31135                 access_type = (fault_status >> 8) & 0x3;
31136                 source_id = (fault_status >> 16);
31138 +               mmu_write(pfdev, MMU_INT_CLEAR, mask);
31140                 /* Page fault only */
31141                 ret = -1;
31142                 if ((status & mask) == BIT(i) && (exception_type & 0xF8) == 0xC0)
31143 @@ -616,8 +625,6 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
31144                                 access_type, access_type_name(pfdev, fault_status),
31145                                 source_id);
31147 -               mmu_write(pfdev, MMU_INT_CLEAR, mask);
31149                 status &= ~mask;
31150         }
31152 diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
31153 index 54e3c3a97440..741cc983daf1 100644
31154 --- a/drivers/gpu/drm/qxl/qxl_cmd.c
31155 +++ b/drivers/gpu/drm/qxl/qxl_cmd.c
31156 @@ -268,7 +268,7 @@ int qxl_alloc_bo_reserved(struct qxl_device *qdev,
31157         int ret;
31159         ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
31160 -                           false, QXL_GEM_DOMAIN_VRAM, NULL, &bo);
31161 +                           false, QXL_GEM_DOMAIN_VRAM, 0, NULL, &bo);
31162         if (ret) {
31163                 DRM_ERROR("failed to allocate VRAM BO\n");
31164                 return ret;
31165 diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
31166 index 10738e04c09b..3f432ec8e771 100644
31167 --- a/drivers/gpu/drm/qxl/qxl_display.c
31168 +++ b/drivers/gpu/drm/qxl/qxl_display.c
31169 @@ -798,8 +798,8 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
31170                                 qdev->dumb_shadow_bo = NULL;
31171                         }
31172                         qxl_bo_create(qdev, surf.height * surf.stride,
31173 -                                     true, true, QXL_GEM_DOMAIN_SURFACE, &surf,
31174 -                                     &qdev->dumb_shadow_bo);
31175 +                                     true, true, QXL_GEM_DOMAIN_SURFACE, 0,
31176 +                                     &surf, &qdev->dumb_shadow_bo);
31177                 }
31178                 if (user_bo->shadow != qdev->dumb_shadow_bo) {
31179                         if (user_bo->shadow) {
31180 @@ -1228,6 +1228,10 @@ int qxl_modeset_init(struct qxl_device *qdev)
31182  void qxl_modeset_fini(struct qxl_device *qdev)
31184 +       if (qdev->dumb_shadow_bo) {
31185 +               drm_gem_object_put(&qdev->dumb_shadow_bo->tbo.base);
31186 +               qdev->dumb_shadow_bo = NULL;
31187 +       }
31188         qxl_destroy_monitors_object(qdev);
31189         drm_mode_config_cleanup(&qdev->ddev);
31191 diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
31192 index 48e096285b4c..a08da0bd9098 100644
31193 --- a/drivers/gpu/drm/qxl/qxl_gem.c
31194 +++ b/drivers/gpu/drm/qxl/qxl_gem.c
31195 @@ -55,7 +55,7 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size,
31196         /* At least align on page size */
31197         if (alignment < PAGE_SIZE)
31198                 alignment = PAGE_SIZE;
31199 -       r = qxl_bo_create(qdev, size, kernel, false, initial_domain, surf, &qbo);
31200 +       r = qxl_bo_create(qdev, size, kernel, false, initial_domain, 0, surf, &qbo);
31201         if (r) {
31202                 if (r != -ERESTARTSYS)
31203                         DRM_ERROR(
31204 diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
31205 index ceebc5881f68..a5806667697a 100644
31206 --- a/drivers/gpu/drm/qxl/qxl_object.c
31207 +++ b/drivers/gpu/drm/qxl/qxl_object.c
31208 @@ -103,8 +103,8 @@ static const struct drm_gem_object_funcs qxl_object_funcs = {
31209         .print_info = drm_gem_ttm_print_info,
31210  };
31212 -int qxl_bo_create(struct qxl_device *qdev,
31213 -                 unsigned long size, bool kernel, bool pinned, u32 domain,
31214 +int qxl_bo_create(struct qxl_device *qdev, unsigned long size,
31215 +                 bool kernel, bool pinned, u32 domain, u32 priority,
31216                   struct qxl_surface *surf,
31217                   struct qxl_bo **bo_ptr)
31219 @@ -137,6 +137,7 @@ int qxl_bo_create(struct qxl_device *qdev,
31221         qxl_ttm_placement_from_domain(bo, domain);
31223 +       bo->tbo.priority = priority;
31224         r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, size, type,
31225                                  &bo->placement, 0, &ctx, size,
31226                                  NULL, NULL, &qxl_ttm_bo_destroy);
31227 diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
31228 index e60a8f88e226..dc1659e717f1 100644
31229 --- a/drivers/gpu/drm/qxl/qxl_object.h
31230 +++ b/drivers/gpu/drm/qxl/qxl_object.h
31231 @@ -61,6 +61,7 @@ static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
31232  extern int qxl_bo_create(struct qxl_device *qdev,
31233                          unsigned long size,
31234                          bool kernel, bool pinned, u32 domain,
31235 +                        u32 priority,
31236                          struct qxl_surface *surf,
31237                          struct qxl_bo **bo_ptr);
31238  extern int qxl_bo_kmap(struct qxl_bo *bo, struct dma_buf_map *map);
31239 diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
31240 index b372455e2729..801ce77b1dac 100644
31241 --- a/drivers/gpu/drm/qxl/qxl_release.c
31242 +++ b/drivers/gpu/drm/qxl/qxl_release.c
31243 @@ -199,11 +199,12 @@ qxl_release_free(struct qxl_device *qdev,
31246  static int qxl_release_bo_alloc(struct qxl_device *qdev,
31247 -                               struct qxl_bo **bo)
31248 +                               struct qxl_bo **bo,
31249 +                               u32 priority)
31251         /* pin releases bo's they are too messy to evict */
31252         return qxl_bo_create(qdev, PAGE_SIZE, false, true,
31253 -                            QXL_GEM_DOMAIN_VRAM, NULL, bo);
31254 +                            QXL_GEM_DOMAIN_VRAM, priority, NULL, bo);
31257  int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
31258 @@ -326,13 +327,18 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
31259         int ret = 0;
31260         union qxl_release_info *info;
31261         int cur_idx;
31262 +       u32 priority;
31264 -       if (type == QXL_RELEASE_DRAWABLE)
31265 +       if (type == QXL_RELEASE_DRAWABLE) {
31266                 cur_idx = 0;
31267 -       else if (type == QXL_RELEASE_SURFACE_CMD)
31268 +               priority = 0;
31269 +       } else if (type == QXL_RELEASE_SURFACE_CMD) {
31270                 cur_idx = 1;
31271 -       else if (type == QXL_RELEASE_CURSOR_CMD)
31272 +               priority = 1;
31273 +       } else if (type == QXL_RELEASE_CURSOR_CMD) {
31274                 cur_idx = 2;
31275 +               priority = 1;
31276 +       }
31277         else {
31278                 DRM_ERROR("got illegal type: %d\n", type);
31279                 return -EINVAL;
31280 @@ -352,7 +358,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
31281                 qdev->current_release_bo[cur_idx] = NULL;
31282         }
31283         if (!qdev->current_release_bo[cur_idx]) {
31284 -               ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
31285 +               ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx], priority);
31286                 if (ret) {
31287                         mutex_unlock(&qdev->release_mutex);
31288                         if (free_bo) {
31289 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
31290 index 3effc8c71494..ea44423376c4 100644
31291 --- a/drivers/gpu/drm/radeon/radeon.h
31292 +++ b/drivers/gpu/drm/radeon/radeon.h
31293 @@ -1558,6 +1558,7 @@ struct radeon_dpm {
31294         void                    *priv;
31295         u32                     new_active_crtcs;
31296         int                     new_active_crtc_count;
31297 +       int                     high_pixelclock_count;
31298         u32                     current_active_crtcs;
31299         int                     current_active_crtc_count;
31300         bool single_display;
31301 diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
31302 index 42301b4e56f5..28c4413f4dc8 100644
31303 --- a/drivers/gpu/drm/radeon/radeon_atombios.c
31304 +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
31305 @@ -2120,11 +2120,14 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
31306                 return state_index;
31307         /* last mode is usually default, array is low to high */
31308         for (i = 0; i < num_modes; i++) {
31309 -               rdev->pm.power_state[state_index].clock_info =
31310 -                       kcalloc(1, sizeof(struct radeon_pm_clock_info),
31311 -                               GFP_KERNEL);
31312 +               /* avoid memory leaks from invalid modes or unknown frev. */
31313 +               if (!rdev->pm.power_state[state_index].clock_info) {
31314 +                       rdev->pm.power_state[state_index].clock_info =
31315 +                               kzalloc(sizeof(struct radeon_pm_clock_info),
31316 +                                       GFP_KERNEL);
31317 +               }
31318                 if (!rdev->pm.power_state[state_index].clock_info)
31319 -                       return state_index;
31320 +                       goto out;
31321                 rdev->pm.power_state[state_index].num_clock_modes = 1;
31322                 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
31323                 switch (frev) {
31324 @@ -2243,17 +2246,24 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
31325                         break;
31326                 }
31327         }
31328 +out:
31329 +       /* free any unused clock_info allocation. */
31330 +       if (state_index && state_index < num_modes) {
31331 +               kfree(rdev->pm.power_state[state_index].clock_info);
31332 +               rdev->pm.power_state[state_index].clock_info = NULL;
31333 +       }
31335         /* last mode is usually default */
31336 -       if (rdev->pm.default_power_state_index == -1) {
31337 +       if (state_index && rdev->pm.default_power_state_index == -1) {
31338                 rdev->pm.power_state[state_index - 1].type =
31339                         POWER_STATE_TYPE_DEFAULT;
31340                 rdev->pm.default_power_state_index = state_index - 1;
31341                 rdev->pm.power_state[state_index - 1].default_clock_mode =
31342                         &rdev->pm.power_state[state_index - 1].clock_info[0];
31343 -               rdev->pm.power_state[state_index].flags &=
31344 +               rdev->pm.power_state[state_index - 1].flags &=
31345                         ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
31346 -               rdev->pm.power_state[state_index].misc = 0;
31347 -               rdev->pm.power_state[state_index].misc2 = 0;
31348 +               rdev->pm.power_state[state_index - 1].misc = 0;
31349 +               rdev->pm.power_state[state_index - 1].misc2 = 0;
31350         }
31351         return state_index;
31353 diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
31354 index 2c32186c4acd..4e4c937c36c6 100644
31355 --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
31356 +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
31357 @@ -242,6 +242,9 @@ radeon_dp_mst_detect(struct drm_connector *connector,
31358                 to_radeon_connector(connector);
31359         struct radeon_connector *master = radeon_connector->mst_port;
31361 +       if (drm_connector_is_unregistered(connector))
31362 +               return connector_status_disconnected;
31364         return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
31365                                       radeon_connector->port);
31367 diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
31368 index 2479d6ab7a36..58876bb4ef2a 100644
31369 --- a/drivers/gpu/drm/radeon/radeon_kms.c
31370 +++ b/drivers/gpu/drm/radeon/radeon_kms.c
31371 @@ -518,6 +518,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
31372                         *value = rdev->config.si.backend_enable_mask;
31373                 } else {
31374                         DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
31375 +                       return -EINVAL;
31376                 }
31377                 break;
31378         case RADEON_INFO_MAX_SCLK:
31379 diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
31380 index 9b81786782de..499ce55e34cc 100644
31381 --- a/drivers/gpu/drm/radeon/radeon_object.c
31382 +++ b/drivers/gpu/drm/radeon/radeon_object.c
31383 @@ -384,6 +384,8 @@ int radeon_bo_evict_vram(struct radeon_device *rdev)
31384         }
31385  #endif
31386         man = ttm_manager_type(bdev, TTM_PL_VRAM);
31387 +       if (!man)
31388 +               return 0;
31389         return ttm_resource_manager_evict_all(bdev, man);
31392 diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
31393 index 1995dad59dd0..2db4a8b1542d 100644
31394 --- a/drivers/gpu/drm/radeon/radeon_pm.c
31395 +++ b/drivers/gpu/drm/radeon/radeon_pm.c
31396 @@ -1775,6 +1775,7 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
31397         struct drm_device *ddev = rdev->ddev;
31398         struct drm_crtc *crtc;
31399         struct radeon_crtc *radeon_crtc;
31400 +       struct radeon_connector *radeon_connector;
31402         if (!rdev->pm.dpm_enabled)
31403                 return;
31404 @@ -1784,6 +1785,7 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
31405         /* update active crtc counts */
31406         rdev->pm.dpm.new_active_crtcs = 0;
31407         rdev->pm.dpm.new_active_crtc_count = 0;
31408 +       rdev->pm.dpm.high_pixelclock_count = 0;
31409         if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
31410                 list_for_each_entry(crtc,
31411                                     &ddev->mode_config.crtc_list, head) {
31412 @@ -1791,6 +1793,12 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
31413                         if (crtc->enabled) {
31414                                 rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
31415                                 rdev->pm.dpm.new_active_crtc_count++;
31416 +                               if (!radeon_crtc->connector)
31417 +                                       continue;
31419 +                               radeon_connector = to_radeon_connector(radeon_crtc->connector);
31420 +                               if (radeon_connector->pixelclock_for_modeset > 297000)
31421 +                                       rdev->pm.dpm.high_pixelclock_count++;
31422                         }
31423                 }
31424         }
31425 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
31426 index 78893bea85ae..c0258d213a72 100644
31427 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
31428 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
31429 @@ -485,13 +485,14 @@ static void radeon_ttm_backend_unbind(struct ttm_bo_device *bdev, struct ttm_tt
31430         struct radeon_ttm_tt *gtt = (void *)ttm;
31431         struct radeon_device *rdev = radeon_get_rdev(bdev);
31433 +       if (gtt->userptr)
31434 +               radeon_ttm_tt_unpin_userptr(bdev, ttm);
31436         if (!gtt->bound)
31437                 return;
31439         radeon_gart_unbind(rdev, gtt->offset, ttm->num_pages);
31441 -       if (gtt->userptr)
31442 -               radeon_ttm_tt_unpin_userptr(bdev, ttm);
31443         gtt->bound = false;
31446 diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
31447 index 91bfc4762767..43b63705d073 100644
31448 --- a/drivers/gpu/drm/radeon/si_dpm.c
31449 +++ b/drivers/gpu/drm/radeon/si_dpm.c
31450 @@ -2979,6 +2979,9 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
31451                     (rdev->pdev->device == 0x6605)) {
31452                         max_sclk = 75000;
31453                 }
31455 +               if (rdev->pm.dpm.high_pixelclock_count > 1)
31456 +                       disable_sclk_switching = true;
31457         }
31459         if (rps->vce_active) {
31460 diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
31461 index 7812094f93d6..6f3b523e16e8 100644
31462 --- a/drivers/gpu/drm/stm/ltdc.c
31463 +++ b/drivers/gpu/drm/stm/ltdc.c
31464 @@ -525,13 +525,42 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
31466         struct ltdc_device *ldev = crtc_to_ltdc(crtc);
31467         struct drm_device *ddev = crtc->dev;
31468 +       struct drm_connector_list_iter iter;
31469 +       struct drm_connector *connector = NULL;
31470 +       struct drm_encoder *encoder = NULL;
31471 +       struct drm_bridge *bridge = NULL;
31472         struct drm_display_mode *mode = &crtc->state->adjusted_mode;
31473         struct videomode vm;
31474         u32 hsync, vsync, accum_hbp, accum_vbp, accum_act_w, accum_act_h;
31475         u32 total_width, total_height;
31476 +       u32 bus_flags = 0;
31477         u32 val;
31478         int ret;
31480 +       /* get encoder from crtc */
31481 +       drm_for_each_encoder(encoder, ddev)
31482 +               if (encoder->crtc == crtc)
31483 +                       break;
31485 +       if (encoder) {
31486 +               /* get bridge from encoder */
31487 +               list_for_each_entry(bridge, &encoder->bridge_chain, chain_node)
31488 +                       if (bridge->encoder == encoder)
31489 +                               break;
31491 +               /* Get the connector from encoder */
31492 +               drm_connector_list_iter_begin(ddev, &iter);
31493 +               drm_for_each_connector_iter(connector, &iter)
31494 +                       if (connector->encoder == encoder)
31495 +                               break;
31496 +               drm_connector_list_iter_end(&iter);
31497 +       }
31499 +       if (bridge && bridge->timings)
31500 +               bus_flags = bridge->timings->input_bus_flags;
31501 +       else if (connector)
31502 +               bus_flags = connector->display_info.bus_flags;
31504         if (!pm_runtime_active(ddev->dev)) {
31505                 ret = pm_runtime_get_sync(ddev->dev);
31506                 if (ret) {
31507 @@ -567,10 +596,10 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
31508         if (vm.flags & DISPLAY_FLAGS_VSYNC_HIGH)
31509                 val |= GCR_VSPOL;
31511 -       if (vm.flags & DISPLAY_FLAGS_DE_LOW)
31512 +       if (bus_flags & DRM_BUS_FLAG_DE_LOW)
31513                 val |= GCR_DEPOL;
31515 -       if (vm.flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
31516 +       if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
31517                 val |= GCR_PCPOL;
31519         reg_update_bits(ldev->regs, LTDC_GCR,
31520 diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
31521 index 30213708fc99..d99afd19ca08 100644
31522 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
31523 +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
31524 @@ -515,6 +515,15 @@ static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
31526         drm_crtc_vblank_off(crtc);
31528 +       spin_lock_irq(&crtc->dev->event_lock);
31530 +       if (crtc->state->event) {
31531 +               drm_crtc_send_vblank_event(crtc, crtc->state->event);
31532 +               crtc->state->event = NULL;
31533 +       }
31535 +       spin_unlock_irq(&crtc->dev->event_lock);
31537         tilcdc_crtc_disable_irqs(dev);
31539         pm_runtime_put_sync(dev->dev);
31540 diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
31541 index 23eb6d772e40..669f2ee39515 100644
31542 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
31543 +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
31544 @@ -174,7 +174,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
31545                 if (!sync_file) {
31546                         dma_fence_put(&out_fence->f);
31547                         ret = -ENOMEM;
31548 -                       goto out_memdup;
31549 +                       goto out_unresv;
31550                 }
31552                 exbuf->fence_fd = out_fence_fd;
31553 diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
31554 index d69a5b6da553..4ff1ec28e630 100644
31555 --- a/drivers/gpu/drm/virtio/virtgpu_object.c
31556 +++ b/drivers/gpu/drm/virtio/virtgpu_object.c
31557 @@ -248,6 +248,7 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
31559         ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
31560         if (ret != 0) {
31561 +               virtio_gpu_array_put_free(objs);
31562                 virtio_gpu_free_object(&shmem_obj->base);
31563                 return ret;
31564         }
31565 diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
31566 index 0443b7deeaef..758d8a98d96b 100644
31567 --- a/drivers/gpu/drm/vkms/vkms_crtc.c
31568 +++ b/drivers/gpu/drm/vkms/vkms_crtc.c
31569 @@ -18,7 +18,8 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
31571         ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
31572                                           output->period_ns);
31573 -       WARN_ON(ret_overrun != 1);
31574 +       if (ret_overrun != 1)
31575 +               pr_warn("%s: vblank timer overrun\n", __func__);
31577         spin_lock(&output->lock);
31578         ret = drm_crtc_handle_vblank(crtc);
31579 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31580 index 6c2a569f1fcb..8d7feeb0d7ab 100644
31581 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31582 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31583 @@ -201,7 +201,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
31584                         break;
31585                 }
31586                 if (lazy)
31587 -                       schedule_timeout(1);
31588 +                       schedule_min_hrtimeout();
31589                 else if ((++count & 0x0F) == 0) {
31590                         /**
31591                          * FIXME: Use schedule_hr_timeout here for
31592 diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
31593 index 99158ee67d02..59d1fb017da0 100644
31594 --- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
31595 +++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
31596 @@ -866,7 +866,7 @@ static int zynqmp_dp_train(struct zynqmp_dp *dp)
31597                 return ret;
31599         zynqmp_dp_write(dp, ZYNQMP_DP_SCRAMBLING_DISABLE, 1);
31600 -       memset(dp->train_set, 0, 4);
31601 +       memset(dp->train_set, 0, sizeof(dp->train_set));
31602         ret = zynqmp_dp_link_train_cr(dp);
31603         if (ret)
31604                 return ret;
31605 diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
31606 index 67fd8a2f5aba..ba338973e968 100644
31607 --- a/drivers/hid/hid-ids.h
31608 +++ b/drivers/hid/hid-ids.h
31609 @@ -946,6 +946,7 @@
31610  #define USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S   0x8003
31612  #define USB_VENDOR_ID_PLANTRONICS      0x047f
31613 +#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES        0xc056
31615  #define USB_VENDOR_ID_PANASONIC                0x04da
31616  #define USB_DEVICE_ID_PANABOARD_UBT780 0x1044
31617 diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
31618 index c6c8e20f3e8d..0ff03fed9770 100644
31619 --- a/drivers/hid/hid-lenovo.c
31620 +++ b/drivers/hid/hid-lenovo.c
31621 @@ -33,6 +33,9 @@
31623  #include "hid-ids.h"
31625 +/* Userspace expects F20 for mic-mute KEY_MICMUTE does not work */
31626 +#define LENOVO_KEY_MICMUTE KEY_F20
31628  struct lenovo_drvdata {
31629         u8 led_report[3]; /* Must be first for proper alignment */
31630         int led_state;
31631 @@ -62,8 +65,8 @@ struct lenovo_drvdata {
31632  #define TP10UBKBD_LED_OFF              1
31633  #define TP10UBKBD_LED_ON               2
31635 -static void lenovo_led_set_tp10ubkbd(struct hid_device *hdev, u8 led_code,
31636 -                                    enum led_brightness value)
31637 +static int lenovo_led_set_tp10ubkbd(struct hid_device *hdev, u8 led_code,
31638 +                                   enum led_brightness value)
31640         struct lenovo_drvdata *data = hid_get_drvdata(hdev);
31641         int ret;
31642 @@ -75,10 +78,18 @@ static void lenovo_led_set_tp10ubkbd(struct hid_device *hdev, u8 led_code,
31643         data->led_report[2] = value ? TP10UBKBD_LED_ON : TP10UBKBD_LED_OFF;
31644         ret = hid_hw_raw_request(hdev, data->led_report[0], data->led_report, 3,
31645                                  HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
31646 -       if (ret)
31647 -               hid_err(hdev, "Set LED output report error: %d\n", ret);
31648 +       if (ret != 3) {
31649 +               if (ret != -ENODEV)
31650 +                       hid_err(hdev, "Set LED output report error: %d\n", ret);
31652 +               ret = ret < 0 ? ret : -EIO;
31653 +       } else {
31654 +               ret = 0;
31655 +       }
31657         mutex_unlock(&data->led_report_mutex);
31659 +       return ret;
31662  static void lenovo_tp10ubkbd_sync_fn_lock(struct work_struct *work)
31663 @@ -126,7 +137,7 @@ static int lenovo_input_mapping_tpkbd(struct hid_device *hdev,
31664         if (usage->hid == (HID_UP_BUTTON | 0x0010)) {
31665                 /* This sub-device contains trackpoint, mark it */
31666                 hid_set_drvdata(hdev, (void *)1);
31667 -               map_key_clear(KEY_MICMUTE);
31668 +               map_key_clear(LENOVO_KEY_MICMUTE);
31669                 return 1;
31670         }
31671         return 0;
31672 @@ -141,7 +152,7 @@ static int lenovo_input_mapping_cptkbd(struct hid_device *hdev,
31673             (usage->hid & HID_USAGE_PAGE) == HID_UP_LNVENDOR) {
31674                 switch (usage->hid & HID_USAGE) {
31675                 case 0x00f1: /* Fn-F4: Mic mute */
31676 -                       map_key_clear(KEY_MICMUTE);
31677 +                       map_key_clear(LENOVO_KEY_MICMUTE);
31678                         return 1;
31679                 case 0x00f2: /* Fn-F5: Brightness down */
31680                         map_key_clear(KEY_BRIGHTNESSDOWN);
31681 @@ -231,7 +242,7 @@ static int lenovo_input_mapping_tp10_ultrabook_kbd(struct hid_device *hdev,
31682                         map_key_clear(KEY_FN_ESC);
31683                         return 1;
31684                 case 9: /* Fn-F4: Mic mute */
31685 -                       map_key_clear(KEY_MICMUTE);
31686 +                       map_key_clear(LENOVO_KEY_MICMUTE);
31687                         return 1;
31688                 case 10: /* Fn-F7: Control panel */
31689                         map_key_clear(KEY_CONFIG);
31690 @@ -349,7 +360,7 @@ static ssize_t attr_fn_lock_store(struct device *dev,
31692         struct hid_device *hdev = to_hid_device(dev);
31693         struct lenovo_drvdata *data = hid_get_drvdata(hdev);
31694 -       int value;
31695 +       int value, ret;
31697         if (kstrtoint(buf, 10, &value))
31698                 return -EINVAL;
31699 @@ -364,7 +375,9 @@ static ssize_t attr_fn_lock_store(struct device *dev,
31700                 lenovo_features_set_cptkbd(hdev);
31701                 break;
31702         case USB_DEVICE_ID_LENOVO_TP10UBKBD:
31703 -               lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, value);
31704 +               ret = lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, value);
31705 +               if (ret)
31706 +                       return ret;
31707                 break;
31708         }
31710 @@ -498,6 +511,9 @@ static int lenovo_event_cptkbd(struct hid_device *hdev,
31711  static int lenovo_event(struct hid_device *hdev, struct hid_field *field,
31712                 struct hid_usage *usage, __s32 value)
31714 +       if (!hid_get_drvdata(hdev))
31715 +               return 0;
31717         switch (hdev->product) {
31718         case USB_DEVICE_ID_LENOVO_CUSBKBD:
31719         case USB_DEVICE_ID_LENOVO_CBTKBD:
31720 @@ -777,7 +793,7 @@ static enum led_brightness lenovo_led_brightness_get(
31721                                 : LED_OFF;
31724 -static void lenovo_led_brightness_set(struct led_classdev *led_cdev,
31725 +static int lenovo_led_brightness_set(struct led_classdev *led_cdev,
31726                         enum led_brightness value)
31728         struct device *dev = led_cdev->dev->parent;
31729 @@ -785,6 +801,7 @@ static void lenovo_led_brightness_set(struct led_classdev *led_cdev,
31730         struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
31731         u8 tp10ubkbd_led[] = { TP10UBKBD_MUTE_LED, TP10UBKBD_MICMUTE_LED };
31732         int led_nr = 0;
31733 +       int ret = 0;
31735         if (led_cdev == &data_pointer->led_micmute)
31736                 led_nr = 1;
31737 @@ -799,9 +816,11 @@ static void lenovo_led_brightness_set(struct led_classdev *led_cdev,
31738                 lenovo_led_set_tpkbd(hdev);
31739                 break;
31740         case USB_DEVICE_ID_LENOVO_TP10UBKBD:
31741 -               lenovo_led_set_tp10ubkbd(hdev, tp10ubkbd_led[led_nr], value);
31742 +               ret = lenovo_led_set_tp10ubkbd(hdev, tp10ubkbd_led[led_nr], value);
31743                 break;
31744         }
31746 +       return ret;
31749  static int lenovo_register_leds(struct hid_device *hdev)
31750 @@ -822,7 +841,8 @@ static int lenovo_register_leds(struct hid_device *hdev)
31752         data->led_mute.name = name_mute;
31753         data->led_mute.brightness_get = lenovo_led_brightness_get;
31754 -       data->led_mute.brightness_set = lenovo_led_brightness_set;
31755 +       data->led_mute.brightness_set_blocking = lenovo_led_brightness_set;
31756 +       data->led_mute.flags = LED_HW_PLUGGABLE;
31757         data->led_mute.dev = &hdev->dev;
31758         ret = led_classdev_register(&hdev->dev, &data->led_mute);
31759         if (ret < 0)
31760 @@ -830,7 +850,8 @@ static int lenovo_register_leds(struct hid_device *hdev)
31762         data->led_micmute.name = name_micm;
31763         data->led_micmute.brightness_get = lenovo_led_brightness_get;
31764 -       data->led_micmute.brightness_set = lenovo_led_brightness_set;
31765 +       data->led_micmute.brightness_set_blocking = lenovo_led_brightness_set;
31766 +       data->led_micmute.flags = LED_HW_PLUGGABLE;
31767         data->led_micmute.dev = &hdev->dev;
31768         ret = led_classdev_register(&hdev->dev, &data->led_micmute);
31769         if (ret < 0) {
31770 diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c
31771 index 85b685efc12f..e81b7cec2d12 100644
31772 --- a/drivers/hid/hid-plantronics.c
31773 +++ b/drivers/hid/hid-plantronics.c
31774 @@ -13,6 +13,7 @@
31776  #include <linux/hid.h>
31777  #include <linux/module.h>
31778 +#include <linux/jiffies.h>
31780  #define PLT_HID_1_0_PAGE       0xffa00000
31781  #define PLT_HID_2_0_PAGE       0xffa20000
31782 @@ -36,6 +37,16 @@
31783  #define PLT_ALLOW_CONSUMER (field->application == HID_CP_CONSUMERCONTROL && \
31784                             (usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER)
31786 +#define PLT_QUIRK_DOUBLE_VOLUME_KEYS BIT(0)
31788 +#define PLT_DOUBLE_KEY_TIMEOUT 5 /* ms */
31790 +struct plt_drv_data {
31791 +       unsigned long device_type;
31792 +       unsigned long last_volume_key_ts;
31793 +       u32 quirks;
31796  static int plantronics_input_mapping(struct hid_device *hdev,
31797                                      struct hid_input *hi,
31798                                      struct hid_field *field,
31799 @@ -43,7 +54,8 @@ static int plantronics_input_mapping(struct hid_device *hdev,
31800                                      unsigned long **bit, int *max)
31802         unsigned short mapped_key;
31803 -       unsigned long plt_type = (unsigned long)hid_get_drvdata(hdev);
31804 +       struct plt_drv_data *drv_data = hid_get_drvdata(hdev);
31805 +       unsigned long plt_type = drv_data->device_type;
31807         /* special case for PTT products */
31808         if (field->application == HID_GD_JOYSTICK)
31809 @@ -105,6 +117,30 @@ static int plantronics_input_mapping(struct hid_device *hdev,
31810         return 1;
31813 +static int plantronics_event(struct hid_device *hdev, struct hid_field *field,
31814 +                            struct hid_usage *usage, __s32 value)
31816 +       struct plt_drv_data *drv_data = hid_get_drvdata(hdev);
31818 +       if (drv_data->quirks & PLT_QUIRK_DOUBLE_VOLUME_KEYS) {
31819 +               unsigned long prev_ts, cur_ts;
31821 +               /* Usages are filtered in plantronics_usages. */
31823 +               if (!value) /* Handle key presses only. */
31824 +                       return 0;
31826 +               prev_ts = drv_data->last_volume_key_ts;
31827 +               cur_ts = jiffies;
31828 +               if (jiffies_to_msecs(cur_ts - prev_ts) <= PLT_DOUBLE_KEY_TIMEOUT)
31829 +                       return 1; /* Ignore the repeated key. */
31831 +               drv_data->last_volume_key_ts = cur_ts;
31832 +       }
31834 +       return 0;
31837  static unsigned long plantronics_device_type(struct hid_device *hdev)
31839         unsigned i, col_page;
31840 @@ -133,15 +169,24 @@ static unsigned long plantronics_device_type(struct hid_device *hdev)
31841  static int plantronics_probe(struct hid_device *hdev,
31842                              const struct hid_device_id *id)
31844 +       struct plt_drv_data *drv_data;
31845         int ret;
31847 +       drv_data = devm_kzalloc(&hdev->dev, sizeof(*drv_data), GFP_KERNEL);
31848 +       if (!drv_data)
31849 +               return -ENOMEM;
31851         ret = hid_parse(hdev);
31852         if (ret) {
31853                 hid_err(hdev, "parse failed\n");
31854                 goto err;
31855         }
31857 -       hid_set_drvdata(hdev, (void *)plantronics_device_type(hdev));
31858 +       drv_data->device_type = plantronics_device_type(hdev);
31859 +       drv_data->quirks = id->driver_data;
31860 +       drv_data->last_volume_key_ts = jiffies - msecs_to_jiffies(PLT_DOUBLE_KEY_TIMEOUT);
31862 +       hid_set_drvdata(hdev, drv_data);
31864         ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT |
31865                 HID_CONNECT_HIDINPUT_FORCE | HID_CONNECT_HIDDEV_FORCE);
31866 @@ -153,15 +198,26 @@ static int plantronics_probe(struct hid_device *hdev,
31869  static const struct hid_device_id plantronics_devices[] = {
31870 +       { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
31871 +                                        USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES),
31872 +               .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
31873         { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
31874         { }
31875  };
31876  MODULE_DEVICE_TABLE(hid, plantronics_devices);
31878 +static const struct hid_usage_id plantronics_usages[] = {
31879 +       { HID_CP_VOLUMEUP, EV_KEY, HID_ANY_ID },
31880 +       { HID_CP_VOLUMEDOWN, EV_KEY, HID_ANY_ID },
31881 +       { HID_TERMINATOR, HID_TERMINATOR, HID_TERMINATOR }
31884  static struct hid_driver plantronics_driver = {
31885         .name = "plantronics",
31886         .id_table = plantronics_devices,
31887 +       .usage_table = plantronics_usages,
31888         .input_mapping = plantronics_input_mapping,
31889 +       .event = plantronics_event,
31890         .probe = plantronics_probe,
31891  };
31892  module_hid_driver(plantronics_driver);
31893 diff --git a/drivers/hsi/hsi_core.c b/drivers/hsi/hsi_core.c
31894 index c3fb5beb846e..ec90713564e3 100644
31895 --- a/drivers/hsi/hsi_core.c
31896 +++ b/drivers/hsi/hsi_core.c
31897 @@ -210,8 +210,6 @@ static void hsi_add_client_from_dt(struct hsi_port *port,
31898         if (err)
31899                 goto err;
31901 -       dev_set_name(&cl->device, "%s", name);
31903         err = hsi_of_property_parse_mode(client, "hsi-mode", &mode);
31904         if (err) {
31905                 err = hsi_of_property_parse_mode(client, "hsi-rx-mode",
31906 @@ -293,6 +291,7 @@ static void hsi_add_client_from_dt(struct hsi_port *port,
31907         cl->device.release = hsi_client_release;
31908         cl->device.of_node = client;
31910 +       dev_set_name(&cl->device, "%s", name);
31911         if (device_register(&cl->device) < 0) {
31912                 pr_err("hsi: failed to register client: %s\n", name);
31913                 put_device(&cl->device);
31914 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
31915 index 0bd202de7960..945e41f5e3a8 100644
31916 --- a/drivers/hv/channel.c
31917 +++ b/drivers/hv/channel.c
31918 @@ -653,7 +653,7 @@ static int __vmbus_open(struct vmbus_channel *newchannel,
31920         if (newchannel->rescind) {
31921                 err = -ENODEV;
31922 -               goto error_free_info;
31923 +               goto error_clean_msglist;
31924         }
31926         err = vmbus_post_msg(open_msg,
31927 diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
31928 index f0ed730e2e4e..ecebf1235fd5 100644
31929 --- a/drivers/hv/channel_mgmt.c
31930 +++ b/drivers/hv/channel_mgmt.c
31931 @@ -756,6 +756,12 @@ static void init_vp_index(struct vmbus_channel *channel)
31932         free_cpumask_var(available_mask);
31935 +#define UNLOAD_DELAY_UNIT_MS   10              /* 10 milliseconds */
31936 +#define UNLOAD_WAIT_MS         (100*1000)      /* 100 seconds */
31937 +#define UNLOAD_WAIT_LOOPS      (UNLOAD_WAIT_MS/UNLOAD_DELAY_UNIT_MS)
31938 +#define UNLOAD_MSG_MS          (5*1000)        /* Every 5 seconds */
31939 +#define UNLOAD_MSG_LOOPS       (UNLOAD_MSG_MS/UNLOAD_DELAY_UNIT_MS)
31941  static void vmbus_wait_for_unload(void)
31943         int cpu;
31944 @@ -773,12 +779,17 @@ static void vmbus_wait_for_unload(void)
31945          * vmbus_connection.unload_event. If not, the last thing we can do is
31946          * read message pages for all CPUs directly.
31947          *
31948 -        * Wait no more than 10 seconds so that the panic path can't get
31949 -        * hung forever in case the response message isn't seen.
31950 +        * Wait up to 100 seconds since an Azure host must writeback any dirty
31951 +        * data in its disk cache before the VMbus UNLOAD request will
31952 +        * complete. This flushing has been empirically observed to take up
31953 +        * to 50 seconds in cases with a lot of dirty data, so allow additional
31954 +        * leeway and for inaccuracies in mdelay(). But eventually time out so
31955 +        * that the panic path can't get hung forever in case the response
31956 +        * message isn't seen.
31957          */
31958 -       for (i = 0; i < 1000; i++) {
31959 +       for (i = 1; i <= UNLOAD_WAIT_LOOPS; i++) {
31960                 if (completion_done(&vmbus_connection.unload_event))
31961 -                       break;
31962 +                       goto completed;
31964                 for_each_online_cpu(cpu) {
31965                         struct hv_per_cpu_context *hv_cpu
31966 @@ -801,9 +812,18 @@ static void vmbus_wait_for_unload(void)
31967                         vmbus_signal_eom(msg, message_type);
31968                 }
31970 -               mdelay(10);
31971 +               /*
31972 +                * Give a notice periodically so someone watching the
31973 +                * serial output won't think it is completely hung.
31974 +                */
31975 +               if (!(i % UNLOAD_MSG_LOOPS))
31976 +                       pr_notice("Waiting for VMBus UNLOAD to complete\n");
31978 +               mdelay(UNLOAD_DELAY_UNIT_MS);
31979         }
31980 +       pr_err("Continuing even though VMBus UNLOAD did not complete\n");
31982 +completed:
31983         /*
31984          * We're crashing and already got the UNLOAD_RESPONSE, cleanup all
31985          * maybe-pending messages on all CPUs to be able to receive new
31986 diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
31987 index 35833d4d1a1d..ecd82ebfd5bc 100644
31988 --- a/drivers/hv/ring_buffer.c
31989 +++ b/drivers/hv/ring_buffer.c
31990 @@ -313,7 +313,6 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
31991                 rqst_id = vmbus_next_request_id(&channel->requestor, requestid);
31992                 if (rqst_id == VMBUS_RQST_ERROR) {
31993                         spin_unlock_irqrestore(&outring_info->ring_lock, flags);
31994 -                       pr_err("No request id available\n");
31995                         return -EAGAIN;
31996                 }
31997         }
31998 diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
31999 index 29f5fed28c2a..974cb08c7aa7 100644
32000 --- a/drivers/hwmon/fam15h_power.c
32001 +++ b/drivers/hwmon/fam15h_power.c
32002 @@ -221,7 +221,7 @@ static ssize_t power1_average_show(struct device *dev,
32003                 prev_ptsc[cu] = data->cpu_sw_pwr_ptsc[cu];
32004         }
32006 -       leftover = schedule_timeout_interruptible(msecs_to_jiffies(data->power_period));
32007 +       leftover = schedule_msec_hrtimeout_interruptible((data->power_period));
32008         if (leftover)
32009                 return 0;
32011 diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c
32012 index 4382105bf142..2a4bed0ab226 100644
32013 --- a/drivers/hwmon/ltc2992.c
32014 +++ b/drivers/hwmon/ltc2992.c
32015 @@ -900,11 +900,15 @@ static int ltc2992_parse_dt(struct ltc2992_state *st)
32017         fwnode_for_each_available_child_node(fwnode, child) {
32018                 ret = fwnode_property_read_u32(child, "reg", &addr);
32019 -               if (ret < 0)
32020 +               if (ret < 0) {
32021 +                       fwnode_handle_put(child);
32022                         return ret;
32023 +               }
32025 -               if (addr > 1)
32026 +               if (addr > 1) {
32027 +                       fwnode_handle_put(child);
32028                         return -EINVAL;
32029 +               }
32031                 ret = fwnode_property_read_u32(child, "shunt-resistor-micro-ohms", &val);
32032                 if (!ret)
32033 diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
32034 index 7a5e539b567b..580e63d7daa0 100644
32035 --- a/drivers/hwmon/occ/common.c
32036 +++ b/drivers/hwmon/occ/common.c
32037 @@ -217,9 +217,9 @@ int occ_update_response(struct occ *occ)
32038                 return rc;
32040         /* limit the maximum rate of polling the OCC */
32041 -       if (time_after(jiffies, occ->last_update + OCC_UPDATE_FREQUENCY)) {
32042 +       if (time_after(jiffies, occ->next_update)) {
32043                 rc = occ_poll(occ);
32044 -               occ->last_update = jiffies;
32045 +               occ->next_update = jiffies + OCC_UPDATE_FREQUENCY;
32046         } else {
32047                 rc = occ->last_error;
32048         }
32049 @@ -1164,6 +1164,7 @@ int occ_setup(struct occ *occ, const char *name)
32050                 return rc;
32051         }
32053 +       occ->next_update = jiffies + OCC_UPDATE_FREQUENCY;
32054         occ_parse_poll_response(occ);
32056         rc = occ_setup_sensor_attrs(occ);
32057 diff --git a/drivers/hwmon/occ/common.h b/drivers/hwmon/occ/common.h
32058 index 67e6968b8978..e6df719770e8 100644
32059 --- a/drivers/hwmon/occ/common.h
32060 +++ b/drivers/hwmon/occ/common.h
32061 @@ -99,7 +99,7 @@ struct occ {
32062         u8 poll_cmd_data;               /* to perform OCC poll command */
32063         int (*send_cmd)(struct occ *occ, u8 *cmd);
32065 -       unsigned long last_update;
32066 +       unsigned long next_update;
32067         struct mutex lock;              /* lock OCC access */
32069         struct device *hwmon;
32070 diff --git a/drivers/hwmon/pmbus/pxe1610.c b/drivers/hwmon/pmbus/pxe1610.c
32071 index da27ce34ee3f..eb4a06003b7f 100644
32072 --- a/drivers/hwmon/pmbus/pxe1610.c
32073 +++ b/drivers/hwmon/pmbus/pxe1610.c
32074 @@ -41,6 +41,15 @@ static int pxe1610_identify(struct i2c_client *client,
32075                                 info->vrm_version[i] = vr13;
32076                                 break;
32077                         default:
32078 +                               /*
32079 +                                * If prior pages are available limit operation
32080 +                                * to them
32081 +                                */
32082 +                               if (i != 0) {
32083 +                                       info->pages = i;
32084 +                                       return 0;
32085 +                               }
32087                                 return -ENODEV;
32088                         }
32089                 }
32090 diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
32091 index 0f603b4094f2..a706ba11b93e 100644
32092 --- a/drivers/hwtracing/coresight/coresight-etm-perf.c
32093 +++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
32094 @@ -52,7 +52,7 @@ static ssize_t format_attr_contextid_show(struct device *dev,
32096         int pid_fmt = ETM_OPT_CTXTID;
32098 -#if defined(CONFIG_CORESIGHT_SOURCE_ETM4X)
32099 +#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X)
32100         pid_fmt = is_kernel_in_hyp_mode() ? ETM_OPT_CTXTID2 : ETM_OPT_CTXTID;
32101  #endif
32102         return sprintf(page, "config:%d\n", pid_fmt);
32103 diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c
32104 index 3629b7885aca..c594f45319fc 100644
32105 --- a/drivers/hwtracing/coresight/coresight-platform.c
32106 +++ b/drivers/hwtracing/coresight/coresight-platform.c
32107 @@ -90,6 +90,12 @@ static void of_coresight_get_ports_legacy(const struct device_node *node,
32108         struct of_endpoint endpoint;
32109         int in = 0, out = 0;
32111 +       /*
32112 +        * Avoid warnings in of_graph_get_next_endpoint()
32113 +        * if the device doesn't have any graph connections
32114 +        */
32115 +       if (!of_graph_is_present(node))
32116 +               return;
32117         do {
32118                 ep = of_graph_get_next_endpoint(node, ep);
32119                 if (!ep)
32120 diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
32121 index f72803a02391..28509b02a0b5 100644
32122 --- a/drivers/hwtracing/intel_th/gth.c
32123 +++ b/drivers/hwtracing/intel_th/gth.c
32124 @@ -543,7 +543,7 @@ static void intel_th_gth_disable(struct intel_th_device *thdev,
32125         output->active = false;
32127         for_each_set_bit(master, gth->output[output->port].master,
32128 -                        TH_CONFIGURABLE_MASTERS) {
32129 +                        TH_CONFIGURABLE_MASTERS + 1) {
32130                 gth_master_set(gth, master, -1);
32131         }
32132         spin_unlock(&gth->gth_lock);
32133 @@ -697,7 +697,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
32134         othdev->output.port = -1;
32135         othdev->output.active = false;
32136         gth->output[port].output = NULL;
32137 -       for (master = 0; master <= TH_CONFIGURABLE_MASTERS; master++)
32138 +       for (master = 0; master < TH_CONFIGURABLE_MASTERS + 1; master++)
32139                 if (gth->master[master] == port)
32140                         gth->master[master] = -1;
32141         spin_unlock(&gth->gth_lock);
32142 diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
32143 index 251e75c9ba9d..817cdb29bbd8 100644
32144 --- a/drivers/hwtracing/intel_th/pci.c
32145 +++ b/drivers/hwtracing/intel_th/pci.c
32146 @@ -273,11 +273,21 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
32147                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x51a6),
32148                 .driver_data = (kernel_ulong_t)&intel_th_2x,
32149         },
32150 +       {
32151 +               /* Alder Lake-M */
32152 +               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x54a6),
32153 +               .driver_data = (kernel_ulong_t)&intel_th_2x,
32154 +       },
32155         {
32156                 /* Alder Lake CPU */
32157                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f),
32158                 .driver_data = (kernel_ulong_t)&intel_th_2x,
32159         },
32160 +       {
32161 +               /* Rocket Lake CPU */
32162 +               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4c19),
32163 +               .driver_data = (kernel_ulong_t)&intel_th_2x,
32164 +       },
32165         { 0 },
32166  };
32168 diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
32169 index e4b7f2a951ad..c1bbc4caeb5c 100644
32170 --- a/drivers/i2c/busses/i2c-cadence.c
32171 +++ b/drivers/i2c/busses/i2c-cadence.c
32172 @@ -789,7 +789,7 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
32173         bool change_role = false;
32174  #endif
32176 -       ret = pm_runtime_get_sync(id->dev);
32177 +       ret = pm_runtime_resume_and_get(id->dev);
32178         if (ret < 0)
32179                 return ret;
32181 @@ -911,7 +911,7 @@ static int cdns_reg_slave(struct i2c_client *slave)
32182         if (slave->flags & I2C_CLIENT_TEN)
32183                 return -EAFNOSUPPORT;
32185 -       ret = pm_runtime_get_sync(id->dev);
32186 +       ret = pm_runtime_resume_and_get(id->dev);
32187         if (ret < 0)
32188                 return ret;
32190 @@ -1200,7 +1200,10 @@ static int cdns_i2c_probe(struct platform_device *pdev)
32191         if (IS_ERR(id->membase))
32192                 return PTR_ERR(id->membase);
32194 -       id->irq = platform_get_irq(pdev, 0);
32195 +       ret = platform_get_irq(pdev, 0);
32196 +       if (ret < 0)
32197 +               return ret;
32198 +       id->irq = ret;
32200         id->adap.owner = THIS_MODULE;
32201         id->adap.dev.of_node = pdev->dev.of_node;
32202 diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
32203 index a08554c1a570..bdff0e6345d9 100644
32204 --- a/drivers/i2c/busses/i2c-emev2.c
32205 +++ b/drivers/i2c/busses/i2c-emev2.c
32206 @@ -395,7 +395,10 @@ static int em_i2c_probe(struct platform_device *pdev)
32208         em_i2c_reset(&priv->adap);
32210 -       priv->irq = platform_get_irq(pdev, 0);
32211 +       ret = platform_get_irq(pdev, 0);
32212 +       if (ret < 0)
32213 +               goto err_clk;
32214 +       priv->irq = ret;
32215         ret = devm_request_irq(&pdev->dev, priv->irq, em_i2c_irq_handler, 0,
32216                                 "em_i2c", priv);
32217         if (ret)
32218 diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
32219 index 4acee6f9e5a3..99d446763530 100644
32220 --- a/drivers/i2c/busses/i2c-i801.c
32221 +++ b/drivers/i2c/busses/i2c-i801.c
32222 @@ -73,6 +73,7 @@
32223   * Comet Lake-V (PCH)          0xa3a3  32      hard    yes     yes     yes
32224   * Alder Lake-S (PCH)          0x7aa3  32      hard    yes     yes     yes
32225   * Alder Lake-P (PCH)          0x51a3  32      hard    yes     yes     yes
32226 + * Alder Lake-M (PCH)          0x54a3  32      hard    yes     yes     yes
32227   *
32228   * Features supported by this driver:
32229   * Software PEC                                no
32230 @@ -230,6 +231,7 @@
32231  #define PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS         0x4b23
32232  #define PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS          0x4da3
32233  #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS         0x51a3
32234 +#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS         0x54a3
32235  #define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS              0x5ad4
32236  #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS         0x7aa3
32237  #define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS            0x8c22
32238 @@ -1087,6 +1089,7 @@ static const struct pci_device_id i801_ids[] = {
32239         { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS) },
32240         { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS) },
32241         { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS) },
32242 +       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS) },
32243         { 0, }
32244  };
32246 @@ -1771,6 +1774,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
32247         case PCI_DEVICE_ID_INTEL_EBG_SMBUS:
32248         case PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS:
32249         case PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS:
32250 +       case PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS:
32251                 priv->features |= FEATURE_BLOCK_PROC;
32252                 priv->features |= FEATURE_I2C_BLOCK_READ;
32253                 priv->features |= FEATURE_IRQ;
32254 diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c
32255 index 98a89301ed2a..8e987945ed45 100644
32256 --- a/drivers/i2c/busses/i2c-img-scb.c
32257 +++ b/drivers/i2c/busses/i2c-img-scb.c
32258 @@ -1057,7 +1057,7 @@ static int img_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
32259                         atomic = true;
32260         }
32262 -       ret = pm_runtime_get_sync(adap->dev.parent);
32263 +       ret = pm_runtime_resume_and_get(adap->dev.parent);
32264         if (ret < 0)
32265                 return ret;
32267 @@ -1158,7 +1158,7 @@ static int img_i2c_init(struct img_i2c *i2c)
32268         u32 rev;
32269         int ret;
32271 -       ret = pm_runtime_get_sync(i2c->adap.dev.parent);
32272 +       ret = pm_runtime_resume_and_get(i2c->adap.dev.parent);
32273         if (ret < 0)
32274                 return ret;
32276 diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
32277 index 9db6ccded5e9..8b9ba055c418 100644
32278 --- a/drivers/i2c/busses/i2c-imx-lpi2c.c
32279 +++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
32280 @@ -259,7 +259,7 @@ static int lpi2c_imx_master_enable(struct lpi2c_imx_struct *lpi2c_imx)
32281         unsigned int temp;
32282         int ret;
32284 -       ret = pm_runtime_get_sync(lpi2c_imx->adapter.dev.parent);
32285 +       ret = pm_runtime_resume_and_get(lpi2c_imx->adapter.dev.parent);
32286         if (ret < 0)
32287                 return ret;
32289 diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
32290 index b80fdc1f0092..dc5ca71906db 100644
32291 --- a/drivers/i2c/busses/i2c-imx.c
32292 +++ b/drivers/i2c/busses/i2c-imx.c
32293 @@ -801,7 +801,7 @@ static int i2c_imx_reg_slave(struct i2c_client *client)
32294         i2c_imx->last_slave_event = I2C_SLAVE_STOP;
32296         /* Resume */
32297 -       ret = pm_runtime_get_sync(i2c_imx->adapter.dev.parent);
32298 +       ret = pm_runtime_resume_and_get(i2c_imx->adapter.dev.parent);
32299         if (ret < 0) {
32300                 dev_err(&i2c_imx->adapter.dev, "failed to resume i2c controller");
32301                 return ret;
32302 @@ -1253,7 +1253,7 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
32303         struct imx_i2c_struct *i2c_imx = i2c_get_adapdata(adapter);
32304         int result;
32306 -       result = pm_runtime_get_sync(i2c_imx->adapter.dev.parent);
32307 +       result = pm_runtime_resume_and_get(i2c_imx->adapter.dev.parent);
32308         if (result < 0)
32309                 return result;
32311 @@ -1496,7 +1496,7 @@ static int i2c_imx_remove(struct platform_device *pdev)
32312         struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev);
32313         int irq, ret;
32315 -       ret = pm_runtime_get_sync(&pdev->dev);
32316 +       ret = pm_runtime_resume_and_get(&pdev->dev);
32317         if (ret < 0)
32318                 return ret;
32320 diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
32321 index 55177eb21d7b..baa7319eee53 100644
32322 --- a/drivers/i2c/busses/i2c-jz4780.c
32323 +++ b/drivers/i2c/busses/i2c-jz4780.c
32324 @@ -825,7 +825,10 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
32326         jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0x0);
32328 -       i2c->irq = platform_get_irq(pdev, 0);
32329 +       ret = platform_get_irq(pdev, 0);
32330 +       if (ret < 0)
32331 +               goto err;
32332 +       i2c->irq = ret;
32333         ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0,
32334                                dev_name(&pdev->dev), i2c);
32335         if (ret)
32336 diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c
32337 index 2fb0532d8a16..ab261d762dea 100644
32338 --- a/drivers/i2c/busses/i2c-mlxbf.c
32339 +++ b/drivers/i2c/busses/i2c-mlxbf.c
32340 @@ -2376,6 +2376,8 @@ static int mlxbf_i2c_probe(struct platform_device *pdev)
32341         mlxbf_i2c_init_slave(pdev, priv);
32343         irq = platform_get_irq(pdev, 0);
32344 +       if (irq < 0)
32345 +               return irq;
32346         ret = devm_request_irq(dev, irq, mlxbf_smbus_irq,
32347                                IRQF_ONESHOT | IRQF_SHARED | IRQF_PROBE_SHARED,
32348                                dev_name(dev), priv);
32349 diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
32350 index 2ffd2f354d0a..bf25acba2ed5 100644
32351 --- a/drivers/i2c/busses/i2c-mt65xx.c
32352 +++ b/drivers/i2c/busses/i2c-mt65xx.c
32353 @@ -479,7 +479,7 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
32355         u16 control_reg;
32357 -       if (i2c->dev_comp->dma_sync) {
32358 +       if (i2c->dev_comp->apdma_sync) {
32359                 writel(I2C_DMA_WARM_RST, i2c->pdmabase + OFFSET_RST);
32360                 udelay(10);
32361                 writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
32362 @@ -564,7 +564,7 @@ static const struct i2c_spec_values *mtk_i2c_get_spec(unsigned int speed)
32364  static int mtk_i2c_max_step_cnt(unsigned int target_speed)
32366 -       if (target_speed > I2C_MAX_FAST_MODE_FREQ)
32367 +       if (target_speed > I2C_MAX_FAST_MODE_PLUS_FREQ)
32368                 return MAX_HS_STEP_CNT_DIV;
32369         else
32370                 return MAX_STEP_CNT_DIV;
32371 @@ -635,7 +635,7 @@ static int mtk_i2c_check_ac_timing(struct mtk_i2c *i2c,
32372         if (sda_min > sda_max)
32373                 return -3;
32375 -       if (check_speed > I2C_MAX_FAST_MODE_FREQ) {
32376 +       if (check_speed > I2C_MAX_FAST_MODE_PLUS_FREQ) {
32377                 if (i2c->dev_comp->ltiming_adjust) {
32378                         i2c->ac_timing.hs = I2C_TIME_DEFAULT_VALUE |
32379                                 (sample_cnt << 12) | (high_cnt << 8);
32380 @@ -850,7 +850,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
32382         control_reg = mtk_i2c_readw(i2c, OFFSET_CONTROL) &
32383                         ~(I2C_CONTROL_DIR_CHANGE | I2C_CONTROL_RS);
32384 -       if ((i2c->speed_hz > I2C_MAX_FAST_MODE_FREQ) || (left_num >= 1))
32385 +       if ((i2c->speed_hz > I2C_MAX_FAST_MODE_PLUS_FREQ) || (left_num >= 1))
32386                 control_reg |= I2C_CONTROL_RS;
32388         if (i2c->op == I2C_MASTER_WRRD)
32389 @@ -1067,7 +1067,8 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
32390                 }
32391         }
32393 -       if (i2c->auto_restart && num >= 2 && i2c->speed_hz > I2C_MAX_FAST_MODE_FREQ)
32394 +       if (i2c->auto_restart && num >= 2 &&
32395 +               i2c->speed_hz > I2C_MAX_FAST_MODE_PLUS_FREQ)
32396                 /* ignore the first restart irq after the master code,
32397                  * otherwise the first transfer will be discarded.
32398                  */
32399 diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
32400 index 12ac4212aded..d4f6c6d60683 100644
32401 --- a/drivers/i2c/busses/i2c-omap.c
32402 +++ b/drivers/i2c/busses/i2c-omap.c
32403 @@ -1404,9 +1404,9 @@ omap_i2c_probe(struct platform_device *pdev)
32404         pm_runtime_set_autosuspend_delay(omap->dev, OMAP_I2C_PM_TIMEOUT);
32405         pm_runtime_use_autosuspend(omap->dev);
32407 -       r = pm_runtime_get_sync(omap->dev);
32408 +       r = pm_runtime_resume_and_get(omap->dev);
32409         if (r < 0)
32410 -               goto err_free_mem;
32411 +               goto err_disable_pm;
32413         /*
32414          * Read the Rev hi bit-[15:14] ie scheme this is 1 indicates ver2.
32415 @@ -1513,8 +1513,8 @@ omap_i2c_probe(struct platform_device *pdev)
32416         omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0);
32417         pm_runtime_dont_use_autosuspend(omap->dev);
32418         pm_runtime_put_sync(omap->dev);
32419 +err_disable_pm:
32420         pm_runtime_disable(&pdev->dev);
32421 -err_free_mem:
32423         return r;
32425 @@ -1525,7 +1525,7 @@ static int omap_i2c_remove(struct platform_device *pdev)
32426         int ret;
32428         i2c_del_adapter(&omap->adapter);
32429 -       ret = pm_runtime_get_sync(&pdev->dev);
32430 +       ret = pm_runtime_resume_and_get(&pdev->dev);
32431         if (ret < 0)
32432                 return ret;
32434 diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
32435 index 12f6d452c0f7..8722ca23f889 100644
32436 --- a/drivers/i2c/busses/i2c-rcar.c
32437 +++ b/drivers/i2c/busses/i2c-rcar.c
32438 @@ -1027,7 +1027,10 @@ static int rcar_i2c_probe(struct platform_device *pdev)
32439         if (of_property_read_bool(dev->of_node, "smbus"))
32440                 priv->flags |= ID_P_HOST_NOTIFY;
32442 -       priv->irq = platform_get_irq(pdev, 0);
32443 +       ret = platform_get_irq(pdev, 0);
32444 +       if (ret < 0)
32445 +               goto out_pm_disable;
32446 +       priv->irq = ret;
32447         ret = devm_request_irq(dev, priv->irq, irqhandler, irqflags, dev_name(dev), priv);
32448         if (ret < 0) {
32449                 dev_err(dev, "cannot get irq %d\n", priv->irq);
32450 diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
32451 index c2005c789d2b..319d1fa617c8 100644
32452 --- a/drivers/i2c/busses/i2c-sh7760.c
32453 +++ b/drivers/i2c/busses/i2c-sh7760.c
32454 @@ -471,7 +471,10 @@ static int sh7760_i2c_probe(struct platform_device *pdev)
32455                 goto out2;
32456         }
32458 -       id->irq = platform_get_irq(pdev, 0);
32459 +       ret = platform_get_irq(pdev, 0);
32460 +       if (ret < 0)
32461 +               goto out3;
32462 +       id->irq = ret;
32464         id->adap.nr = pdev->id;
32465         id->adap.algo = &sh7760_i2c_algo;
32466 diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
32467 index 2917fecf6c80..8ead7e021008 100644
32468 --- a/drivers/i2c/busses/i2c-sprd.c
32469 +++ b/drivers/i2c/busses/i2c-sprd.c
32470 @@ -290,7 +290,7 @@ static int sprd_i2c_master_xfer(struct i2c_adapter *i2c_adap,
32471         struct sprd_i2c *i2c_dev = i2c_adap->algo_data;
32472         int im, ret;
32474 -       ret = pm_runtime_get_sync(i2c_dev->dev);
32475 +       ret = pm_runtime_resume_and_get(i2c_dev->dev);
32476         if (ret < 0)
32477                 return ret;
32479 @@ -576,7 +576,7 @@ static int sprd_i2c_remove(struct platform_device *pdev)
32480         struct sprd_i2c *i2c_dev = platform_get_drvdata(pdev);
32481         int ret;
32483 -       ret = pm_runtime_get_sync(i2c_dev->dev);
32484 +       ret = pm_runtime_resume_and_get(i2c_dev->dev);
32485         if (ret < 0)
32486                 return ret;
32488 diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
32489 index c62c815b88eb..318abfa7926b 100644
32490 --- a/drivers/i2c/busses/i2c-stm32f7.c
32491 +++ b/drivers/i2c/busses/i2c-stm32f7.c
32492 @@ -1652,7 +1652,7 @@ static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap,
32493         i2c_dev->msg_id = 0;
32494         f7_msg->smbus = false;
32496 -       ret = pm_runtime_get_sync(i2c_dev->dev);
32497 +       ret = pm_runtime_resume_and_get(i2c_dev->dev);
32498         if (ret < 0)
32499                 return ret;
32501 @@ -1698,7 +1698,7 @@ static int stm32f7_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
32502         f7_msg->read_write = read_write;
32503         f7_msg->smbus = true;
32505 -       ret = pm_runtime_get_sync(dev);
32506 +       ret = pm_runtime_resume_and_get(dev);
32507         if (ret < 0)
32508                 return ret;
32510 @@ -1799,7 +1799,7 @@ static int stm32f7_i2c_reg_slave(struct i2c_client *slave)
32511         if (ret)
32512                 return ret;
32514 -       ret = pm_runtime_get_sync(dev);
32515 +       ret = pm_runtime_resume_and_get(dev);
32516         if (ret < 0)
32517                 return ret;
32519 @@ -1880,7 +1880,7 @@ static int stm32f7_i2c_unreg_slave(struct i2c_client *slave)
32521         WARN_ON(!i2c_dev->slave[id]);
32523 -       ret = pm_runtime_get_sync(i2c_dev->dev);
32524 +       ret = pm_runtime_resume_and_get(i2c_dev->dev);
32525         if (ret < 0)
32526                 return ret;
32528 @@ -2273,7 +2273,7 @@ static int stm32f7_i2c_regs_backup(struct stm32f7_i2c_dev *i2c_dev)
32529         int ret;
32530         struct stm32f7_i2c_regs *backup_regs = &i2c_dev->backup_regs;
32532 -       ret = pm_runtime_get_sync(i2c_dev->dev);
32533 +       ret = pm_runtime_resume_and_get(i2c_dev->dev);
32534         if (ret < 0)
32535                 return ret;
32537 @@ -2295,7 +2295,7 @@ static int stm32f7_i2c_regs_restore(struct stm32f7_i2c_dev *i2c_dev)
32538         int ret;
32539         struct stm32f7_i2c_regs *backup_regs = &i2c_dev->backup_regs;
32541 -       ret = pm_runtime_get_sync(i2c_dev->dev);
32542 +       ret = pm_runtime_resume_and_get(i2c_dev->dev);
32543         if (ret < 0)
32544                 return ret;
32546 diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
32547 index 087b2951942e..2a8568b97c14 100644
32548 --- a/drivers/i2c/busses/i2c-xiic.c
32549 +++ b/drivers/i2c/busses/i2c-xiic.c
32550 @@ -706,7 +706,7 @@ static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
32551         dev_dbg(adap->dev.parent, "%s entry SR: 0x%x\n", __func__,
32552                 xiic_getreg8(i2c, XIIC_SR_REG_OFFSET));
32554 -       err = pm_runtime_get_sync(i2c->dev);
32555 +       err = pm_runtime_resume_and_get(i2c->dev);
32556         if (err < 0)
32557                 return err;
32559 @@ -873,7 +873,7 @@ static int xiic_i2c_remove(struct platform_device *pdev)
32560         /* remove adapter & data */
32561         i2c_del_adapter(&i2c->adap);
32563 -       ret = pm_runtime_get_sync(i2c->dev);
32564 +       ret = pm_runtime_resume_and_get(i2c->dev);
32565         if (ret < 0)
32566                 return ret;
32568 diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
32569 index 6ceb11cc4be1..6ef38a8ee95c 100644
32570 --- a/drivers/i2c/i2c-dev.c
32571 +++ b/drivers/i2c/i2c-dev.c
32572 @@ -440,8 +440,13 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
32573                                    sizeof(rdwr_arg)))
32574                         return -EFAULT;
32576 -               /* Put an arbitrary limit on the number of messages that can
32577 -                * be sent at once */
32578 +               if (!rdwr_arg.msgs || rdwr_arg.nmsgs == 0)
32579 +                       return -EINVAL;
32581 +               /*
32582 +                * Put an arbitrary limit on the number of messages that can
32583 +                * be sent at once
32584 +                */
32585                 if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
32586                         return -EINVAL;
32588 diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
32589 index f8e9b7305c13..e2e12a5585e5 100644
32590 --- a/drivers/i3c/master.c
32591 +++ b/drivers/i3c/master.c
32592 @@ -2535,7 +2535,7 @@ int i3c_master_register(struct i3c_master_controller *master,
32594         ret = i3c_master_bus_init(master);
32595         if (ret)
32596 -               goto err_destroy_wq;
32597 +               goto err_put_dev;
32599         ret = device_add(&master->dev);
32600         if (ret)
32601 @@ -2566,9 +2566,6 @@ int i3c_master_register(struct i3c_master_controller *master,
32602  err_cleanup_bus:
32603         i3c_master_bus_cleanup(master);
32605 -err_destroy_wq:
32606 -       destroy_workqueue(master->wq);
32608  err_put_dev:
32609         put_device(&master->dev);
32611 diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
32612 index 2e0c62c39155..8acf277b8b25 100644
32613 --- a/drivers/iio/accel/Kconfig
32614 +++ b/drivers/iio/accel/Kconfig
32615 @@ -211,7 +211,6 @@ config DMARD10
32616  config HID_SENSOR_ACCEL_3D
32617         depends on HID_SENSOR_HUB
32618         select IIO_BUFFER
32619 -       select IIO_TRIGGERED_BUFFER
32620         select HID_SENSOR_IIO_COMMON
32621         select HID_SENSOR_IIO_TRIGGER
32622         tristate "HID Accelerometers 3D"
32623 diff --git a/drivers/iio/accel/adis16201.c b/drivers/iio/accel/adis16201.c
32624 index 3633a4e302c6..fe225990de24 100644
32625 --- a/drivers/iio/accel/adis16201.c
32626 +++ b/drivers/iio/accel/adis16201.c
32627 @@ -215,7 +215,7 @@ static const struct iio_chan_spec adis16201_channels[] = {
32628         ADIS_AUX_ADC_CHAN(ADIS16201_AUX_ADC_REG, ADIS16201_SCAN_AUX_ADC, 0, 12),
32629         ADIS_INCLI_CHAN(X, ADIS16201_XINCL_OUT_REG, ADIS16201_SCAN_INCLI_X,
32630                         BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
32631 -       ADIS_INCLI_CHAN(X, ADIS16201_YINCL_OUT_REG, ADIS16201_SCAN_INCLI_Y,
32632 +       ADIS_INCLI_CHAN(Y, ADIS16201_YINCL_OUT_REG, ADIS16201_SCAN_INCLI_Y,
32633                         BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
32634         IIO_CHAN_SOFT_TIMESTAMP(7)
32635  };
32636 diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
32637 index e0667c4b3c08..91958da22dcf 100644
32638 --- a/drivers/iio/adc/Kconfig
32639 +++ b/drivers/iio/adc/Kconfig
32640 @@ -249,7 +249,7 @@ config AD799X
32641  config AD9467
32642         tristate "Analog Devices AD9467 High Speed ADC driver"
32643         depends on SPI
32644 -       select ADI_AXI_ADC
32645 +       depends on ADI_AXI_ADC
32646         help
32647           Say yes here to build support for Analog Devices:
32648           * AD9467 16-Bit, 200 MSPS/250 MSPS Analog-to-Digital Converter
32649 @@ -266,8 +266,6 @@ config ADI_AXI_ADC
32650         select IIO_BUFFER
32651         select IIO_BUFFER_HW_CONSUMER
32652         select IIO_BUFFER_DMAENGINE
32653 -       depends on HAS_IOMEM
32654 -       depends on OF
32655         help
32656           Say yes here to build support for Analog Devices Generic
32657           AXI ADC IP core. The IP core is used for interfacing with
32658 diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c
32659 index 17402714b387..9e9ff07cf972 100644
32660 --- a/drivers/iio/adc/ad7476.c
32661 +++ b/drivers/iio/adc/ad7476.c
32662 @@ -321,25 +321,15 @@ static int ad7476_probe(struct spi_device *spi)
32663         spi_message_init(&st->msg);
32664         spi_message_add_tail(&st->xfer, &st->msg);
32666 -       ret = iio_triggered_buffer_setup(indio_dev, NULL,
32667 -                       &ad7476_trigger_handler, NULL);
32668 +       ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev, NULL,
32669 +                                             &ad7476_trigger_handler, NULL);
32670         if (ret)
32671 -               goto error_disable_reg;
32672 +               return ret;
32674         if (st->chip_info->reset)
32675                 st->chip_info->reset(st);
32677 -       ret = iio_device_register(indio_dev);
32678 -       if (ret)
32679 -               goto error_ring_unregister;
32680 -       return 0;
32682 -error_ring_unregister:
32683 -       iio_triggered_buffer_cleanup(indio_dev);
32684 -error_disable_reg:
32685 -       regulator_disable(st->reg);
32687 -       return ret;
32688 +       return devm_iio_device_register(&spi->dev, indio_dev);
32691  static const struct spi_device_id ad7476_id[] = {
32692 diff --git a/drivers/iio/common/hid-sensors/Kconfig b/drivers/iio/common/hid-sensors/Kconfig
32693 index 24d492567336..2a3dd3b907be 100644
32694 --- a/drivers/iio/common/hid-sensors/Kconfig
32695 +++ b/drivers/iio/common/hid-sensors/Kconfig
32696 @@ -19,6 +19,7 @@ config HID_SENSOR_IIO_TRIGGER
32697         tristate "Common module (trigger) for all HID Sensor IIO drivers"
32698         depends on HID_SENSOR_HUB && HID_SENSOR_IIO_COMMON && IIO_BUFFER
32699         select IIO_TRIGGER
32700 +       select IIO_TRIGGERED_BUFFER
32701         help
32702           Say yes here to build trigger support for HID sensors.
32703           Triggers will be send if all requested attributes were read.
32704 diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
32705 index 5824f2edf975..20b5ac7ab66a 100644
32706 --- a/drivers/iio/gyro/Kconfig
32707 +++ b/drivers/iio/gyro/Kconfig
32708 @@ -111,7 +111,6 @@ config FXAS21002C_SPI
32709  config HID_SENSOR_GYRO_3D
32710         depends on HID_SENSOR_HUB
32711         select IIO_BUFFER
32712 -       select IIO_TRIGGERED_BUFFER
32713         select HID_SENSOR_IIO_COMMON
32714         select HID_SENSOR_IIO_TRIGGER
32715         tristate "HID Gyroscope 3D"
32716 diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
32717 index ac90be03332a..f17a93519535 100644
32718 --- a/drivers/iio/gyro/mpu3050-core.c
32719 +++ b/drivers/iio/gyro/mpu3050-core.c
32720 @@ -272,7 +272,16 @@ static int mpu3050_read_raw(struct iio_dev *indio_dev,
32721         case IIO_CHAN_INFO_OFFSET:
32722                 switch (chan->type) {
32723                 case IIO_TEMP:
32724 -                       /* The temperature scaling is (x+23000)/280 Celsius */
32725 +                       /*
32726 +                        * The temperature scaling is (x+23000)/280 Celsius
32727 +                        * for the "best fit straight line" temperature range
32728 +                        * of -30C..85C.  The 23000 includes room temperature
32729 +                        * offset of +35C, 280 is the precision scale and x is
32730 +                        * the 16-bit signed integer reported by hardware.
32731 +                        *
32732 +                        * Temperature value itself represents temperature of
32733 +                        * the sensor die.
32734 +                        */
32735                         *val = 23000;
32736                         return IIO_VAL_INT;
32737                 default:
32738 @@ -329,7 +338,7 @@ static int mpu3050_read_raw(struct iio_dev *indio_dev,
32739                                 goto out_read_raw_unlock;
32740                         }
32742 -                       *val = be16_to_cpu(raw_val);
32743 +                       *val = (s16)be16_to_cpu(raw_val);
32744                         ret = IIO_VAL_INT;
32746                         goto out_read_raw_unlock;
32747 diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
32748 index 6549fcf6db69..2de5494e7c22 100644
32749 --- a/drivers/iio/humidity/Kconfig
32750 +++ b/drivers/iio/humidity/Kconfig
32751 @@ -52,7 +52,6 @@ config HID_SENSOR_HUMIDITY
32752         tristate "HID Environmental humidity sensor"
32753         depends on HID_SENSOR_HUB
32754         select IIO_BUFFER
32755 -       select IIO_TRIGGERED_BUFFER
32756         select HID_SENSOR_IIO_COMMON
32757         select HID_SENSOR_IIO_TRIGGER
32758         help
32759 diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
32760 index dfe86c589325..c41b8ef1e250 100644
32761 --- a/drivers/iio/imu/adis16480.c
32762 +++ b/drivers/iio/imu/adis16480.c
32763 @@ -10,6 +10,7 @@
32764  #include <linux/of_irq.h>
32765  #include <linux/interrupt.h>
32766  #include <linux/delay.h>
32767 +#include <linux/math.h>
32768  #include <linux/mutex.h>
32769  #include <linux/device.h>
32770  #include <linux/kernel.h>
32771 @@ -17,6 +18,7 @@
32772  #include <linux/slab.h>
32773  #include <linux/sysfs.h>
32774  #include <linux/module.h>
32775 +#include <linux/lcm.h>
32777  #include <linux/iio/iio.h>
32778  #include <linux/iio/sysfs.h>
32779 @@ -170,6 +172,11 @@ static const char * const adis16480_int_pin_names[4] = {
32780         [ADIS16480_PIN_DIO4] = "DIO4",
32781  };
32783 +static bool low_rate_allow;
32784 +module_param(low_rate_allow, bool, 0444);
32785 +MODULE_PARM_DESC(low_rate_allow,
32786 +                "Allow IMU rates below the minimum advisable when external clk is used in PPS mode (default: N)");
32788  #ifdef CONFIG_DEBUG_FS
32790  static ssize_t adis16480_show_firmware_revision(struct file *file,
32791 @@ -312,7 +319,8 @@ static int adis16480_debugfs_init(struct iio_dev *indio_dev)
32792  static int adis16480_set_freq(struct iio_dev *indio_dev, int val, int val2)
32794         struct adis16480 *st = iio_priv(indio_dev);
32795 -       unsigned int t, reg;
32796 +       unsigned int t, sample_rate = st->clk_freq;
32797 +       int ret;
32799         if (val < 0 || val2 < 0)
32800                 return -EINVAL;
32801 @@ -321,28 +329,65 @@ static int adis16480_set_freq(struct iio_dev *indio_dev, int val, int val2)
32802         if (t == 0)
32803                 return -EINVAL;
32805 +       mutex_lock(&st->adis.state_lock);
32806         /*
32807 -        * When using PPS mode, the rate of data collection is equal to the
32808 -        * product of the external clock frequency and the scale factor in the
32809 -        * SYNC_SCALE register.
32810 -        * When using sync mode, or internal clock, the output data rate is
32811 -        * equal with  the clock frequency divided by DEC_RATE + 1.
32812 +        * When using PPS mode, the input clock needs to be scaled so that we have an IMU
32813 +        * sample rate between (optimally) 4000 and 4250. After this, we can use the
32814 +        * decimation filter to lower the sampling rate in order to get what the user wants.
32815 +        * Optimally, the user sample rate is a multiple of both the IMU sample rate and
32816 +        * the input clock. Hence, calculating the sync_scale dynamically gives us better
32817 +        * chances of achieving a perfect/integer value for DEC_RATE. The math here is:
32818 +        *      1. lcm of the input clock and the desired output rate.
32819 +        *      2. get the highest multiple of the previous result lower than the adis max rate.
32820 +        *      3. The last result becomes the IMU sample rate. Use that to calculate SYNC_SCALE
32821 +        *         and DEC_RATE (to get the user output rate)
32822          */
32823         if (st->clk_mode == ADIS16480_CLK_PPS) {
32824 -               t = t / st->clk_freq;
32825 -               reg = ADIS16495_REG_SYNC_SCALE;
32826 -       } else {
32827 -               t = st->clk_freq / t;
32828 -               reg = ADIS16480_REG_DEC_RATE;
32829 +               unsigned long scaled_rate = lcm(st->clk_freq, t);
32830 +               int sync_scale;
32832 +               /*
32833 +                * If lcm is bigger than the IMU maximum sampling rate there's no perfect
32834 +                * solution. In this case, we get the highest multiple of the input clock
32835 +                * lower than the IMU max sample rate.
32836 +                */
32837 +               if (scaled_rate > st->chip_info->int_clk)
32838 +                       scaled_rate = st->chip_info->int_clk / st->clk_freq * st->clk_freq;
32839 +               else
32840 +                       scaled_rate = st->chip_info->int_clk / scaled_rate * scaled_rate;
32842 +               /*
32843 +                * This is not an hard requirement but it's not advised to run the IMU
32844 +                * with a sample rate lower than 4000Hz due to possible undersampling
32845 +                * issues. However, there are users that might really want to take the risk.
32846 +                * Hence, we provide a module parameter for them. If set, we allow sample
32847 +                * rates lower than 4KHz. By default, we won't allow this and we just roundup
32848 +                * the rate to the next multiple of the input clock bigger than 4KHz. This
32849 +                * is done like this as in some cases (when DEC_RATE is 0) might give
32850 +                * us the closest value to the one desired by the user...
32851 +                */
32852 +               if (scaled_rate < 4000000 && !low_rate_allow)
32853 +                       scaled_rate = roundup(4000000, st->clk_freq);
32855 +               sync_scale = scaled_rate / st->clk_freq;
32856 +               ret = __adis_write_reg_16(&st->adis, ADIS16495_REG_SYNC_SCALE, sync_scale);
32857 +               if (ret)
32858 +                       goto error;
32860 +               sample_rate = scaled_rate;
32861         }
32863 +       t = DIV_ROUND_CLOSEST(sample_rate, t);
32864 +       if (t)
32865 +               t--;
32867         if (t > st->chip_info->max_dec_rate)
32868                 t = st->chip_info->max_dec_rate;
32870 -       if ((t != 0) && (st->clk_mode != ADIS16480_CLK_PPS))
32871 -               t--;
32873 -       return adis_write_reg_16(&st->adis, reg, t);
32874 +       ret = __adis_write_reg_16(&st->adis, ADIS16480_REG_DEC_RATE, t);
32875 +error:
32876 +       mutex_unlock(&st->adis.state_lock);
32877 +       return ret;
32880  static int adis16480_get_freq(struct iio_dev *indio_dev, int *val, int *val2)
32881 @@ -350,34 +395,35 @@ static int adis16480_get_freq(struct iio_dev *indio_dev, int *val, int *val2)
32882         struct adis16480 *st = iio_priv(indio_dev);
32883         uint16_t t;
32884         int ret;
32885 -       unsigned int freq;
32886 -       unsigned int reg;
32887 +       unsigned int freq, sample_rate = st->clk_freq;
32889 -       if (st->clk_mode == ADIS16480_CLK_PPS)
32890 -               reg = ADIS16495_REG_SYNC_SCALE;
32891 -       else
32892 -               reg = ADIS16480_REG_DEC_RATE;
32893 +       mutex_lock(&st->adis.state_lock);
32895 +       if (st->clk_mode == ADIS16480_CLK_PPS) {
32896 +               u16 sync_scale;
32898 +               ret = __adis_read_reg_16(&st->adis, ADIS16495_REG_SYNC_SCALE, &sync_scale);
32899 +               if (ret)
32900 +                       goto error;
32902 -       ret = adis_read_reg_16(&st->adis, reg, &t);
32903 +               sample_rate = st->clk_freq * sync_scale;
32904 +       }
32906 +       ret = __adis_read_reg_16(&st->adis, ADIS16480_REG_DEC_RATE, &t);
32907         if (ret)
32908 -               return ret;
32909 +               goto error;
32911 -       /*
32912 -        * When using PPS mode, the rate of data collection is equal to the
32913 -        * product of the external clock frequency and the scale factor in the
32914 -        * SYNC_SCALE register.
32915 -        * When using sync mode, or internal clock, the output data rate is
32916 -        * equal with  the clock frequency divided by DEC_RATE + 1.
32917 -        */
32918 -       if (st->clk_mode == ADIS16480_CLK_PPS)
32919 -               freq = st->clk_freq * t;
32920 -       else
32921 -               freq = st->clk_freq / (t + 1);
32922 +       mutex_unlock(&st->adis.state_lock);
32924 +       freq = DIV_ROUND_CLOSEST(sample_rate, (t + 1));
32926         *val = freq / 1000;
32927         *val2 = (freq % 1000) * 1000;
32929         return IIO_VAL_INT_PLUS_MICRO;
32930 +error:
32931 +       mutex_unlock(&st->adis.state_lock);
32932 +       return ret;
32935  enum {
32936 @@ -1278,6 +1324,20 @@ static int adis16480_probe(struct spi_device *spi)
32938                 st->clk_freq = clk_get_rate(st->ext_clk);
32939                 st->clk_freq *= 1000; /* micro */
32940 +               if (st->clk_mode == ADIS16480_CLK_PPS) {
32941 +                       u16 sync_scale;
32943 +                       /*
32944 +                        * In PPS mode, the IMU sample rate is the clk_freq * sync_scale. Hence,
32945 +                        * default the IMU sample rate to the highest multiple of the input clock
32946 +                        * lower than the IMU max sample rate. The internal sample rate is the
32947 +                        * max...
32948 +                        */
32949 +                       sync_scale = st->chip_info->int_clk / st->clk_freq;
32950 +                       ret = __adis_write_reg_16(&st->adis, ADIS16495_REG_SYNC_SCALE, sync_scale);
32951 +                       if (ret)
32952 +                               return ret;
32953 +               }
32954         } else {
32955                 st->clk_freq = st->chip_info->int_clk;
32956         }
32957 diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
32958 index 453c51c79655..69ab94ab7297 100644
32959 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
32960 +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
32961 @@ -731,12 +731,16 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev,
32962         }
32965 -static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val)
32966 +static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val,
32967 +                                       int val2)
32969         int result, i;
32971 +       if (val != 0)
32972 +               return -EINVAL;
32974         for (i = 0; i < ARRAY_SIZE(gyro_scale_6050); ++i) {
32975 -               if (gyro_scale_6050[i] == val) {
32976 +               if (gyro_scale_6050[i] == val2) {
32977                         result = inv_mpu6050_set_gyro_fsr(st, i);
32978                         if (result)
32979                                 return result;
32980 @@ -767,13 +771,17 @@ static int inv_write_raw_get_fmt(struct iio_dev *indio_dev,
32981         return -EINVAL;
32984 -static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val)
32985 +static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val,
32986 +                                        int val2)
32988         int result, i;
32989         u8 d;
32991 +       if (val != 0)
32992 +               return -EINVAL;
32994         for (i = 0; i < ARRAY_SIZE(accel_scale); ++i) {
32995 -               if (accel_scale[i] == val) {
32996 +               if (accel_scale[i] == val2) {
32997                         d = (i << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
32998                         result = regmap_write(st->map, st->reg->accl_config, d);
32999                         if (result)
33000 @@ -814,10 +822,10 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
33001         case IIO_CHAN_INFO_SCALE:
33002                 switch (chan->type) {
33003                 case IIO_ANGL_VEL:
33004 -                       result = inv_mpu6050_write_gyro_scale(st, val2);
33005 +                       result = inv_mpu6050_write_gyro_scale(st, val, val2);
33006                         break;
33007                 case IIO_ACCEL:
33008 -                       result = inv_mpu6050_write_accel_scale(st, val2);
33009 +                       result = inv_mpu6050_write_accel_scale(st, val, val2);
33010                         break;
33011                 default:
33012                         result = -EINVAL;
33013 diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
33014 index 7db761afa578..36f3a900878d 100644
33015 --- a/drivers/iio/industrialio-core.c
33016 +++ b/drivers/iio/industrialio-core.c
33017 @@ -1734,7 +1734,6 @@ static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
33018         if (!indio_dev->info)
33019                 goto out_unlock;
33021 -       ret = -EINVAL;
33022         list_for_each_entry(h, &iio_dev_opaque->ioctl_handlers, entry) {
33023                 ret = h->ioctl(indio_dev, filp, cmd, arg);
33024                 if (ret != IIO_IOCTL_UNHANDLED)
33025 @@ -1742,7 +1741,7 @@ static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
33026         }
33028         if (ret == IIO_IOCTL_UNHANDLED)
33029 -               ret = -EINVAL;
33030 +               ret = -ENODEV;
33032  out_unlock:
33033         mutex_unlock(&indio_dev->info_exist_lock);
33034 @@ -1864,9 +1863,6 @@ EXPORT_SYMBOL(__iio_device_register);
33035   **/
33036  void iio_device_unregister(struct iio_dev *indio_dev)
33038 -       struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
33039 -       struct iio_ioctl_handler *h, *t;
33041         cdev_device_del(&indio_dev->chrdev, &indio_dev->dev);
33043         mutex_lock(&indio_dev->info_exist_lock);
33044 @@ -1877,9 +1873,6 @@ void iio_device_unregister(struct iio_dev *indio_dev)
33046         indio_dev->info = NULL;
33048 -       list_for_each_entry_safe(h, t, &iio_dev_opaque->ioctl_handlers, entry)
33049 -               list_del(&h->entry);
33051         iio_device_wakeup_eventset(indio_dev);
33052         iio_buffer_wakeup_poll(indio_dev);
33054 diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
33055 index 33ad4dd0b5c7..917f9becf9c7 100644
33056 --- a/drivers/iio/light/Kconfig
33057 +++ b/drivers/iio/light/Kconfig
33058 @@ -256,7 +256,6 @@ config ISL29125
33059  config HID_SENSOR_ALS
33060         depends on HID_SENSOR_HUB
33061         select IIO_BUFFER
33062 -       select IIO_TRIGGERED_BUFFER
33063         select HID_SENSOR_IIO_COMMON
33064         select HID_SENSOR_IIO_TRIGGER
33065         tristate "HID ALS"
33066 @@ -270,7 +269,6 @@ config HID_SENSOR_ALS
33067  config HID_SENSOR_PROX
33068         depends on HID_SENSOR_HUB
33069         select IIO_BUFFER
33070 -       select IIO_TRIGGERED_BUFFER
33071         select HID_SENSOR_IIO_COMMON
33072         select HID_SENSOR_IIO_TRIGGER
33073         tristate "HID PROX"
33074 diff --git a/drivers/iio/light/gp2ap002.c b/drivers/iio/light/gp2ap002.c
33075 index 7ba7aa59437c..040d8429a6e0 100644
33076 --- a/drivers/iio/light/gp2ap002.c
33077 +++ b/drivers/iio/light/gp2ap002.c
33078 @@ -583,7 +583,7 @@ static int gp2ap002_probe(struct i2c_client *client,
33079                                         "gp2ap002", indio_dev);
33080         if (ret) {
33081                 dev_err(dev, "unable to request IRQ\n");
33082 -               goto out_disable_vio;
33083 +               goto out_put_pm;
33084         }
33085         gp2ap002->irq = client->irq;
33087 @@ -613,8 +613,9 @@ static int gp2ap002_probe(struct i2c_client *client,
33089         return 0;
33091 -out_disable_pm:
33092 +out_put_pm:
33093         pm_runtime_put_noidle(dev);
33094 +out_disable_pm:
33095         pm_runtime_disable(dev);
33096  out_disable_vio:
33097         regulator_disable(gp2ap002->vio);
33098 diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
33099 index 5bf2bfbc5379..6ce37819fb73 100644
33100 --- a/drivers/iio/light/tsl2563.c
33101 +++ b/drivers/iio/light/tsl2563.c
33102 @@ -271,11 +271,7 @@ static void tsl2563_wait_adc(struct tsl2563_chip *chip)
33103         default:
33104                 delay = 402;
33105         }
33106 -       /*
33107 -        * TODO: Make sure that we wait at least required delay but why we
33108 -        * have to extend it one tick more?
33109 -        */
33110 -       schedule_timeout_interruptible(msecs_to_jiffies(delay) + 2);
33111 +       schedule_msec_hrtimeout_interruptible(delay + 1);
33114  static int tsl2563_adjust_gainlevel(struct tsl2563_chip *chip, u16 adc)
33115 diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c
33116 index 0f787bfc88fc..c9d8f07a6fcd 100644
33117 --- a/drivers/iio/light/tsl2583.c
33118 +++ b/drivers/iio/light/tsl2583.c
33119 @@ -341,6 +341,14 @@ static int tsl2583_als_calibrate(struct iio_dev *indio_dev)
33120                 return lux_val;
33121         }
33123 +       /* Avoid division by zero of lux_value later on */
33124 +       if (lux_val == 0) {
33125 +               dev_err(&chip->client->dev,
33126 +                       "%s: lux_val of 0 will produce out of range trim_value\n",
33127 +                       __func__);
33128 +               return -ENODATA;
33129 +       }
33131         gain_trim_val = (unsigned int)(((chip->als_settings.als_cal_target)
33132                         * chip->als_settings.als_gain_trim) / lux_val);
33133         if ((gain_trim_val < 250) || (gain_trim_val > 4000)) {
33134 diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
33135 index 5d4ffd66032e..74ad5701c6c2 100644
33136 --- a/drivers/iio/magnetometer/Kconfig
33137 +++ b/drivers/iio/magnetometer/Kconfig
33138 @@ -95,7 +95,6 @@ config MAG3110
33139  config HID_SENSOR_MAGNETOMETER_3D
33140         depends on HID_SENSOR_HUB
33141         select IIO_BUFFER
33142 -       select IIO_TRIGGERED_BUFFER
33143         select HID_SENSOR_IIO_COMMON
33144         select HID_SENSOR_IIO_TRIGGER
33145         tristate "HID Magenetometer 3D"
33146 diff --git a/drivers/iio/magnetometer/yamaha-yas530.c b/drivers/iio/magnetometer/yamaha-yas530.c
33147 index d46f23d82b3d..2f2f8cb3c26c 100644
33148 --- a/drivers/iio/magnetometer/yamaha-yas530.c
33149 +++ b/drivers/iio/magnetometer/yamaha-yas530.c
33150 @@ -32,13 +32,14 @@
33151  #include <linux/regmap.h>
33152  #include <linux/regulator/consumer.h>
33153  #include <linux/random.h>
33154 -#include <linux/unaligned/be_byteshift.h>
33156  #include <linux/iio/buffer.h>
33157  #include <linux/iio/iio.h>
33158  #include <linux/iio/trigger_consumer.h>
33159  #include <linux/iio/triggered_buffer.h>
33161 +#include <asm/unaligned.h>
33163  /* This register map covers YAS530 and YAS532 but differs in YAS 537 and YAS539 */
33164  #define YAS5XX_DEVICE_ID               0x80
33165  #define YAS5XX_ACTUATE_INIT_COIL       0x81
33166 @@ -887,6 +888,7 @@ static int yas5xx_probe(struct i2c_client *i2c,
33167                 strncpy(yas5xx->name, "yas532", sizeof(yas5xx->name));
33168                 break;
33169         default:
33170 +               ret = -ENODEV;
33171                 dev_err(dev, "unhandled device ID %02x\n", yas5xx->devid);
33172                 goto assert_reset;
33173         }
33174 diff --git a/drivers/iio/orientation/Kconfig b/drivers/iio/orientation/Kconfig
33175 index a505583cc2fd..396cbbb867f4 100644
33176 --- a/drivers/iio/orientation/Kconfig
33177 +++ b/drivers/iio/orientation/Kconfig
33178 @@ -9,7 +9,6 @@ menu "Inclinometer sensors"
33179  config HID_SENSOR_INCLINOMETER_3D
33180         depends on HID_SENSOR_HUB
33181         select IIO_BUFFER
33182 -       select IIO_TRIGGERED_BUFFER
33183         select HID_SENSOR_IIO_COMMON
33184         select HID_SENSOR_IIO_TRIGGER
33185         tristate "HID Inclinometer 3D"
33186 @@ -20,7 +19,6 @@ config HID_SENSOR_INCLINOMETER_3D
33187  config HID_SENSOR_DEVICE_ROTATION
33188         depends on HID_SENSOR_HUB
33189         select IIO_BUFFER
33190 -       select IIO_TRIGGERED_BUFFER
33191         select HID_SENSOR_IIO_COMMON
33192         select HID_SENSOR_IIO_TRIGGER
33193         tristate "HID Device Rotation"
33194 diff --git a/drivers/iio/orientation/hid-sensor-rotation.c b/drivers/iio/orientation/hid-sensor-rotation.c
33195 index 18e4ef060096..c087d8f72a54 100644
33196 --- a/drivers/iio/orientation/hid-sensor-rotation.c
33197 +++ b/drivers/iio/orientation/hid-sensor-rotation.c
33198 @@ -21,7 +21,7 @@ struct dev_rot_state {
33199         struct hid_sensor_common common_attributes;
33200         struct hid_sensor_hub_attribute_info quaternion;
33201         struct {
33202 -               u32 sampled_vals[4] __aligned(16);
33203 +               s32 sampled_vals[4] __aligned(16);
33204                 u64 timestamp __aligned(8);
33205         } scan;
33206         int scale_pre_decml;
33207 @@ -170,8 +170,15 @@ static int dev_rot_capture_sample(struct hid_sensor_hub_device *hsdev,
33208         struct dev_rot_state *rot_state = iio_priv(indio_dev);
33210         if (usage_id == HID_USAGE_SENSOR_ORIENT_QUATERNION) {
33211 -               memcpy(&rot_state->scan.sampled_vals, raw_data,
33212 -                      sizeof(rot_state->scan.sampled_vals));
33213 +               if (raw_len / 4 == sizeof(s16)) {
33214 +                       rot_state->scan.sampled_vals[0] = ((s16 *)raw_data)[0];
33215 +                       rot_state->scan.sampled_vals[1] = ((s16 *)raw_data)[1];
33216 +                       rot_state->scan.sampled_vals[2] = ((s16 *)raw_data)[2];
33217 +                       rot_state->scan.sampled_vals[3] = ((s16 *)raw_data)[3];
33218 +               } else {
33219 +                       memcpy(&rot_state->scan.sampled_vals, raw_data,
33220 +                              sizeof(rot_state->scan.sampled_vals));
33221 +               }
33223                 dev_dbg(&indio_dev->dev, "Recd Quat len:%zu::%zu\n", raw_len,
33224                         sizeof(rot_state->scan.sampled_vals));
33225 diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
33226 index 689b978db4f9..fc0d3cfca418 100644
33227 --- a/drivers/iio/pressure/Kconfig
33228 +++ b/drivers/iio/pressure/Kconfig
33229 @@ -79,7 +79,6 @@ config DPS310
33230  config HID_SENSOR_PRESS
33231         depends on HID_SENSOR_HUB
33232         select IIO_BUFFER
33233 -       select IIO_TRIGGERED_BUFFER
33234         select HID_SENSOR_IIO_COMMON
33235         select HID_SENSOR_IIO_TRIGGER
33236         tristate "HID PRESS"
33237 diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
33238 index c685f10b5ae4..cc206bfa09c7 100644
33239 --- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
33240 +++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
33241 @@ -160,6 +160,7 @@ static int lidar_get_measurement(struct lidar_data *data, u16 *reg)
33242         ret = lidar_write_control(data, LIDAR_REG_CONTROL_ACQUIRE);
33243         if (ret < 0) {
33244                 dev_err(&client->dev, "cannot send start measurement command");
33245 +               pm_runtime_put_noidle(&client->dev);
33246                 return ret;
33247         }
33249 diff --git a/drivers/iio/proximity/sx9310.c b/drivers/iio/proximity/sx9310.c
33250 index 37fd0b65a014..ea82cfaf7f42 100644
33251 --- a/drivers/iio/proximity/sx9310.c
33252 +++ b/drivers/iio/proximity/sx9310.c
33253 @@ -763,7 +763,11 @@ static int sx9310_write_far_debounce(struct sx9310_data *data, int val)
33254         int ret;
33255         unsigned int regval;
33257 -       val = ilog2(val);
33258 +       if (val > 0)
33259 +               val = ilog2(val);
33260 +       if (!FIELD_FIT(SX9310_REG_PROX_CTRL10_FAR_DEBOUNCE_MASK, val))
33261 +               return -EINVAL;
33263         regval = FIELD_PREP(SX9310_REG_PROX_CTRL10_FAR_DEBOUNCE_MASK, val);
33265         mutex_lock(&data->mutex);
33266 @@ -780,7 +784,11 @@ static int sx9310_write_close_debounce(struct sx9310_data *data, int val)
33267         int ret;
33268         unsigned int regval;
33270 -       val = ilog2(val);
33271 +       if (val > 0)
33272 +               val = ilog2(val);
33273 +       if (!FIELD_FIT(SX9310_REG_PROX_CTRL10_CLOSE_DEBOUNCE_MASK, val))
33274 +               return -EINVAL;
33276         regval = FIELD_PREP(SX9310_REG_PROX_CTRL10_CLOSE_DEBOUNCE_MASK, val);
33278         mutex_lock(&data->mutex);
33279 @@ -1213,17 +1221,17 @@ static int sx9310_init_compensation(struct iio_dev *indio_dev)
33282  static const struct sx9310_reg_default *
33283 -sx9310_get_default_reg(struct sx9310_data *data, int i,
33284 +sx9310_get_default_reg(struct sx9310_data *data, int idx,
33285                        struct sx9310_reg_default *reg_def)
33287 -       int ret;
33288         const struct device_node *np = data->client->dev.of_node;
33289 -       u32 combined[SX9310_NUM_CHANNELS] = { 4, 4, 4, 4 };
33290 +       u32 combined[SX9310_NUM_CHANNELS];
33291 +       u32 start = 0, raw = 0, pos = 0;
33292         unsigned long comb_mask = 0;
33293 +       int ret, i, count;
33294         const char *res;
33295 -       u32 start = 0, raw = 0, pos = 0;
33297 -       memcpy(reg_def, &sx9310_default_regs[i], sizeof(*reg_def));
33298 +       memcpy(reg_def, &sx9310_default_regs[idx], sizeof(*reg_def));
33299         if (!np)
33300                 return reg_def;
33302 @@ -1234,15 +1242,31 @@ sx9310_get_default_reg(struct sx9310_data *data, int i,
33303                         reg_def->def |= SX9310_REG_PROX_CTRL2_SHIELDEN_GROUND;
33304                 }
33306 -               reg_def->def &= ~SX9310_REG_PROX_CTRL2_COMBMODE_MASK;
33307 -               of_property_read_u32_array(np, "semtech,combined-sensors",
33308 -                                          combined, ARRAY_SIZE(combined));
33309 -               for (i = 0; i < ARRAY_SIZE(combined); i++) {
33310 -                       if (combined[i] <= SX9310_NUM_CHANNELS)
33311 -                               comb_mask |= BIT(combined[i]);
33312 +               count = of_property_count_elems_of_size(np, "semtech,combined-sensors",
33313 +                                                       sizeof(u32));
33314 +               if (count > 0 && count <= ARRAY_SIZE(combined)) {
33315 +                       ret = of_property_read_u32_array(np, "semtech,combined-sensors",
33316 +                                                        combined, count);
33317 +                       if (ret)
33318 +                               break;
33319 +               } else {
33320 +                       /*
33321 +                        * Either the property does not exist in the DT or the
33322 +                        * number of entries is incorrect.
33323 +                        */
33324 +                       break;
33325                 }
33326 +               for (i = 0; i < count; i++) {
33327 +                       if (combined[i] >= SX9310_NUM_CHANNELS) {
33328 +                               /* Invalid sensor (invalid DT). */
33329 +                               break;
33330 +                       }
33331 +                       comb_mask |= BIT(combined[i]);
33332 +               }
33333 +               if (i < count)
33334 +                       break;
33336 -               comb_mask &= 0xf;
33337 +               reg_def->def &= ~SX9310_REG_PROX_CTRL2_COMBMODE_MASK;
33338                 if (comb_mask == (BIT(3) | BIT(2) | BIT(1) | BIT(0)))
33339                         reg_def->def |= SX9310_REG_PROX_CTRL2_COMBMODE_CS0_CS1_CS2_CS3;
33340                 else if (comb_mask == (BIT(1) | BIT(2)))
33341 diff --git a/drivers/iio/temperature/Kconfig b/drivers/iio/temperature/Kconfig
33342 index f1f2a1499c9e..4df60082c1fa 100644
33343 --- a/drivers/iio/temperature/Kconfig
33344 +++ b/drivers/iio/temperature/Kconfig
33345 @@ -45,7 +45,6 @@ config HID_SENSOR_TEMP
33346         tristate "HID Environmental temperature sensor"
33347         depends on HID_SENSOR_HUB
33348         select IIO_BUFFER
33349 -       select IIO_TRIGGERED_BUFFER
33350         select HID_SENSOR_IIO_COMMON
33351         select HID_SENSOR_IIO_TRIGGER
33352         help
33353 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
33354 index 3d194bb60840..6adbaea358ae 100644
33355 --- a/drivers/infiniband/core/cm.c
33356 +++ b/drivers/infiniband/core/cm.c
33357 @@ -2138,7 +2138,8 @@ static int cm_req_handler(struct cm_work *work)
33358                 goto destroy;
33359         }
33361 -       cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
33362 +       if (cm_id_priv->av.ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE)
33363 +               cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
33365         memset(&work->path[0], 0, sizeof(work->path[0]));
33366         if (cm_req_has_alt_path(req_msg))
33367 diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
33368 index 94096511599f..6ac07911a17b 100644
33369 --- a/drivers/infiniband/core/cma.c
33370 +++ b/drivers/infiniband/core/cma.c
33371 @@ -463,7 +463,6 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
33372         id_priv->id.route.addr.dev_addr.transport =
33373                 rdma_node_get_transport(cma_dev->device->node_type);
33374         list_add_tail(&id_priv->list, &cma_dev->id_list);
33375 -       rdma_restrack_add(&id_priv->res);
33377         trace_cm_id_attach(id_priv, cma_dev->device);
33379 @@ -700,6 +699,7 @@ static int cma_ib_acquire_dev(struct rdma_id_private *id_priv,
33380         mutex_lock(&lock);
33381         cma_attach_to_dev(id_priv, listen_id_priv->cma_dev);
33382         mutex_unlock(&lock);
33383 +       rdma_restrack_add(&id_priv->res);
33384         return 0;
33387 @@ -754,8 +754,10 @@ static int cma_iw_acquire_dev(struct rdma_id_private *id_priv,
33388         }
33390  out:
33391 -       if (!ret)
33392 +       if (!ret) {
33393                 cma_attach_to_dev(id_priv, cma_dev);
33394 +               rdma_restrack_add(&id_priv->res);
33395 +       }
33397         mutex_unlock(&lock);
33398         return ret;
33399 @@ -816,6 +818,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
33401  found:
33402         cma_attach_to_dev(id_priv, cma_dev);
33403 +       rdma_restrack_add(&id_priv->res);
33404         mutex_unlock(&lock);
33405         addr = (struct sockaddr_ib *)cma_src_addr(id_priv);
33406         memcpy(&addr->sib_addr, &sgid, sizeof(sgid));
33407 @@ -2529,6 +2532,7 @@ static int cma_listen_on_dev(struct rdma_id_private *id_priv,
33408                rdma_addr_size(cma_src_addr(id_priv)));
33410         _cma_attach_to_dev(dev_id_priv, cma_dev);
33411 +       rdma_restrack_add(&dev_id_priv->res);
33412         cma_id_get(id_priv);
33413         dev_id_priv->internal_id = 1;
33414         dev_id_priv->afonly = id_priv->afonly;
33415 @@ -3169,6 +3173,7 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
33416         ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
33417         id_priv->id.port_num = p;
33418         cma_attach_to_dev(id_priv, cma_dev);
33419 +       rdma_restrack_add(&id_priv->res);
33420         cma_set_loopback(cma_src_addr(id_priv));
33421  out:
33422         mutex_unlock(&lock);
33423 @@ -3201,6 +3206,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
33424                 if (status)
33425                         pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n",
33426                                              status);
33427 +               rdma_restrack_add(&id_priv->res);
33428         } else if (status) {
33429                 pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status);
33430         }
33431 @@ -3812,6 +3818,8 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
33432         if (ret)
33433                 goto err2;
33435 +       if (!cma_any_addr(addr))
33436 +               rdma_restrack_add(&id_priv->res);
33437         return 0;
33438  err2:
33439         if (id_priv->cma_dev)
33440 diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
33441 index 995d4633b0a1..d4d4959c2434 100644
33442 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
33443 +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
33444 @@ -2784,6 +2784,7 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
33445                 dev_err(&cq->hwq.pdev->dev,
33446                         "FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
33447                         cqe_cons, rq->max_wqe);
33448 +               rc = -EINVAL;
33449                 goto done;
33450         }
33452 diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
33453 index fa7878336100..3ca47004b752 100644
33454 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
33455 +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
33456 @@ -854,6 +854,7 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res     *res,
33458  unmap_io:
33459         pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
33460 +       dpit->dbr_bar_reg_iomem = NULL;
33461         return -ENOMEM;
33464 diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
33465 index 5c95c789f302..e800e8e8bed5 100644
33466 --- a/drivers/infiniband/hw/cxgb4/resource.c
33467 +++ b/drivers/infiniband/hw/cxgb4/resource.c
33468 @@ -216,7 +216,7 @@ u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
33469                         goto out;
33470                 entry->qid = qid;
33471                 list_add_tail(&entry->entry, &uctx->cqids);
33472 -               for (i = qid; i & rdev->qpmask; i++) {
33473 +               for (i = qid + 1; i & rdev->qpmask; i++) {
33474                         entry = kmalloc(sizeof(*entry), GFP_KERNEL);
33475                         if (!entry)
33476                                 goto out;
33477 diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
33478 index 0e83d4b61e46..2cf102b5abd4 100644
33479 --- a/drivers/infiniband/hw/hfi1/firmware.c
33480 +++ b/drivers/infiniband/hw/hfi1/firmware.c
33481 @@ -1916,6 +1916,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
33482                         dd_dev_err(dd, "%s: Failed CRC check at offset %ld\n",
33483                                    __func__, (ptr -
33484                                    (u32 *)dd->platform_config.data));
33485 +                       ret = -EINVAL;
33486                         goto bail;
33487                 }
33488                 /* Jump the CRC DWORD */
33489 diff --git a/drivers/infiniband/hw/hfi1/ipoib.h b/drivers/infiniband/hw/hfi1/ipoib.h
33490 index f650cac9d424..d30c23b6527a 100644
33491 --- a/drivers/infiniband/hw/hfi1/ipoib.h
33492 +++ b/drivers/infiniband/hw/hfi1/ipoib.h
33493 @@ -52,8 +52,9 @@ union hfi1_ipoib_flow {
33494   * @producer_lock: producer sync lock
33495   * @consumer_lock: consumer sync lock
33496   */
33497 +struct ipoib_txreq;
33498  struct hfi1_ipoib_circ_buf {
33499 -       void **items;
33500 +       struct ipoib_txreq **items;
33501         unsigned long head;
33502         unsigned long tail;
33503         unsigned long max_items;
33504 diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
33505 index edd4eeac8dd1..cdc26ee3cf52 100644
33506 --- a/drivers/infiniband/hw/hfi1/ipoib_tx.c
33507 +++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
33508 @@ -702,14 +702,14 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
33510         priv->tx_napis = kcalloc_node(dev->num_tx_queues,
33511                                       sizeof(struct napi_struct),
33512 -                                     GFP_ATOMIC,
33513 +                                     GFP_KERNEL,
33514                                       priv->dd->node);
33515         if (!priv->tx_napis)
33516                 goto free_txreq_cache;
33518         priv->txqs = kcalloc_node(dev->num_tx_queues,
33519                                   sizeof(struct hfi1_ipoib_txq),
33520 -                                 GFP_ATOMIC,
33521 +                                 GFP_KERNEL,
33522                                   priv->dd->node);
33523         if (!priv->txqs)
33524                 goto free_tx_napis;
33525 @@ -741,9 +741,9 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
33526                                              priv->dd->node);
33528                 txq->tx_ring.items =
33529 -                       vzalloc_node(array_size(tx_ring_size,
33530 -                                               sizeof(struct ipoib_txreq)),
33531 -                                    priv->dd->node);
33532 +                       kcalloc_node(tx_ring_size,
33533 +                                    sizeof(struct ipoib_txreq *),
33534 +                                    GFP_KERNEL, priv->dd->node);
33535                 if (!txq->tx_ring.items)
33536                         goto free_txqs;
33538 @@ -764,7 +764,7 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
33539                 struct hfi1_ipoib_txq *txq = &priv->txqs[i];
33541                 netif_napi_del(txq->napi);
33542 -               vfree(txq->tx_ring.items);
33543 +               kfree(txq->tx_ring.items);
33544         }
33546         kfree(priv->txqs);
33547 @@ -817,7 +817,7 @@ void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv)
33548                 hfi1_ipoib_drain_tx_list(txq);
33549                 netif_napi_del(txq->napi);
33550                 (void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items);
33551 -               vfree(txq->tx_ring.items);
33552 +               kfree(txq->tx_ring.items);
33553         }
33555         kfree(priv->txqs);
33556 diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
33557 index f3fb28e3d5d7..d213f65d4cdd 100644
33558 --- a/drivers/infiniband/hw/hfi1/mmu_rb.c
33559 +++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
33560 @@ -89,7 +89,7 @@ int hfi1_mmu_rb_register(void *ops_arg,
33561         struct mmu_rb_handler *h;
33562         int ret;
33564 -       h = kmalloc(sizeof(*h), GFP_KERNEL);
33565 +       h = kzalloc(sizeof(*h), GFP_KERNEL);
33566         if (!h)
33567                 return -ENOMEM;
33569 diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
33570 index ce26f97b2ca2..ad3cee54140e 100644
33571 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
33572 +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
33573 @@ -5068,6 +5068,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
33574         qp_attr->cur_qp_state = qp_attr->qp_state;
33575         qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
33576         qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
33577 +       qp_attr->cap.max_inline_data = hr_qp->max_inline_data;
33579         if (!ibqp->uobject) {
33580                 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
33581 diff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.c b/drivers/infiniband/hw/i40iw/i40iw_pble.c
33582 index 53e5cd1a2bd6..146a4148219b 100644
33583 --- a/drivers/infiniband/hw/i40iw/i40iw_pble.c
33584 +++ b/drivers/infiniband/hw/i40iw/i40iw_pble.c
33585 @@ -393,12 +393,9 @@ static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
33586         i40iw_debug(dev, I40IW_DEBUG_PBLE, "next_fpm_addr = %llx chunk_size[%u] = 0x%x\n",
33587                     pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
33588         pble_rsrc->unallocated_pble -= (chunk->size >> 3);
33589 -       list_add(&chunk->list, &pble_rsrc->pinfo.clist);
33590         sd_reg_val = (sd_entry_type == I40IW_SD_TYPE_PAGED) ?
33591                         sd_entry->u.pd_table.pd_page_addr.pa : sd_entry->u.bp.addr.pa;
33592 -       if (sd_entry->valid)
33593 -               return 0;
33594 -       if (dev->is_pf) {
33595 +       if (dev->is_pf && !sd_entry->valid) {
33596                 ret_code = i40iw_hmc_sd_one(dev, hmc_info->hmc_fn_id,
33597                                             sd_reg_val, idx->sd_idx,
33598                                             sd_entry->entry_type, true);
33599 @@ -409,6 +406,7 @@ static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
33600         }
33602         sd_entry->valid = true;
33603 +       list_add(&chunk->list, &pble_rsrc->pinfo.clist);
33604         return 0;
33605   error:
33606         kfree(chunk);
33607 diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
33608 index 25da0b05b4e2..f0af3f1ae039 100644
33609 --- a/drivers/infiniband/hw/mlx5/fs.c
33610 +++ b/drivers/infiniband/hw/mlx5/fs.c
33611 @@ -1528,8 +1528,8 @@ static struct mlx5_ib_flow_handler *raw_fs_rule_add(
33612                 dst_num++;
33613         }
33615 -       handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher,
33616 -                                       flow_context, flow_act,
33617 +       handler = _create_raw_flow_rule(dev, ft_prio, dst_num ? dst : NULL,
33618 +                                       fs_matcher, flow_context, flow_act,
33619                                         cmd_in, inlen, dst_num);
33621         if (IS_ERR(handler)) {
33622 @@ -1885,8 +1885,9 @@ static int get_dests(struct uverbs_attr_bundle *attrs,
33623                 else
33624                         *dest_id = mqp->raw_packet_qp.rq.tirn;
33625                 *dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
33626 -       } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS ||
33627 -                  fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
33628 +       } else if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS ||
33629 +                   fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX) &&
33630 +                  !(*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)) {
33631                 *dest_type = MLX5_FLOW_DESTINATION_TYPE_PORT;
33632         }
33634 diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
33635 index 0d69a697d75f..4be7bccefaa4 100644
33636 --- a/drivers/infiniband/hw/mlx5/main.c
33637 +++ b/drivers/infiniband/hw/mlx5/main.c
33638 @@ -499,7 +499,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
33639         translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
33640                                  &props->active_width, ext);
33642 -       if (!dev->is_rep && mlx5_is_roce_enabled(mdev)) {
33643 +       if (!dev->is_rep && dev->mdev->roce.roce_en) {
33644                 u16 qkey_viol_cntr;
33646                 props->port_cap_flags |= IB_PORT_CM_SUP;
33647 @@ -4174,7 +4174,7 @@ static int mlx5_ib_roce_init(struct mlx5_ib_dev *dev)
33649                 /* Register only for native ports */
33650                 err = mlx5_add_netdev_notifier(dev, port_num);
33651 -               if (err || dev->is_rep || !mlx5_is_roce_enabled(mdev))
33652 +               if (err || dev->is_rep || !mlx5_is_roce_init_enabled(mdev))
33653                         /*
33654                          * We don't enable ETH interface for
33655                          * 1. IB representors
33656 @@ -4711,7 +4711,7 @@ static int mlx5r_probe(struct auxiliary_device *adev,
33657         dev->mdev = mdev;
33658         dev->num_ports = num_ports;
33660 -       if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_enabled(mdev))
33661 +       if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_init_enabled(mdev))
33662                 profile = &raw_eth_profile;
33663         else
33664                 profile = &pf_profile;
33665 diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
33666 index 88cc26e008fc..b085c02b53d0 100644
33667 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
33668 +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
33669 @@ -547,11 +547,6 @@ static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
33670         return container_of(wr, struct mlx5_umr_wr, wr);
33673 -struct mlx5_shared_mr_info {
33674 -       int mr_id;
33675 -       struct ib_umem          *umem;
33678  enum mlx5_ib_cq_pr_flags {
33679         MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD = 1 << 0,
33680  };
33681 @@ -654,47 +649,69 @@ struct mlx5_ib_dm {
33682         atomic64_add(value, &((mr)->odp_stats.counter_name))
33684  struct mlx5_ib_mr {
33685 -       struct ib_mr            ibmr;
33686 -       void                    *descs;
33687 -       dma_addr_t              desc_map;
33688 -       int                     ndescs;
33689 -       int                     data_length;
33690 -       int                     meta_ndescs;
33691 -       int                     meta_length;
33692 -       int                     max_descs;
33693 -       int                     desc_size;
33694 -       int                     access_mode;
33695 -       unsigned int            page_shift;
33696 -       struct mlx5_core_mkey   mmkey;
33697 -       struct ib_umem         *umem;
33698 -       struct mlx5_shared_mr_info      *smr_info;
33699 -       struct list_head        list;
33700 -       struct mlx5_cache_ent  *cache_ent;
33701 -       u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
33702 -       struct mlx5_core_sig_ctx    *sig;
33703 -       void                    *descs_alloc;
33704 -       int                     access_flags; /* Needed for rereg MR */
33706 -       struct mlx5_ib_mr      *parent;
33707 -       /* Needed for IB_MR_TYPE_INTEGRITY */
33708 -       struct mlx5_ib_mr      *pi_mr;
33709 -       struct mlx5_ib_mr      *klm_mr;
33710 -       struct mlx5_ib_mr      *mtt_mr;
33711 -       u64                     data_iova;
33712 -       u64                     pi_iova;
33714 -       /* For ODP and implicit */
33715 -       struct xarray           implicit_children;
33716 -       union {
33717 -               struct list_head elm;
33718 -               struct work_struct work;
33719 -       } odp_destroy;
33720 -       struct ib_odp_counters  odp_stats;
33721 -       bool                    is_odp_implicit;
33722 +       struct ib_mr ibmr;
33723 +       struct mlx5_core_mkey mmkey;
33725 -       struct mlx5_async_work  cb_work;
33726 +       /* User MR data */
33727 +       struct mlx5_cache_ent *cache_ent;
33728 +       struct ib_umem *umem;
33730 +       /* This is zero'd when the MR is allocated */
33731 +       struct {
33732 +               /* Used only while the MR is in the cache */
33733 +               struct {
33734 +                       u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
33735 +                       struct mlx5_async_work cb_work;
33736 +                       /* Cache list element */
33737 +                       struct list_head list;
33738 +               };
33740 +               /* Used only by kernel MRs (umem == NULL) */
33741 +               struct {
33742 +                       void *descs;
33743 +                       void *descs_alloc;
33744 +                       dma_addr_t desc_map;
33745 +                       int max_descs;
33746 +                       int ndescs;
33747 +                       int desc_size;
33748 +                       int access_mode;
33750 +                       /* For Kernel IB_MR_TYPE_INTEGRITY */
33751 +                       struct mlx5_core_sig_ctx *sig;
33752 +                       struct mlx5_ib_mr *pi_mr;
33753 +                       struct mlx5_ib_mr *klm_mr;
33754 +                       struct mlx5_ib_mr *mtt_mr;
33755 +                       u64 data_iova;
33756 +                       u64 pi_iova;
33757 +                       int meta_ndescs;
33758 +                       int meta_length;
33759 +                       int data_length;
33760 +               };
33762 +               /* Used only by User MRs (umem != NULL) */
33763 +               struct {
33764 +                       unsigned int page_shift;
33765 +                       /* Current access_flags */
33766 +                       int access_flags;
33768 +                       /* For User ODP */
33769 +                       struct mlx5_ib_mr *parent;
33770 +                       struct xarray implicit_children;
33771 +                       union {
33772 +                               struct work_struct work;
33773 +                       } odp_destroy;
33774 +                       struct ib_odp_counters odp_stats;
33775 +                       bool is_odp_implicit;
33776 +               };
33777 +       };
33778  };
33780 +/* Zero the fields in the mr that are variant depending on usage */
33781 +static inline void mlx5_clear_mr(struct mlx5_ib_mr *mr)
33783 +       memset(mr->out, 0, sizeof(*mr) - offsetof(struct mlx5_ib_mr, out));
33786  static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
33788         return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
33789 diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
33790 index db05b0e0a8d7..ea8f068a6da3 100644
33791 --- a/drivers/infiniband/hw/mlx5/mr.c
33792 +++ b/drivers/infiniband/hw/mlx5/mr.c
33793 @@ -590,6 +590,8 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
33794                 ent->available_mrs--;
33795                 queue_adjust_cache_locked(ent);
33796                 spin_unlock_irq(&ent->lock);
33798 +               mlx5_clear_mr(mr);
33799         }
33800         mr->access_flags = access_flags;
33801         return mr;
33802 @@ -615,16 +617,14 @@ static struct mlx5_ib_mr *get_cache_mr(struct mlx5_cache_ent *req_ent)
33803                         ent->available_mrs--;
33804                         queue_adjust_cache_locked(ent);
33805                         spin_unlock_irq(&ent->lock);
33806 -                       break;
33807 +                       mlx5_clear_mr(mr);
33808 +                       return mr;
33809                 }
33810                 queue_adjust_cache_locked(ent);
33811                 spin_unlock_irq(&ent->lock);
33812         }
33814 -       if (!mr)
33815 -               req_ent->miss++;
33817 -       return mr;
33818 +       req_ent->miss++;
33819 +       return NULL;
33822  static void detach_mr_from_cache(struct mlx5_ib_mr *mr)
33823 @@ -993,8 +993,6 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
33825         mr->ibmr.pd = pd;
33826         mr->umem = umem;
33827 -       mr->access_flags = access_flags;
33828 -       mr->desc_size = sizeof(struct mlx5_mtt);
33829         mr->mmkey.iova = iova;
33830         mr->mmkey.size = umem->length;
33831         mr->mmkey.pd = to_mpd(pd)->pdn;
33832 diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
33833 index b103555b1f5d..d98755e78362 100644
33834 --- a/drivers/infiniband/hw/mlx5/odp.c
33835 +++ b/drivers/infiniband/hw/mlx5/odp.c
33836 @@ -227,7 +227,6 @@ static void free_implicit_child_mr(struct mlx5_ib_mr *mr, bool need_imr_xlt)
33838         dma_fence_odp_mr(mr);
33840 -       mr->parent = NULL;
33841         mlx5_mr_cache_free(mr_to_mdev(mr), mr);
33842         ib_umem_odp_release(odp);
33844 diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
33845 index f5a52a6fae43..843f9e7fe96f 100644
33846 --- a/drivers/infiniband/hw/mlx5/qp.c
33847 +++ b/drivers/infiniband/hw/mlx5/qp.c
33848 @@ -3146,6 +3146,19 @@ enum {
33849         MLX5_PATH_FLAG_COUNTER  = 1 << 2,
33850  };
33852 +static int mlx5_to_ib_rate_map(u8 rate)
33854 +       static const int rates[] = { IB_RATE_PORT_CURRENT, IB_RATE_56_GBPS,
33855 +                                    IB_RATE_25_GBPS,      IB_RATE_100_GBPS,
33856 +                                    IB_RATE_200_GBPS,     IB_RATE_50_GBPS,
33857 +                                    IB_RATE_400_GBPS };
33859 +       if (rate < ARRAY_SIZE(rates))
33860 +               return rates[rate];
33862 +       return rate - MLX5_STAT_RATE_OFFSET;
33865  static int ib_to_mlx5_rate_map(u8 rate)
33867         switch (rate) {
33868 @@ -4485,7 +4498,7 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
33869         rdma_ah_set_path_bits(ah_attr, MLX5_GET(ads, path, mlid));
33871         static_rate = MLX5_GET(ads, path, stat_rate);
33872 -       rdma_ah_set_static_rate(ah_attr, static_rate ? static_rate - 5 : 0);
33873 +       rdma_ah_set_static_rate(ah_attr, mlx5_to_ib_rate_map(static_rate));
33874         if (MLX5_GET(ads, path, grh) ||
33875             ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
33876                 rdma_ah_set_grh(ah_attr, NULL, MLX5_GET(ads, path, flow_label),
33877 diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
33878 index c4bc58736e48..1715fbe0719d 100644
33879 --- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
33880 +++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
33881 @@ -636,8 +636,10 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
33882         memcpy(in_params.local_mac_addr, dev->ndev->dev_addr, ETH_ALEN);
33884         if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
33885 -                            &qp->iwarp_cm_flags))
33886 +                            &qp->iwarp_cm_flags)) {
33887 +               rc = -ENODEV;
33888                 goto err; /* QP already being destroyed */
33889 +       }
33891         rc = dev->ops->iwarp_connect(dev->rdma_ctx, &in_params, &out_params);
33892         if (rc) {
33893 diff --git a/drivers/infiniband/sw/rxe/rxe_av.c b/drivers/infiniband/sw/rxe/rxe_av.c
33894 index df0d173d6acb..da2e867a1ed9 100644
33895 --- a/drivers/infiniband/sw/rxe/rxe_av.c
33896 +++ b/drivers/infiniband/sw/rxe/rxe_av.c
33897 @@ -88,7 +88,7 @@ void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr)
33898                 type = RXE_NETWORK_TYPE_IPV4;
33899                 break;
33900         case RDMA_NETWORK_IPV6:
33901 -               type = RXE_NETWORK_TYPE_IPV4;
33902 +               type = RXE_NETWORK_TYPE_IPV6;
33903                 break;
33904         default:
33905                 /* not reached - checked in rxe_av_chk_attr */
33906 diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
33907 index 34a910cf0edb..61c17db70d65 100644
33908 --- a/drivers/infiniband/sw/siw/siw_mem.c
33909 +++ b/drivers/infiniband/sw/siw/siw_mem.c
33910 @@ -106,8 +106,6 @@ int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj,
33911         mem->perms = rights & IWARP_ACCESS_MASK;
33912         kref_init(&mem->ref);
33914 -       mr->mem = mem;
33916         get_random_bytes(&next, 4);
33917         next &= 0x00ffffff;
33919 @@ -116,6 +114,8 @@ int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj,
33920                 kfree(mem);
33921                 return -ENOMEM;
33922         }
33924 +       mr->mem = mem;
33925         /* Set the STag index part */
33926         mem->stag = id << 8;
33927         mr->base_mr.lkey = mr->base_mr.rkey = mem->stag;
33928 diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
33929 index 7305ed8976c2..18266f07c58d 100644
33930 --- a/drivers/infiniband/ulp/isert/ib_isert.c
33931 +++ b/drivers/infiniband/ulp/isert/ib_isert.c
33932 @@ -438,23 +438,23 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
33933         isert_init_conn(isert_conn);
33934         isert_conn->cm_id = cma_id;
33936 -       ret = isert_alloc_login_buf(isert_conn, cma_id->device);
33937 -       if (ret)
33938 -               goto out;
33940         device = isert_device_get(cma_id);
33941         if (IS_ERR(device)) {
33942                 ret = PTR_ERR(device);
33943 -               goto out_rsp_dma_map;
33944 +               goto out;
33945         }
33946         isert_conn->device = device;
33948 +       ret = isert_alloc_login_buf(isert_conn, cma_id->device);
33949 +       if (ret)
33950 +               goto out_conn_dev;
33952         isert_set_nego_params(isert_conn, &event->param.conn);
33954         isert_conn->qp = isert_create_qp(isert_conn, cma_id);
33955         if (IS_ERR(isert_conn->qp)) {
33956                 ret = PTR_ERR(isert_conn->qp);
33957 -               goto out_conn_dev;
33958 +               goto out_rsp_dma_map;
33959         }
33961         ret = isert_login_post_recv(isert_conn);
33962 @@ -473,10 +473,10 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
33964  out_destroy_qp:
33965         isert_destroy_qp(isert_conn);
33966 -out_conn_dev:
33967 -       isert_device_put(device);
33968  out_rsp_dma_map:
33969         isert_free_login_buf(isert_conn);
33970 +out_conn_dev:
33971 +       isert_device_put(device);
33972  out:
33973         kfree(isert_conn);
33974         rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
33975 diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
33976 index 6734329cca33..959ba0462ef0 100644
33977 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
33978 +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
33979 @@ -2784,8 +2784,8 @@ int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
33980         } while (!changed && old_state != RTRS_CLT_DEAD);
33982         if (likely(changed)) {
33983 -               rtrs_clt_destroy_sess_files(sess, sysfs_self);
33984                 rtrs_clt_remove_path_from_arr(sess);
33985 +               rtrs_clt_destroy_sess_files(sess, sysfs_self);
33986                 kobject_put(&sess->kobj);
33987         }
33989 diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
33990 index 6be60aa5ffe2..7f0420ad9057 100644
33991 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c
33992 +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
33993 @@ -2378,6 +2378,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
33994                 pr_info("rejected SRP_LOGIN_REQ because target %s_%d is not enabled\n",
33995                         dev_name(&sdev->device->dev), port_num);
33996                 mutex_unlock(&sport->mutex);
33997 +               ret = -EINVAL;
33998                 goto reject;
33999         }
34001 diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
34002 index 5f7706febcb0..17540bdb1eaf 100644
34003 --- a/drivers/input/touchscreen/elants_i2c.c
34004 +++ b/drivers/input/touchscreen/elants_i2c.c
34005 @@ -38,6 +38,7 @@
34006  #include <linux/of.h>
34007  #include <linux/gpio/consumer.h>
34008  #include <linux/regulator/consumer.h>
34009 +#include <linux/uuid.h>
34010  #include <asm/unaligned.h>
34012  /* Device, Driver information */
34013 @@ -1334,6 +1335,40 @@ static void elants_i2c_power_off(void *_data)
34014         }
34017 +#ifdef CONFIG_ACPI
34018 +static const struct acpi_device_id i2c_hid_ids[] = {
34019 +       {"ACPI0C50", 0 },
34020 +       {"PNP0C50", 0 },
34021 +       { },
34024 +static const guid_t i2c_hid_guid =
34025 +       GUID_INIT(0x3CDFF6F7, 0x4267, 0x4555,
34026 +                 0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE);
34028 +static bool elants_acpi_is_hid_device(struct device *dev)
34030 +       acpi_handle handle = ACPI_HANDLE(dev);
34031 +       union acpi_object *obj;
34033 +       if (acpi_match_device_ids(ACPI_COMPANION(dev), i2c_hid_ids))
34034 +               return false;
34036 +       obj = acpi_evaluate_dsm_typed(handle, &i2c_hid_guid, 1, 1, NULL, ACPI_TYPE_INTEGER);
34037 +       if (obj) {
34038 +               ACPI_FREE(obj);
34039 +               return true;
34040 +       }
34042 +       return false;
34044 +#else
34045 +static bool elants_acpi_is_hid_device(struct device *dev)
34047 +       return false;
34049 +#endif
34051  static int elants_i2c_probe(struct i2c_client *client,
34052                             const struct i2c_device_id *id)
34054 @@ -1342,9 +1377,14 @@ static int elants_i2c_probe(struct i2c_client *client,
34055         unsigned long irqflags;
34056         int error;
34058 +       /* Don't bind to i2c-hid compatible devices, these are handled by the i2c-hid drv. */
34059 +       if (elants_acpi_is_hid_device(&client->dev)) {
34060 +               dev_warn(&client->dev, "This device appears to be an I2C-HID device, not binding\n");
34061 +               return -ENODEV;
34062 +       }
34064         if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
34065 -               dev_err(&client->dev,
34066 -                       "%s: i2c check functionality error\n", DEVICE_NAME);
34067 +               dev_err(&client->dev, "I2C check functionality error\n");
34068                 return -ENXIO;
34069         }
34071 diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
34072 index d8fccf048bf4..30576a5f2f04 100644
34073 --- a/drivers/input/touchscreen/ili210x.c
34074 +++ b/drivers/input/touchscreen/ili210x.c
34075 @@ -87,7 +87,7 @@ static bool ili210x_touchdata_to_coords(const u8 *touchdata,
34076                                         unsigned int *x, unsigned int *y,
34077                                         unsigned int *z)
34079 -       if (touchdata[0] & BIT(finger))
34080 +       if (!(touchdata[0] & BIT(finger)))
34081                 return false;
34083         *x = get_unaligned_be16(touchdata + 1 + (finger * 4) + 0);
34084 diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
34085 index 8fa2f3b7cfd8..e8b6c3137420 100644
34086 --- a/drivers/input/touchscreen/silead.c
34087 +++ b/drivers/input/touchscreen/silead.c
34088 @@ -20,6 +20,7 @@
34089  #include <linux/input/mt.h>
34090  #include <linux/input/touchscreen.h>
34091  #include <linux/pm.h>
34092 +#include <linux/pm_runtime.h>
34093  #include <linux/irq.h>
34094  #include <linux/regulator/consumer.h>
34096 @@ -335,10 +336,8 @@ static int silead_ts_get_id(struct i2c_client *client)
34098         error = i2c_smbus_read_i2c_block_data(client, SILEAD_REG_ID,
34099                                               sizeof(chip_id), (u8 *)&chip_id);
34100 -       if (error < 0) {
34101 -               dev_err(&client->dev, "Chip ID read error %d\n", error);
34102 +       if (error < 0)
34103                 return error;
34104 -       }
34106         data->chip_id = le32_to_cpu(chip_id);
34107         dev_info(&client->dev, "Silead chip ID: 0x%8X", data->chip_id);
34108 @@ -351,12 +350,49 @@ static int silead_ts_setup(struct i2c_client *client)
34109         int error;
34110         u32 status;
34112 +       /*
34113 +        * Some buggy BIOS-es bring up the chip in a stuck state where it
34114 +        * blocks the I2C bus. The following steps are necessary to
34115 +        * unstuck the chip / bus:
34116 +        * 1. Turn off the Silead chip.
34117 +        * 2. Try to do an I2C transfer with the chip, this will fail in
34118 +        *    response to which the I2C-bus-driver will call:
34119 +        *    i2c_recover_bus() which will unstuck the I2C-bus. Note the
34120 +        *    unstuck-ing of the I2C bus only works if we first drop the
34121 +        *    chip off the bus by turning it off.
34122 +        * 3. Turn the chip back on.
34123 +        *
34124 +        * On the x86/ACPI systems were this problem is seen, step 1. and
34125 +        * 3. require making ACPI calls and dealing with ACPI Power
34126 +        * Resources. The workaround below runtime-suspends the chip to
34127 +        * turn it off, leaving it up to the ACPI subsystem to deal with
34128 +        * this.
34129 +        */
34131 +       if (device_property_read_bool(&client->dev,
34132 +                                     "silead,stuck-controller-bug")) {
34133 +               pm_runtime_set_active(&client->dev);
34134 +               pm_runtime_enable(&client->dev);
34135 +               pm_runtime_allow(&client->dev);
34137 +               pm_runtime_suspend(&client->dev);
34139 +               dev_warn(&client->dev, FW_BUG "Stuck I2C bus: please ignore the next 'controller timed out' error\n");
34140 +               silead_ts_get_id(client);
34142 +               /* The forbid will also resume the device */
34143 +               pm_runtime_forbid(&client->dev);
34144 +               pm_runtime_disable(&client->dev);
34145 +       }
34147         silead_ts_set_power(client, SILEAD_POWER_OFF);
34148         silead_ts_set_power(client, SILEAD_POWER_ON);
34150         error = silead_ts_get_id(client);
34151 -       if (error)
34152 +       if (error) {
34153 +               dev_err(&client->dev, "Chip ID read error %d\n", error);
34154                 return error;
34155 +       }
34157         error = silead_ts_init(client);
34158         if (error)
34159 diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
34160 index 321f5906e6ed..df7b19ff0a9e 100644
34161 --- a/drivers/iommu/amd/init.c
34162 +++ b/drivers/iommu/amd/init.c
34163 @@ -12,7 +12,6 @@
34164  #include <linux/acpi.h>
34165  #include <linux/list.h>
34166  #include <linux/bitmap.h>
34167 -#include <linux/delay.h>
34168  #include <linux/slab.h>
34169  #include <linux/syscore_ops.h>
34170  #include <linux/interrupt.h>
34171 @@ -257,8 +256,6 @@ static enum iommu_init_state init_state = IOMMU_START_STATE;
34172  static int amd_iommu_enable_interrupts(void);
34173  static int __init iommu_go_to_state(enum iommu_init_state state);
34174  static void init_device_table_dma(void);
34175 -static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
34176 -                               u8 fxn, u64 *value, bool is_write);
34178  static bool amd_iommu_pre_enabled = true;
34180 @@ -1717,53 +1714,16 @@ static int __init init_iommu_all(struct acpi_table_header *table)
34181         return 0;
34184 -static void __init init_iommu_perf_ctr(struct amd_iommu *iommu)
34185 +static void init_iommu_perf_ctr(struct amd_iommu *iommu)
34187 -       int retry;
34188 +       u64 val;
34189         struct pci_dev *pdev = iommu->dev;
34190 -       u64 val = 0xabcd, val2 = 0, save_reg, save_src;
34192         if (!iommu_feature(iommu, FEATURE_PC))
34193                 return;
34195         amd_iommu_pc_present = true;
34197 -       /* save the value to restore, if writable */
34198 -       if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false) ||
34199 -           iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, false))
34200 -               goto pc_false;
34202 -       /*
34203 -        * Disable power gating by programing the performance counter
34204 -        * source to 20 (i.e. counts the reads and writes from/to IOMMU
34205 -        * Reserved Register [MMIO Offset 1FF8h] that are ignored.),
34206 -        * which never get incremented during this init phase.
34207 -        * (Note: The event is also deprecated.)
34208 -        */
34209 -       val = 20;
34210 -       if (iommu_pc_get_set_reg(iommu, 0, 0, 8, &val, true))
34211 -               goto pc_false;
34213 -       /* Check if the performance counters can be written to */
34214 -       val = 0xabcd;
34215 -       for (retry = 5; retry; retry--) {
34216 -               if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true) ||
34217 -                   iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false) ||
34218 -                   val2)
34219 -                       break;
34221 -               /* Wait about 20 msec for power gating to disable and retry. */
34222 -               msleep(20);
34223 -       }
34225 -       /* restore */
34226 -       if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true) ||
34227 -           iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, true))
34228 -               goto pc_false;
34230 -       if (val != val2)
34231 -               goto pc_false;
34233         pci_info(pdev, "IOMMU performance counters supported\n");
34235         val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
34236 @@ -1771,11 +1731,6 @@ static void __init init_iommu_perf_ctr(struct amd_iommu *iommu)
34237         iommu->max_counters = (u8) ((val >> 7) & 0xf);
34239         return;
34241 -pc_false:
34242 -       pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
34243 -       amd_iommu_pc_present = false;
34244 -       return;
34247  static ssize_t amd_iommu_show_cap(struct device *dev,
34248 @@ -1837,7 +1792,7 @@ static void __init late_iommu_features_init(struct amd_iommu *iommu)
34249          * IVHD and MMIO conflict.
34250          */
34251         if (features != iommu->features)
34252 -               pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx\n).",
34253 +               pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx).\n",
34254                         features, iommu->features);
34257 diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
34258 index 8594b4a83043..941ba5484731 100644
34259 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
34260 +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
34261 @@ -2305,6 +2305,9 @@ static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
34263         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
34265 +       if (!gather->pgsize)
34266 +               return;
34268         arm_smmu_tlb_inv_range_domain(gather->start,
34269                                       gather->end - gather->start + 1,
34270                                       gather->pgsize, true, smmu_domain);
34271 diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
34272 index f985817c967a..230b6f6b3901 100644
34273 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
34274 +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
34275 @@ -115,7 +115,7 @@
34276  #define GERROR_PRIQ_ABT_ERR            (1 << 3)
34277  #define GERROR_EVTQ_ABT_ERR            (1 << 2)
34278  #define GERROR_CMDQ_ERR                        (1 << 0)
34279 -#define GERROR_ERR_MASK                        0xfd
34280 +#define GERROR_ERR_MASK                        0x1fd
34282  #define ARM_SMMU_GERRORN               0x64
34284 diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
34285 index af765c813cc8..fdd095e1fa52 100644
34286 --- a/drivers/iommu/dma-iommu.c
34287 +++ b/drivers/iommu/dma-iommu.c
34288 @@ -52,6 +52,17 @@ struct iommu_dma_cookie {
34289  };
34291  static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
34292 +bool iommu_dma_forcedac __read_mostly;
34294 +static int __init iommu_dma_forcedac_setup(char *str)
34296 +       int ret = kstrtobool(str, &iommu_dma_forcedac);
34298 +       if (!ret && iommu_dma_forcedac)
34299 +               pr_info("Forcing DAC for PCI devices\n");
34300 +       return ret;
34302 +early_param("iommu.forcedac", iommu_dma_forcedac_setup);
34304  void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
34305                 struct iommu_domain *domain)
34306 @@ -444,7 +455,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
34307                 dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);
34309         /* Try to get PCI devices a SAC address */
34310 -       if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
34311 +       if (dma_limit > DMA_BIT_MASK(32) && !iommu_dma_forcedac && dev_is_pci(dev))
34312                 iova = alloc_iova_fast(iovad, iova_len,
34313                                        DMA_BIT_MASK(32) >> shift, false);
34315 diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
34316 index ee0932307d64..7e551da6c1fb 100644
34317 --- a/drivers/iommu/intel/iommu.c
34318 +++ b/drivers/iommu/intel/iommu.c
34319 @@ -360,7 +360,6 @@ int intel_iommu_enabled = 0;
34320  EXPORT_SYMBOL_GPL(intel_iommu_enabled);
34322  static int dmar_map_gfx = 1;
34323 -static int dmar_forcedac;
34324  static int intel_iommu_strict;
34325  static int intel_iommu_superpage = 1;
34326  static int iommu_identity_mapping;
34327 @@ -451,8 +450,8 @@ static int __init intel_iommu_setup(char *str)
34328                         dmar_map_gfx = 0;
34329                         pr_info("Disable GFX device mapping\n");
34330                 } else if (!strncmp(str, "forcedac", 8)) {
34331 -                       pr_info("Forcing DAC for PCI devices\n");
34332 -                       dmar_forcedac = 1;
34333 +                       pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n");
34334 +                       iommu_dma_forcedac = true;
34335                 } else if (!strncmp(str, "strict", 6)) {
34336                         pr_info("Disable batched IOTLB flush\n");
34337                         intel_iommu_strict = 1;
34338 @@ -658,7 +657,14 @@ static int domain_update_iommu_snooping(struct intel_iommu *skip)
34339         rcu_read_lock();
34340         for_each_active_iommu(iommu, drhd) {
34341                 if (iommu != skip) {
34342 -                       if (!ecap_sc_support(iommu->ecap)) {
34343 +                       /*
34344 +                        * If the hardware is operating in the scalable mode,
34345 +                        * the snooping control is always supported since we
34346 +                        * always set PASID-table-entry.PGSNP bit if the domain
34347 +                        * is managed outside (UNMANAGED).
34348 +                        */
34349 +                       if (!sm_supported(iommu) &&
34350 +                           !ecap_sc_support(iommu->ecap)) {
34351                                 ret = 0;
34352                                 break;
34353                         }
34354 @@ -1340,6 +1346,11 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
34355                       readl, (sts & DMA_GSTS_RTPS), sts);
34357         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
34359 +       iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
34360 +       if (sm_supported(iommu))
34361 +               qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0);
34362 +       iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
34365  void iommu_flush_write_buffer(struct intel_iommu *iommu)
34366 @@ -2289,6 +2300,41 @@ static inline int hardware_largepage_caps(struct dmar_domain *domain,
34367         return level;
34371 + * Ensure that old small page tables are removed to make room for superpage(s).
34372 + * We're going to add new large pages, so make sure we don't remove their parent
34373 + * tables. The IOTLB/devTLBs should be flushed if any PDE/PTEs are cleared.
34374 + */
34375 +static void switch_to_super_page(struct dmar_domain *domain,
34376 +                                unsigned long start_pfn,
34377 +                                unsigned long end_pfn, int level)
34379 +       unsigned long lvl_pages = lvl_to_nr_pages(level);
34380 +       struct dma_pte *pte = NULL;
34381 +       int i;
34383 +       while (start_pfn <= end_pfn) {
34384 +               if (!pte)
34385 +                       pte = pfn_to_dma_pte(domain, start_pfn, &level);
34387 +               if (dma_pte_present(pte)) {
34388 +                       dma_pte_free_pagetable(domain, start_pfn,
34389 +                                              start_pfn + lvl_pages - 1,
34390 +                                              level + 1);
34392 +                       for_each_domain_iommu(i, domain)
34393 +                               iommu_flush_iotlb_psi(g_iommus[i], domain,
34394 +                                                     start_pfn, lvl_pages,
34395 +                                                     0, 0);
34396 +               }
34398 +               pte++;
34399 +               start_pfn += lvl_pages;
34400 +               if (first_pte_in_page(pte))
34401 +                       pte = NULL;
34402 +       }
34405  static int
34406  __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
34407                  unsigned long phys_pfn, unsigned long nr_pages, int prot)
34408 @@ -2305,8 +2351,9 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
34409                 return -EINVAL;
34411         attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
34412 +       attr |= DMA_FL_PTE_PRESENT;
34413         if (domain_use_first_level(domain)) {
34414 -               attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD | DMA_FL_PTE_US;
34415 +               attr |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
34417                 if (domain->domain.type == IOMMU_DOMAIN_DMA) {
34418                         attr |= DMA_FL_PTE_ACCESS;
34419 @@ -2329,22 +2376,11 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
34420                                 return -ENOMEM;
34421                         /* It is large page*/
34422                         if (largepage_lvl > 1) {
34423 -                               unsigned long nr_superpages, end_pfn;
34424 +                               unsigned long end_pfn;
34426                                 pteval |= DMA_PTE_LARGE_PAGE;
34427 -                               lvl_pages = lvl_to_nr_pages(largepage_lvl);
34429 -                               nr_superpages = nr_pages / lvl_pages;
34430 -                               end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
34432 -                               /*
34433 -                                * Ensure that old small page tables are
34434 -                                * removed to make room for superpage(s).
34435 -                                * We're adding new large pages, so make sure
34436 -                                * we don't remove their parent tables.
34437 -                                */
34438 -                               dma_pte_free_pagetable(domain, iov_pfn, end_pfn,
34439 -                                                      largepage_lvl + 1);
34440 +                               end_pfn = ((iov_pfn + nr_pages) & level_mask(largepage_lvl)) - 1;
34441 +                               switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);
34442                         } else {
34443                                 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
34444                         }
34445 @@ -2422,6 +2458,10 @@ static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn
34446                                    (((u16)bus) << 8) | devfn,
34447                                    DMA_CCMD_MASK_NOBIT,
34448                                    DMA_CCMD_DEVICE_INVL);
34450 +       if (sm_supported(iommu))
34451 +               qi_flush_pasid_cache(iommu, did_old, QI_PC_ALL_PASIDS, 0);
34453         iommu->flush.flush_iotlb(iommu,
34454                                  did_old,
34455                                  0,
34456 @@ -2505,6 +2545,9 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
34458         flags |= (level == 5) ? PASID_FLAG_FL5LP : 0;
34460 +       if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
34461 +               flags |= PASID_FLAG_PAGE_SNOOP;
34463         return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid,
34464                                              domain->iommu_did[iommu->seq_id],
34465                                              flags);
34466 @@ -3267,8 +3310,6 @@ static int __init init_dmars(void)
34467                 register_pasid_allocator(iommu);
34468  #endif
34469                 iommu_set_root_entry(iommu);
34470 -               iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
34471 -               iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
34472         }
34474  #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
34475 @@ -3458,12 +3499,7 @@ static int init_iommu_hw(void)
34476                 }
34478                 iommu_flush_write_buffer(iommu);
34480                 iommu_set_root_entry(iommu);
34482 -               iommu->flush.flush_context(iommu, 0, 0, 0,
34483 -                                          DMA_CCMD_GLOBAL_INVL);
34484 -               iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
34485                 iommu_enable_translation(iommu);
34486                 iommu_disable_protect_mem_regions(iommu);
34487         }
34488 @@ -3846,8 +3882,6 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
34489                 goto disable_iommu;
34491         iommu_set_root_entry(iommu);
34492 -       iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
34493 -       iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
34494         iommu_enable_translation(iommu);
34496         iommu_disable_protect_mem_regions(iommu);
34497 diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
34498 index 611ef5243cb6..5c16ebe037a1 100644
34499 --- a/drivers/iommu/intel/irq_remapping.c
34500 +++ b/drivers/iommu/intel/irq_remapping.c
34501 @@ -736,7 +736,7 @@ static int __init intel_prepare_irq_remapping(void)
34502                 return -ENODEV;
34504         if (intel_cap_audit(CAP_AUDIT_STATIC_IRQR, NULL))
34505 -               goto error;
34506 +               return -ENODEV;
34508         if (!dmar_ir_support())
34509                 return -ENODEV;
34510 diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
34511 index f26cb6195b2c..5093d317ff1a 100644
34512 --- a/drivers/iommu/intel/pasid.c
34513 +++ b/drivers/iommu/intel/pasid.c
34514 @@ -411,6 +411,16 @@ static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
34515         pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
34519 + * Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
34520 + * PASID entry.
34521 + */
34522 +static inline void
34523 +pasid_set_pgsnp(struct pasid_entry *pe)
34525 +       pasid_set_bits(&pe->val[1], 1ULL << 24, 1ULL << 24);
34528  /*
34529   * Setup the First Level Page table Pointer field (Bit 140~191)
34530   * of a scalable mode PASID entry.
34531 @@ -565,6 +575,9 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
34532                 }
34533         }
34535 +       if (flags & PASID_FLAG_PAGE_SNOOP)
34536 +               pasid_set_pgsnp(pte);
34538         pasid_set_domain_id(pte, did);
34539         pasid_set_address_width(pte, iommu->agaw);
34540         pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
34541 @@ -643,6 +656,9 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
34542         pasid_set_fault_enable(pte);
34543         pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
34545 +       if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
34546 +               pasid_set_pgsnp(pte);
34548         /*
34549          * Since it is a second level only translation setup, we should
34550          * set SRE bit as well (addresses are expected to be GPAs).
34551 diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
34552 index 444c0bec221a..086ebd697319 100644
34553 --- a/drivers/iommu/intel/pasid.h
34554 +++ b/drivers/iommu/intel/pasid.h
34555 @@ -48,6 +48,7 @@
34556   */
34557  #define PASID_FLAG_SUPERVISOR_MODE     BIT(0)
34558  #define PASID_FLAG_NESTED              BIT(1)
34559 +#define PASID_FLAG_PAGE_SNOOP          BIT(2)
34561  /*
34562   * The PASID_FLAG_FL5LP flag Indicates using 5-level paging for first-
34563 diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
34564 index 574a7e657a9a..ecb6314fdd5c 100644
34565 --- a/drivers/iommu/intel/svm.c
34566 +++ b/drivers/iommu/intel/svm.c
34567 @@ -862,7 +862,7 @@ intel_svm_prq_report(struct device *dev, struct page_req_dsc *desc)
34568         /* Fill in event data for device specific processing */
34569         memset(&event, 0, sizeof(struct iommu_fault_event));
34570         event.fault.type = IOMMU_FAULT_PAGE_REQ;
34571 -       event.fault.prm.addr = desc->addr;
34572 +       event.fault.prm.addr = (u64)desc->addr << VTD_PAGE_SHIFT;
34573         event.fault.prm.pasid = desc->pasid;
34574         event.fault.prm.grpid = desc->prg_index;
34575         event.fault.prm.perm = prq_to_iommu_prot(desc);
34576 @@ -920,7 +920,17 @@ static irqreturn_t prq_event_thread(int irq, void *d)
34577                                ((unsigned long long *)req)[1]);
34578                         goto no_pasid;
34579                 }
34581 +               /* We shall not receive page request for supervisor SVM */
34582 +               if (req->pm_req && (req->rd_req | req->wr_req)) {
34583 +                       pr_err("Unexpected page request in Privilege Mode");
34584 +                       /* No need to find the matching sdev as for bad_req */
34585 +                       goto no_pasid;
34586 +               }
34587 +               /* DMA read with exec requeset is not supported. */
34588 +               if (req->exe_req && req->rd_req) {
34589 +                       pr_err("Execution request not supported\n");
34590 +                       goto no_pasid;
34591 +               }
34592                 if (!svm || svm->pasid != req->pasid) {
34593                         rcu_read_lock();
34594                         svm = ioasid_find(NULL, req->pasid, NULL);
34595 @@ -1021,12 +1031,12 @@ static irqreturn_t prq_event_thread(int irq, void *d)
34596                                 QI_PGRP_RESP_TYPE;
34597                         resp.qw1 = QI_PGRP_IDX(req->prg_index) |
34598                                 QI_PGRP_LPIG(req->lpig);
34599 +                       resp.qw2 = 0;
34600 +                       resp.qw3 = 0;
34602                         if (req->priv_data_present)
34603                                 memcpy(&resp.qw2, req->priv_data,
34604                                        sizeof(req->priv_data));
34605 -                       resp.qw2 = 0;
34606 -                       resp.qw3 = 0;
34607                         qi_submit_sync(iommu, &resp, 1, 0);
34608                 }
34609  prq_advance:
34610 diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
34611 index d0b0a15dba84..e10cfa99057c 100644
34612 --- a/drivers/iommu/iommu.c
34613 +++ b/drivers/iommu/iommu.c
34614 @@ -2878,10 +2878,12 @@ EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
34615   */
34616  int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
34618 -       const struct iommu_ops *ops = dev->bus->iommu_ops;
34619 +       if (dev->iommu && dev->iommu->iommu_dev) {
34620 +               const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
34622 -       if (ops && ops->dev_enable_feat)
34623 -               return ops->dev_enable_feat(dev, feat);
34624 +               if (ops->dev_enable_feat)
34625 +                       return ops->dev_enable_feat(dev, feat);
34626 +       }
34628         return -ENODEV;
34630 @@ -2894,10 +2896,12 @@ EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
34631   */
34632  int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
34634 -       const struct iommu_ops *ops = dev->bus->iommu_ops;
34635 +       if (dev->iommu && dev->iommu->iommu_dev) {
34636 +               const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
34638 -       if (ops && ops->dev_disable_feat)
34639 -               return ops->dev_disable_feat(dev, feat);
34640 +               if (ops->dev_disable_feat)
34641 +                       return ops->dev_disable_feat(dev, feat);
34642 +       }
34644         return -EBUSY;
34646 @@ -2905,10 +2909,12 @@ EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
34648  bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
34650 -       const struct iommu_ops *ops = dev->bus->iommu_ops;
34651 +       if (dev->iommu && dev->iommu->iommu_dev) {
34652 +               const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
34654 -       if (ops && ops->dev_feat_enabled)
34655 -               return ops->dev_feat_enabled(dev, feat);
34656 +               if (ops->dev_feat_enabled)
34657 +                       return ops->dev_feat_enabled(dev, feat);
34658 +       }
34660         return false;
34662 diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
34663 index 6ecc007f07cd..e168a682806a 100644
34664 --- a/drivers/iommu/mtk_iommu.c
34665 +++ b/drivers/iommu/mtk_iommu.c
34666 @@ -688,13 +688,6 @@ static const struct iommu_ops mtk_iommu_ops = {
34667  static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
34669         u32 regval;
34670 -       int ret;
34672 -       ret = clk_prepare_enable(data->bclk);
34673 -       if (ret) {
34674 -               dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
34675 -               return ret;
34676 -       }
34678         if (data->plat_data->m4u_plat == M4U_MT8173) {
34679                 regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
34680 @@ -760,7 +753,6 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
34681         if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
34682                              dev_name(data->dev), (void *)data)) {
34683                 writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
34684 -               clk_disable_unprepare(data->bclk);
34685                 dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
34686                 return -ENODEV;
34687         }
34688 @@ -977,14 +969,19 @@ static int __maybe_unused mtk_iommu_runtime_resume(struct device *dev)
34689         void __iomem *base = data->base;
34690         int ret;
34692 -       /* Avoid first resume to affect the default value of registers below. */
34693 -       if (!m4u_dom)
34694 -               return 0;
34695         ret = clk_prepare_enable(data->bclk);
34696         if (ret) {
34697                 dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
34698                 return ret;
34699         }
34701 +       /*
34702 +        * Uppon first resume, only enable the clk and return, since the values of the
34703 +        * registers are not yet set.
34704 +        */
34705 +       if (!m4u_dom)
34706 +               return 0;
34708         writel_relaxed(reg->wr_len_ctrl, base + REG_MMU_WR_LEN_CTRL);
34709         writel_relaxed(reg->misc_ctrl, base + REG_MMU_MISC_CTRL);
34710         writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
34711 diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c
34712 index 563a9b366294..e81e89a81cb5 100644
34713 --- a/drivers/irqchip/irq-gic-v3-mbi.c
34714 +++ b/drivers/irqchip/irq-gic-v3-mbi.c
34715 @@ -303,7 +303,7 @@ int __init mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent)
34716         reg = of_get_property(np, "mbi-alias", NULL);
34717         if (reg) {
34718                 mbi_phys_base = of_translate_address(np, reg);
34719 -               if (mbi_phys_base == OF_BAD_ADDR) {
34720 +               if (mbi_phys_base == (phys_addr_t)OF_BAD_ADDR) {
34721                         ret = -ENXIO;
34722                         goto err_free_mbi;
34723                 }
34724 diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
34725 index eb0ee356a629..00404024d7cd 100644
34726 --- a/drivers/irqchip/irq-gic-v3.c
34727 +++ b/drivers/irqchip/irq-gic-v3.c
34728 @@ -648,6 +648,10 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
34730         irqnr = gic_read_iar();
34732 +       /* Check for special IDs first */
34733 +       if ((irqnr >= 1020 && irqnr <= 1023))
34734 +               return;
34736         if (gic_supports_nmi() &&
34737             unlikely(gic_read_rpr() == GICD_INT_NMI_PRI)) {
34738                 gic_handle_nmi(irqnr, regs);
34739 @@ -659,10 +663,6 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
34740                 gic_arch_enable_irqs();
34741         }
34743 -       /* Check for special IDs first */
34744 -       if ((irqnr >= 1020 && irqnr <= 1023))
34745 -               return;
34747         if (static_branch_likely(&supports_deactivate_key))
34748                 gic_write_eoir(irqnr);
34749         else
34750 diff --git a/drivers/leds/blink/Kconfig b/drivers/leds/blink/Kconfig
34751 index 265b53476a80..6dedc58c47b3 100644
34752 --- a/drivers/leds/blink/Kconfig
34753 +++ b/drivers/leds/blink/Kconfig
34754 @@ -9,6 +9,7 @@ if LEDS_BLINK
34756  config LEDS_BLINK_LGM
34757         tristate "LED support for Intel LGM SoC series"
34758 +       depends on GPIOLIB
34759         depends on LEDS_CLASS
34760         depends on MFD_SYSCON
34761         depends on OF
34762 diff --git a/drivers/mailbox/sprd-mailbox.c b/drivers/mailbox/sprd-mailbox.c
34763 index 4c325301a2fe..94d9067dc8d0 100644
34764 --- a/drivers/mailbox/sprd-mailbox.c
34765 +++ b/drivers/mailbox/sprd-mailbox.c
34766 @@ -60,6 +60,8 @@ struct sprd_mbox_priv {
34767         struct clk              *clk;
34768         u32                     outbox_fifo_depth;
34770 +       struct mutex            lock;
34771 +       u32                     refcnt;
34772         struct mbox_chan        chan[SPRD_MBOX_CHAN_MAX];
34773  };
34775 @@ -115,7 +117,11 @@ static irqreturn_t sprd_mbox_outbox_isr(int irq, void *data)
34776                 id = readl(priv->outbox_base + SPRD_MBOX_ID);
34778                 chan = &priv->chan[id];
34779 -               mbox_chan_received_data(chan, (void *)msg);
34780 +               if (chan->cl)
34781 +                       mbox_chan_received_data(chan, (void *)msg);
34782 +               else
34783 +                       dev_warn_ratelimited(priv->dev,
34784 +                                   "message's been dropped at ch[%d]\n", id);
34786                 /* Trigger to update outbox FIFO pointer */
34787                 writel(0x1, priv->outbox_base + SPRD_MBOX_TRIGGER);
34788 @@ -215,18 +221,22 @@ static int sprd_mbox_startup(struct mbox_chan *chan)
34789         struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
34790         u32 val;
34792 -       /* Select outbox FIFO mode and reset the outbox FIFO status */
34793 -       writel(0x0, priv->outbox_base + SPRD_MBOX_FIFO_RST);
34794 +       mutex_lock(&priv->lock);
34795 +       if (priv->refcnt++ == 0) {
34796 +               /* Select outbox FIFO mode and reset the outbox FIFO status */
34797 +               writel(0x0, priv->outbox_base + SPRD_MBOX_FIFO_RST);
34799 -       /* Enable inbox FIFO overflow and delivery interrupt */
34800 -       val = readl(priv->inbox_base + SPRD_MBOX_IRQ_MSK);
34801 -       val &= ~(SPRD_INBOX_FIFO_OVERFLOW_IRQ | SPRD_INBOX_FIFO_DELIVER_IRQ);
34802 -       writel(val, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
34803 +               /* Enable inbox FIFO overflow and delivery interrupt */
34804 +               val = readl(priv->inbox_base + SPRD_MBOX_IRQ_MSK);
34805 +               val &= ~(SPRD_INBOX_FIFO_OVERFLOW_IRQ | SPRD_INBOX_FIFO_DELIVER_IRQ);
34806 +               writel(val, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
34808 -       /* Enable outbox FIFO not empty interrupt */
34809 -       val = readl(priv->outbox_base + SPRD_MBOX_IRQ_MSK);
34810 -       val &= ~SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ;
34811 -       writel(val, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
34812 +               /* Enable outbox FIFO not empty interrupt */
34813 +               val = readl(priv->outbox_base + SPRD_MBOX_IRQ_MSK);
34814 +               val &= ~SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ;
34815 +               writel(val, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
34816 +       }
34817 +       mutex_unlock(&priv->lock);
34819         return 0;
34821 @@ -235,9 +245,13 @@ static void sprd_mbox_shutdown(struct mbox_chan *chan)
34823         struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
34825 -       /* Disable inbox & outbox interrupt */
34826 -       writel(SPRD_INBOX_FIFO_IRQ_MASK, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
34827 -       writel(SPRD_OUTBOX_FIFO_IRQ_MASK, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
34828 +       mutex_lock(&priv->lock);
34829 +       if (--priv->refcnt == 0) {
34830 +               /* Disable inbox & outbox interrupt */
34831 +               writel(SPRD_INBOX_FIFO_IRQ_MASK, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
34832 +               writel(SPRD_OUTBOX_FIFO_IRQ_MASK, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
34833 +       }
34834 +       mutex_unlock(&priv->lock);
34837  static const struct mbox_chan_ops sprd_mbox_ops = {
34838 @@ -266,6 +280,7 @@ static int sprd_mbox_probe(struct platform_device *pdev)
34839                 return -ENOMEM;
34841         priv->dev = dev;
34842 +       mutex_init(&priv->lock);
34844         /*
34845          * The Spreadtrum mailbox uses an inbox to send messages to the target
34846 diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
34847 index 82d4e0880a99..4fb635c0baa0 100644
34848 --- a/drivers/md/bcache/writeback.c
34849 +++ b/drivers/md/bcache/writeback.c
34850 @@ -110,13 +110,13 @@ static void __update_writeback_rate(struct cached_dev *dc)
34851                 int64_t fps;
34853                 if (c->gc_stats.in_use <= BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID) {
34854 -                       fp_term = dc->writeback_rate_fp_term_low *
34855 +                       fp_term = (int64_t)dc->writeback_rate_fp_term_low *
34856                         (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW);
34857                 } else if (c->gc_stats.in_use <= BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH) {
34858 -                       fp_term = dc->writeback_rate_fp_term_mid *
34859 +                       fp_term = (int64_t)dc->writeback_rate_fp_term_mid *
34860                         (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID);
34861                 } else {
34862 -                       fp_term = dc->writeback_rate_fp_term_high *
34863 +                       fp_term = (int64_t)dc->writeback_rate_fp_term_high *
34864                         (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH);
34865                 }
34866                 fps = div_s64(dirty, dirty_buckets) * fp_term;
34867 diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
34868 index 46b5d542b8fe..362c887d33b3 100644
34869 --- a/drivers/md/dm-integrity.c
34870 +++ b/drivers/md/dm-integrity.c
34871 @@ -4039,6 +4039,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
34872                         if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
34873                                 r = -EINVAL;
34874                                 ti->error = "Invalid bitmap_flush_interval argument";
34875 +                               goto bad;
34876                         }
34877                         ic->bitmap_flush_interval = msecs_to_jiffies(val);
34878                 } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
34879 diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
34880 index cab12b2251ba..91461b6904c1 100644
34881 --- a/drivers/md/dm-raid.c
34882 +++ b/drivers/md/dm-raid.c
34883 @@ -1868,6 +1868,14 @@ static bool rs_takeover_requested(struct raid_set *rs)
34884         return rs->md.new_level != rs->md.level;
34887 +/* True if layout is set to reshape. */
34888 +static bool rs_is_layout_change(struct raid_set *rs, bool use_mddev)
34890 +       return (use_mddev ? rs->md.delta_disks : rs->delta_disks) ||
34891 +              rs->md.new_layout != rs->md.layout ||
34892 +              rs->md.new_chunk_sectors != rs->md.chunk_sectors;
34895  /* True if @rs is requested to reshape by ctr */
34896  static bool rs_reshape_requested(struct raid_set *rs)
34898 @@ -1880,9 +1888,7 @@ static bool rs_reshape_requested(struct raid_set *rs)
34899         if (rs_is_raid0(rs))
34900                 return false;
34902 -       change = mddev->new_layout != mddev->layout ||
34903 -                mddev->new_chunk_sectors != mddev->chunk_sectors ||
34904 -                rs->delta_disks;
34905 +       change = rs_is_layout_change(rs, false);
34907         /* Historical case to support raid1 reshape without delta disks */
34908         if (rs_is_raid1(rs)) {
34909 @@ -2817,7 +2823,7 @@ static sector_t _get_reshape_sectors(struct raid_set *rs)
34912  /*
34913 - *
34914 + * Reshape:
34915   * - change raid layout
34916   * - change chunk size
34917   * - add disks
34918 @@ -2926,6 +2932,20 @@ static int rs_setup_reshape(struct raid_set *rs)
34919         return r;
34923 + * If the md resync thread has updated superblock with max reshape position
34924 + * at the end of a reshape but not (yet) reset the layout configuration
34925 + * changes -> reset the latter.
34926 + */
34927 +static void rs_reset_inconclusive_reshape(struct raid_set *rs)
34929 +       if (!rs_is_reshaping(rs) && rs_is_layout_change(rs, true)) {
34930 +               rs_set_cur(rs);
34931 +               rs->md.delta_disks = 0;
34932 +               rs->md.reshape_backwards = 0;
34933 +       }
34936  /*
34937   * Enable/disable discard support on RAID set depending on
34938   * RAID level and discard properties of underlying RAID members.
34939 @@ -3212,11 +3232,14 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
34940         if (r)
34941                 goto bad;
34943 +       /* Catch any inconclusive reshape superblock content. */
34944 +       rs_reset_inconclusive_reshape(rs);
34946         /* Start raid set read-only and assumed clean to change in raid_resume() */
34947         rs->md.ro = 1;
34948         rs->md.in_sync = 1;
34950 -       /* Keep array frozen */
34951 +       /* Keep array frozen until resume. */
34952         set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
34954         /* Has to be held on running the array */
34955 @@ -3230,7 +3253,6 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
34956         }
34958         r = md_start(&rs->md);
34960         if (r) {
34961                 ti->error = "Failed to start raid array";
34962                 mddev_unlock(&rs->md);
34963 diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
34964 index 13b4385f4d5a..9c3bc3711b33 100644
34965 --- a/drivers/md/dm-rq.c
34966 +++ b/drivers/md/dm-rq.c
34967 @@ -569,6 +569,7 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
34968         blk_mq_free_tag_set(md->tag_set);
34969  out_kfree_tag_set:
34970         kfree(md->tag_set);
34971 +       md->tag_set = NULL;
34973         return err;
34975 @@ -578,6 +579,7 @@ void dm_mq_cleanup_mapped_device(struct mapped_device *md)
34976         if (md->tag_set) {
34977                 blk_mq_free_tag_set(md->tag_set);
34978                 kfree(md->tag_set);
34979 +               md->tag_set = NULL;
34980         }
34983 diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
34984 index 200c5d0f08bf..ea3130e11680 100644
34985 --- a/drivers/md/md-bitmap.c
34986 +++ b/drivers/md/md-bitmap.c
34987 @@ -1722,6 +1722,8 @@ void md_bitmap_flush(struct mddev *mddev)
34988         md_bitmap_daemon_work(mddev);
34989         bitmap->daemon_lastrun -= sleep;
34990         md_bitmap_daemon_work(mddev);
34991 +       if (mddev->bitmap_info.external)
34992 +               md_super_wait(mddev);
34993         md_bitmap_update_sb(bitmap);
34996 diff --git a/drivers/md/md.c b/drivers/md/md.c
34997 index 21da0c48f6c2..2a9553efc2d1 100644
34998 --- a/drivers/md/md.c
34999 +++ b/drivers/md/md.c
35000 @@ -734,7 +734,34 @@ void mddev_init(struct mddev *mddev)
35002  EXPORT_SYMBOL_GPL(mddev_init);
35004 +static struct mddev *mddev_find_locked(dev_t unit)
35006 +       struct mddev *mddev;
35008 +       list_for_each_entry(mddev, &all_mddevs, all_mddevs)
35009 +               if (mddev->unit == unit)
35010 +                       return mddev;
35012 +       return NULL;
35015  static struct mddev *mddev_find(dev_t unit)
35017 +       struct mddev *mddev;
35019 +       if (MAJOR(unit) != MD_MAJOR)
35020 +               unit &= ~((1 << MdpMinorShift) - 1);
35022 +       spin_lock(&all_mddevs_lock);
35023 +       mddev = mddev_find_locked(unit);
35024 +       if (mddev)
35025 +               mddev_get(mddev);
35026 +       spin_unlock(&all_mddevs_lock);
35028 +       return mddev;
35031 +static struct mddev *mddev_find_or_alloc(dev_t unit)
35033         struct mddev *mddev, *new = NULL;
35035 @@ -745,13 +772,13 @@ static struct mddev *mddev_find(dev_t unit)
35036         spin_lock(&all_mddevs_lock);
35038         if (unit) {
35039 -               list_for_each_entry(mddev, &all_mddevs, all_mddevs)
35040 -                       if (mddev->unit == unit) {
35041 -                               mddev_get(mddev);
35042 -                               spin_unlock(&all_mddevs_lock);
35043 -                               kfree(new);
35044 -                               return mddev;
35045 -                       }
35046 +               mddev = mddev_find_locked(unit);
35047 +               if (mddev) {
35048 +                       mddev_get(mddev);
35049 +                       spin_unlock(&all_mddevs_lock);
35050 +                       kfree(new);
35051 +                       return mddev;
35052 +               }
35054                 if (new) {
35055                         list_add(&new->all_mddevs, &all_mddevs);
35056 @@ -777,12 +804,7 @@ static struct mddev *mddev_find(dev_t unit)
35057                                 return NULL;
35058                         }
35060 -                       is_free = 1;
35061 -                       list_for_each_entry(mddev, &all_mddevs, all_mddevs)
35062 -                               if (mddev->unit == dev) {
35063 -                                       is_free = 0;
35064 -                                       break;
35065 -                               }
35066 +                       is_free = !mddev_find_locked(dev);
35067                 }
35068                 new->unit = dev;
35069                 new->md_minor = MINOR(dev);
35070 @@ -5644,7 +5666,7 @@ static int md_alloc(dev_t dev, char *name)
35071          * writing to /sys/module/md_mod/parameters/new_array.
35072          */
35073         static DEFINE_MUTEX(disks_mutex);
35074 -       struct mddev *mddev = mddev_find(dev);
35075 +       struct mddev *mddev = mddev_find_or_alloc(dev);
35076         struct gendisk *disk;
35077         int partitioned;
35078         int shift;
35079 @@ -6524,11 +6546,9 @@ static void autorun_devices(int part)
35081                 md_probe(dev);
35082                 mddev = mddev_find(dev);
35083 -               if (!mddev || !mddev->gendisk) {
35084 -                       if (mddev)
35085 -                               mddev_put(mddev);
35086 +               if (!mddev)
35087                         break;
35088 -               }
35090                 if (mddev_lock(mddev))
35091                         pr_warn("md: %s locked, cannot run\n", mdname(mddev));
35092                 else if (mddev->raid_disks || mddev->major_version
35093 @@ -7821,8 +7841,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
35094                 /* Wait until bdev->bd_disk is definitely gone */
35095                 if (work_pending(&mddev->del_work))
35096                         flush_workqueue(md_misc_wq);
35097 -               /* Then retry the open from the top */
35098 -               return -ERESTARTSYS;
35099 +               return -EBUSY;
35100         }
35101         BUG_ON(mddev != bdev->bd_disk->private_data);
35103 @@ -8153,7 +8172,11 @@ static void *md_seq_start(struct seq_file *seq, loff_t *pos)
35104         loff_t l = *pos;
35105         struct mddev *mddev;
35107 -       if (l >= 0x10000)
35108 +       if (l == 0x10000) {
35109 +               ++*pos;
35110 +               return (void *)2;
35111 +       }
35112 +       if (l > 0x10000)
35113                 return NULL;
35114         if (!l--)
35115                 /* header */
35116 @@ -9251,11 +9274,11 @@ void md_check_recovery(struct mddev *mddev)
35117                 }
35119                 if (mddev_is_clustered(mddev)) {
35120 -                       struct md_rdev *rdev;
35121 +                       struct md_rdev *rdev, *tmp;
35122                         /* kick the device if another node issued a
35123                          * remove disk.
35124                          */
35125 -                       rdev_for_each(rdev, mddev) {
35126 +                       rdev_for_each_safe(rdev, tmp, mddev) {
35127                                 if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
35128                                                 rdev->raid_disk < 0)
35129                                         md_kick_rdev_from_array(rdev);
35130 @@ -9569,7 +9592,7 @@ static int __init md_init(void)
35131  static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
35133         struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
35134 -       struct md_rdev *rdev2;
35135 +       struct md_rdev *rdev2, *tmp;
35136         int role, ret;
35137         char b[BDEVNAME_SIZE];
35139 @@ -9586,7 +9609,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
35140         }
35142         /* Check for change of roles in the active devices */
35143 -       rdev_for_each(rdev2, mddev) {
35144 +       rdev_for_each_safe(rdev2, tmp, mddev) {
35145                 if (test_bit(Faulty, &rdev2->flags))
35146                         continue;
35148 diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
35149 index fe073d92f01e..70cfdea27efd 100644
35150 --- a/drivers/md/persistent-data/dm-btree-internal.h
35151 +++ b/drivers/md/persistent-data/dm-btree-internal.h
35152 @@ -34,12 +34,12 @@ struct node_header {
35153         __le32 max_entries;
35154         __le32 value_size;
35155         __le32 padding;
35156 -} __packed;
35157 +} __attribute__((packed, aligned(8)));
35159  struct btree_node {
35160         struct node_header header;
35161         __le64 keys[];
35162 -} __packed;
35163 +} __attribute__((packed, aligned(8)));
35166  /*
35167 diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
35168 index d8b4125e338c..a213bf11738f 100644
35169 --- a/drivers/md/persistent-data/dm-space-map-common.c
35170 +++ b/drivers/md/persistent-data/dm-space-map-common.c
35171 @@ -339,6 +339,8 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
35172          */
35173         begin = do_div(index_begin, ll->entries_per_block);
35174         end = do_div(end, ll->entries_per_block);
35175 +       if (end == 0)
35176 +               end = ll->entries_per_block;
35178         for (i = index_begin; i < index_end; i++, begin = 0) {
35179                 struct dm_block *blk;
35180 diff --git a/drivers/md/persistent-data/dm-space-map-common.h b/drivers/md/persistent-data/dm-space-map-common.h
35181 index 8de63ce39bdd..87e17909ef52 100644
35182 --- a/drivers/md/persistent-data/dm-space-map-common.h
35183 +++ b/drivers/md/persistent-data/dm-space-map-common.h
35184 @@ -33,7 +33,7 @@ struct disk_index_entry {
35185         __le64 blocknr;
35186         __le32 nr_free;
35187         __le32 none_free_before;
35188 -} __packed;
35189 +} __attribute__ ((packed, aligned(8)));
35192  #define MAX_METADATA_BITMAPS 255
35193 @@ -43,7 +43,7 @@ struct disk_metadata_index {
35194         __le64 blocknr;
35196         struct disk_index_entry index[MAX_METADATA_BITMAPS];
35197 -} __packed;
35198 +} __attribute__ ((packed, aligned(8)));
35200  struct ll_disk;
35202 @@ -86,7 +86,7 @@ struct disk_sm_root {
35203         __le64 nr_allocated;
35204         __le64 bitmap_root;
35205         __le64 ref_count_root;
35206 -} __packed;
35207 +} __attribute__ ((packed, aligned(8)));
35209  #define ENTRIES_PER_BYTE 4
35211 @@ -94,7 +94,7 @@ struct disk_bitmap_header {
35212         __le32 csum;
35213         __le32 not_used;
35214         __le64 blocknr;
35215 -} __packed;
35216 +} __attribute__ ((packed, aligned(8)));
35218  enum allocation_event {
35219         SM_NONE,
35220 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
35221 index d2378765dc15..ced076ba560e 100644
35222 --- a/drivers/md/raid1.c
35223 +++ b/drivers/md/raid1.c
35224 @@ -478,6 +478,8 @@ static void raid1_end_write_request(struct bio *bio)
35225                 if (!test_bit(Faulty, &rdev->flags))
35226                         set_bit(R1BIO_WriteError, &r1_bio->state);
35227                 else {
35228 +                       /* Fail the request */
35229 +                       set_bit(R1BIO_Degraded, &r1_bio->state);
35230                         /* Finished with this branch */
35231                         r1_bio->bios[mirror] = NULL;
35232                         to_put = bio;
35233 diff --git a/drivers/media/common/saa7146/saa7146_core.c b/drivers/media/common/saa7146/saa7146_core.c
35234 index f2d13b71416c..e50fa0ff7c5d 100644
35235 --- a/drivers/media/common/saa7146/saa7146_core.c
35236 +++ b/drivers/media/common/saa7146/saa7146_core.c
35237 @@ -253,7 +253,7 @@ int saa7146_pgtable_build_single(struct pci_dev *pci, struct saa7146_pgtable *pt
35238                          i, sg_dma_address(list), sg_dma_len(list),
35239                          list->offset);
35240  */
35241 -               for (p = 0; p * 4096 < list->length; p++, ptr++) {
35242 +               for (p = 0; p * 4096 < sg_dma_len(list); p++, ptr++) {
35243                         *ptr = cpu_to_le32(sg_dma_address(list) + p * 4096);
35244                         nr_pages++;
35245                 }
35246 diff --git a/drivers/media/common/saa7146/saa7146_video.c b/drivers/media/common/saa7146/saa7146_video.c
35247 index 7b8795eca589..66215d9106a4 100644
35248 --- a/drivers/media/common/saa7146/saa7146_video.c
35249 +++ b/drivers/media/common/saa7146/saa7146_video.c
35250 @@ -247,9 +247,8 @@ static int saa7146_pgtable_build(struct saa7146_dev *dev, struct saa7146_buf *bu
35252                 /* walk all pages, copy all page addresses to ptr1 */
35253                 for (i = 0; i < length; i++, list++) {
35254 -                       for (p = 0; p * 4096 < list->length; p++, ptr1++) {
35255 +                       for (p = 0; p * 4096 < sg_dma_len(list); p++, ptr1++)
35256                                 *ptr1 = cpu_to_le32(sg_dma_address(list) - list->offset);
35257 -                       }
35258                 }
35259  /*
35260                 ptr1 = pt1->cpu;
35261 diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
35262 index 5ff7bedee247..3862ddc86ec4 100644
35263 --- a/drivers/media/dvb-core/dvbdev.c
35264 +++ b/drivers/media/dvb-core/dvbdev.c
35265 @@ -241,6 +241,7 @@ static void dvb_media_device_free(struct dvb_device *dvbdev)
35267         if (dvbdev->adapter->conn) {
35268                 media_device_unregister_entity(dvbdev->adapter->conn);
35269 +               kfree(dvbdev->adapter->conn);
35270                 dvbdev->adapter->conn = NULL;
35271                 kfree(dvbdev->adapter->conn_pads);
35272                 dvbdev->adapter->conn_pads = NULL;
35273 diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
35274 index cfa4cdde99d8..02e8aa11e36e 100644
35275 --- a/drivers/media/dvb-frontends/m88ds3103.c
35276 +++ b/drivers/media/dvb-frontends/m88ds3103.c
35277 @@ -1904,8 +1904,8 @@ static int m88ds3103_probe(struct i2c_client *client,
35279                 dev->dt_client = i2c_new_dummy_device(client->adapter,
35280                                                       dev->dt_addr);
35281 -               if (!dev->dt_client) {
35282 -                       ret = -ENODEV;
35283 +               if (IS_ERR(dev->dt_client)) {
35284 +                       ret = PTR_ERR(dev->dt_client);
35285                         goto err_kfree;
35286                 }
35287         }
35288 diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c
35289 index a3161d709015..ab7883cff8b2 100644
35290 --- a/drivers/media/i2c/adv7511-v4l2.c
35291 +++ b/drivers/media/i2c/adv7511-v4l2.c
35292 @@ -1964,7 +1964,7 @@ static int adv7511_remove(struct i2c_client *client)
35294         adv7511_set_isr(sd, false);
35295         adv7511_init_setup(sd);
35296 -       cancel_delayed_work(&state->edid_handler);
35297 +       cancel_delayed_work_sync(&state->edid_handler);
35298         i2c_unregister_device(state->i2c_edid);
35299         i2c_unregister_device(state->i2c_cec);
35300         i2c_unregister_device(state->i2c_pktmem);
35301 diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
35302 index 09004d928d11..d1f58795794f 100644
35303 --- a/drivers/media/i2c/adv7604.c
35304 +++ b/drivers/media/i2c/adv7604.c
35305 @@ -3616,7 +3616,7 @@ static int adv76xx_remove(struct i2c_client *client)
35306         io_write(sd, 0x6e, 0);
35307         io_write(sd, 0x73, 0);
35309 -       cancel_delayed_work(&state->delayed_work_enable_hotplug);
35310 +       cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
35311         v4l2_async_unregister_subdev(sd);
35312         media_entity_cleanup(&sd->entity);
35313         adv76xx_unregister_clients(to_state(sd));
35314 diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
35315 index 0855f648416d..f7d2b6cd3008 100644
35316 --- a/drivers/media/i2c/adv7842.c
35317 +++ b/drivers/media/i2c/adv7842.c
35318 @@ -3586,7 +3586,7 @@ static int adv7842_remove(struct i2c_client *client)
35319         struct adv7842_state *state = to_state(sd);
35321         adv7842_irq_enable(sd, false);
35322 -       cancel_delayed_work(&state->delayed_work_enable_hotplug);
35323 +       cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
35324         v4l2_device_unregister_subdev(sd);
35325         media_entity_cleanup(&sd->entity);
35326         adv7842_unregister_clients(sd);
35327 diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
35328 index 15afbb4f5b31..4505594996bd 100644
35329 --- a/drivers/media/i2c/ccs/ccs-core.c
35330 +++ b/drivers/media/i2c/ccs/ccs-core.c
35331 @@ -3522,11 +3522,11 @@ static int ccs_probe(struct i2c_client *client)
35332         sensor->pll.scale_n = CCS_LIM(sensor, SCALER_N_MIN);
35334         ccs_create_subdev(sensor, sensor->scaler, " scaler", 2,
35335 -                         MEDIA_ENT_F_CAM_SENSOR);
35336 +                         MEDIA_ENT_F_PROC_VIDEO_SCALER);
35337         ccs_create_subdev(sensor, sensor->binner, " binner", 2,
35338                           MEDIA_ENT_F_PROC_VIDEO_SCALER);
35339         ccs_create_subdev(sensor, sensor->pixel_array, " pixel_array", 1,
35340 -                         MEDIA_ENT_F_PROC_VIDEO_SCALER);
35341 +                         MEDIA_ENT_F_CAM_SENSOR);
35343         rval = ccs_init_controls(sensor);
35344         if (rval < 0)
35345 diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
35346 index 6e3382b85a90..49ba39418360 100644
35347 --- a/drivers/media/i2c/imx219.c
35348 +++ b/drivers/media/i2c/imx219.c
35349 @@ -1035,29 +1035,47 @@ static int imx219_start_streaming(struct imx219 *imx219)
35350         const struct imx219_reg_list *reg_list;
35351         int ret;
35353 +       ret = pm_runtime_get_sync(&client->dev);
35354 +       if (ret < 0) {
35355 +               pm_runtime_put_noidle(&client->dev);
35356 +               return ret;
35357 +       }
35359         /* Apply default values of current mode */
35360         reg_list = &imx219->mode->reg_list;
35361         ret = imx219_write_regs(imx219, reg_list->regs, reg_list->num_of_regs);
35362         if (ret) {
35363                 dev_err(&client->dev, "%s failed to set mode\n", __func__);
35364 -               return ret;
35365 +               goto err_rpm_put;
35366         }
35368         ret = imx219_set_framefmt(imx219);
35369         if (ret) {
35370                 dev_err(&client->dev, "%s failed to set frame format: %d\n",
35371                         __func__, ret);
35372 -               return ret;
35373 +               goto err_rpm_put;
35374         }
35376         /* Apply customized values from user */
35377         ret =  __v4l2_ctrl_handler_setup(imx219->sd.ctrl_handler);
35378         if (ret)
35379 -               return ret;
35380 +               goto err_rpm_put;
35382         /* set stream on register */
35383 -       return imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
35384 -                               IMX219_REG_VALUE_08BIT, IMX219_MODE_STREAMING);
35385 +       ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
35386 +                              IMX219_REG_VALUE_08BIT, IMX219_MODE_STREAMING);
35387 +       if (ret)
35388 +               goto err_rpm_put;
35390 +       /* vflip and hflip cannot change during streaming */
35391 +       __v4l2_ctrl_grab(imx219->vflip, true);
35392 +       __v4l2_ctrl_grab(imx219->hflip, true);
35394 +       return 0;
35396 +err_rpm_put:
35397 +       pm_runtime_put(&client->dev);
35398 +       return ret;
35401  static void imx219_stop_streaming(struct imx219 *imx219)
35402 @@ -1070,12 +1088,16 @@ static void imx219_stop_streaming(struct imx219 *imx219)
35403                                IMX219_REG_VALUE_08BIT, IMX219_MODE_STANDBY);
35404         if (ret)
35405                 dev_err(&client->dev, "%s failed to set stream\n", __func__);
35407 +       __v4l2_ctrl_grab(imx219->vflip, false);
35408 +       __v4l2_ctrl_grab(imx219->hflip, false);
35410 +       pm_runtime_put(&client->dev);
35413  static int imx219_set_stream(struct v4l2_subdev *sd, int enable)
35415         struct imx219 *imx219 = to_imx219(sd);
35416 -       struct i2c_client *client = v4l2_get_subdevdata(sd);
35417         int ret = 0;
35419         mutex_lock(&imx219->mutex);
35420 @@ -1085,36 +1107,23 @@ static int imx219_set_stream(struct v4l2_subdev *sd, int enable)
35421         }
35423         if (enable) {
35424 -               ret = pm_runtime_get_sync(&client->dev);
35425 -               if (ret < 0) {
35426 -                       pm_runtime_put_noidle(&client->dev);
35427 -                       goto err_unlock;
35428 -               }
35430                 /*
35431                  * Apply default & customized values
35432                  * and then start streaming.
35433                  */
35434                 ret = imx219_start_streaming(imx219);
35435                 if (ret)
35436 -                       goto err_rpm_put;
35437 +                       goto err_unlock;
35438         } else {
35439                 imx219_stop_streaming(imx219);
35440 -               pm_runtime_put(&client->dev);
35441         }
35443         imx219->streaming = enable;
35445 -       /* vflip and hflip cannot change during streaming */
35446 -       __v4l2_ctrl_grab(imx219->vflip, enable);
35447 -       __v4l2_ctrl_grab(imx219->hflip, enable);
35449         mutex_unlock(&imx219->mutex);
35451         return ret;
35453 -err_rpm_put:
35454 -       pm_runtime_put(&client->dev);
35455  err_unlock:
35456         mutex_unlock(&imx219->mutex);
35458 diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c
35459 index 39530d43590e..a7caf2eb5771 100644
35460 --- a/drivers/media/i2c/msp3400-driver.c
35461 +++ b/drivers/media/i2c/msp3400-driver.c
35462 @@ -170,7 +170,7 @@ static int msp_read(struct i2c_client *client, int dev, int addr)
35463                         break;
35464                 dev_warn(&client->dev, "I/O error #%d (read 0x%02x/0x%02x)\n", err,
35465                        dev, addr);
35466 -               schedule_timeout_interruptible(msecs_to_jiffies(10));
35467 +               schedule_msec_hrtimeout_interruptible((10));
35468         }
35469         if (err == 3) {
35470                 dev_warn(&client->dev, "resetting chip, sound will go off.\n");
35471 @@ -211,7 +211,7 @@ static int msp_write(struct i2c_client *client, int dev, int addr, int val)
35472                         break;
35473                 dev_warn(&client->dev, "I/O error #%d (write 0x%02x/0x%02x)\n", err,
35474                        dev, addr);
35475 -               schedule_timeout_interruptible(msecs_to_jiffies(10));
35476 +               schedule_msec_hrtimeout_interruptible((10));
35477         }
35478         if (err == 3) {
35479                 dev_warn(&client->dev, "resetting chip, sound will go off.\n");
35480 diff --git a/drivers/media/i2c/rdacm21.c b/drivers/media/i2c/rdacm21.c
35481 index dcc21515e5a4..179d107f494c 100644
35482 --- a/drivers/media/i2c/rdacm21.c
35483 +++ b/drivers/media/i2c/rdacm21.c
35484 @@ -345,7 +345,7 @@ static int ov10640_initialize(struct rdacm21_device *dev)
35485         /* Read OV10640 ID to test communications. */
35486         ov490_write_reg(dev, OV490_SCCB_SLAVE0_DIR, OV490_SCCB_SLAVE_READ);
35487         ov490_write_reg(dev, OV490_SCCB_SLAVE0_ADDR_HIGH, OV10640_CHIP_ID >> 8);
35488 -       ov490_write_reg(dev, OV490_SCCB_SLAVE0_ADDR_LOW, (u8)OV10640_CHIP_ID);
35489 +       ov490_write_reg(dev, OV490_SCCB_SLAVE0_ADDR_LOW, OV10640_CHIP_ID & 0xff);
35491         /* Trigger SCCB slave transaction and give it some time to complete. */
35492         ov490_write_reg(dev, OV490_HOST_CMD, OV490_HOST_CMD_TRIGGER);
35493 diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
35494 index 831b5b54fd78..1b309bb743c7 100644
35495 --- a/drivers/media/i2c/tc358743.c
35496 +++ b/drivers/media/i2c/tc358743.c
35497 @@ -2193,7 +2193,7 @@ static int tc358743_remove(struct i2c_client *client)
35498                 del_timer_sync(&state->timer);
35499                 flush_work(&state->work_i2c_poll);
35500         }
35501 -       cancel_delayed_work(&state->delayed_work_enable_hotplug);
35502 +       cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
35503         cec_unregister_adapter(state->cec_adap);
35504         v4l2_async_unregister_subdev(sd);
35505         v4l2_device_unregister_subdev(sd);
35506 diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
35507 index a09bf0a39d05..89bb7e6dc7a4 100644
35508 --- a/drivers/media/i2c/tda1997x.c
35509 +++ b/drivers/media/i2c/tda1997x.c
35510 @@ -2804,7 +2804,7 @@ static int tda1997x_remove(struct i2c_client *client)
35511         media_entity_cleanup(&sd->entity);
35512         v4l2_ctrl_handler_free(&state->hdl);
35513         regulator_bulk_disable(TDA1997X_NUM_SUPPLIES, state->supplies);
35514 -       cancel_delayed_work(&state->delayed_work_enable_hpd);
35515 +       cancel_delayed_work_sync(&state->delayed_work_enable_hpd);
35516         mutex_destroy(&state->page_lock);
35517         mutex_destroy(&state->lock);
35519 diff --git a/drivers/media/pci/cx18/cx18-gpio.c b/drivers/media/pci/cx18/cx18-gpio.c
35520 index cf7cfda94107..f63e17489547 100644
35521 --- a/drivers/media/pci/cx18/cx18-gpio.c
35522 +++ b/drivers/media/pci/cx18/cx18-gpio.c
35523 @@ -81,11 +81,11 @@ static void gpio_reset_seq(struct cx18 *cx, u32 active_lo, u32 active_hi,
35525         /* Assert */
35526         gpio_update(cx, mask, ~active_lo);
35527 -       schedule_timeout_uninterruptible(msecs_to_jiffies(assert_msecs));
35528 +       schedule_msec_hrtimeout_uninterruptible((assert_msecs));
35530         /* Deassert */
35531         gpio_update(cx, mask, ~active_hi);
35532 -       schedule_timeout_uninterruptible(msecs_to_jiffies(recovery_msecs));
35533 +       schedule_msec_hrtimeout_uninterruptible((recovery_msecs));
35536  /*
35537 diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
35538 index 22f55a7840a6..d0ca260ecf70 100644
35539 --- a/drivers/media/pci/cx23885/cx23885-core.c
35540 +++ b/drivers/media/pci/cx23885/cx23885-core.c
35541 @@ -2077,6 +2077,15 @@ static struct {
35542          * 0x1423 is the PCI ID for the IOMMU found on Kaveri
35543          */
35544         { PCI_VENDOR_ID_AMD, 0x1423 },
35545 +       /* 0x1481 is the PCI ID for the IOMMU found on Starship/Matisse
35546 +        */
35547 +       { PCI_VENDOR_ID_AMD, 0x1481 },
35548 +       /* 0x1419 is the PCI ID for the IOMMU found on 15h (Models 10h-1fh) family
35549 +        */
35550 +       { PCI_VENDOR_ID_AMD, 0x1419 },
35551 +       /* 0x5a23 is the PCI ID for the IOMMU found on RD890S/RD990
35552 +        */
35553 +       { PCI_VENDOR_ID_ATI, 0x5a23 },
35554  };
35556  static bool cx23885_does_need_dma_reset(void)
35557 diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
35558 index 6e8c0c230e11..fecef85bd62e 100644
35559 --- a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
35560 +++ b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
35561 @@ -302,7 +302,7 @@ static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
35562         if (!q->sensor)
35563                 return -ENODEV;
35565 -       freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes);
35566 +       freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes * 2);
35567         if (freq < 0) {
35568                 dev_err(dev, "error %lld, invalid link_freq\n", freq);
35569                 return freq;
35570 diff --git a/drivers/media/pci/ivtv/ivtv-gpio.c b/drivers/media/pci/ivtv/ivtv-gpio.c
35571 index 856e7ab7f33e..766a26251337 100644
35572 --- a/drivers/media/pci/ivtv/ivtv-gpio.c
35573 +++ b/drivers/media/pci/ivtv/ivtv-gpio.c
35574 @@ -105,7 +105,7 @@ void ivtv_reset_ir_gpio(struct ivtv *itv)
35575         curout = (curout & ~0xF) | 1;
35576         write_reg(curout, IVTV_REG_GPIO_OUT);
35577         /* We could use something else for smaller time */
35578 -       schedule_timeout_interruptible(msecs_to_jiffies(1));
35579 +       schedule_msec_hrtimeout_interruptible((1));
35580         curout |= 2;
35581         write_reg(curout, IVTV_REG_GPIO_OUT);
35582         curdir &= ~0x80;
35583 @@ -125,11 +125,11 @@ int ivtv_reset_tuner_gpio(void *dev, int component, int cmd, int value)
35584         curout = read_reg(IVTV_REG_GPIO_OUT);
35585         curout &= ~(1 << itv->card->xceive_pin);
35586         write_reg(curout, IVTV_REG_GPIO_OUT);
35587 -       schedule_timeout_interruptible(msecs_to_jiffies(1));
35588 +       schedule_msec_hrtimeout_interruptible((1));
35590         curout |= 1 << itv->card->xceive_pin;
35591         write_reg(curout, IVTV_REG_GPIO_OUT);
35592 -       schedule_timeout_interruptible(msecs_to_jiffies(1));
35593 +       schedule_msec_hrtimeout_interruptible((1));
35594         return 0;
35597 diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
35598 index 35dccb31174c..8181cd65e876 100644
35599 --- a/drivers/media/pci/ivtv/ivtv-ioctl.c
35600 +++ b/drivers/media/pci/ivtv/ivtv-ioctl.c
35601 @@ -1139,7 +1139,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id std)
35602                                 TASK_UNINTERRUPTIBLE);
35603                 if ((read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16) < 100)
35604                         break;
35605 -               schedule_timeout(msecs_to_jiffies(25));
35606 +               schedule_msec_hrtimeout((25));
35607         }
35608         finish_wait(&itv->vsync_waitq, &wait);
35609         mutex_lock(&itv->serialize_lock);
35610 diff --git a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c
35611 index f04ee84bab5f..c4469b4b8f99 100644
35612 --- a/drivers/media/pci/ivtv/ivtv-streams.c
35613 +++ b/drivers/media/pci/ivtv/ivtv-streams.c
35614 @@ -849,7 +849,7 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
35615                         while (!test_bit(IVTV_F_I_EOS, &itv->i_flags) &&
35616                                 time_before(jiffies,
35617                                             then + msecs_to_jiffies(2000))) {
35618 -                               schedule_timeout(msecs_to_jiffies(10));
35619 +                               schedule_msec_hrtimeout((10));
35620                         }
35622                         /* To convert jiffies to ms, we must multiply by 1000
35623 diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
35624 index 391572a6ec76..efb757d5168a 100644
35625 --- a/drivers/media/pci/saa7134/saa7134-core.c
35626 +++ b/drivers/media/pci/saa7134/saa7134-core.c
35627 @@ -243,7 +243,7 @@ int saa7134_pgtable_build(struct pci_dev *pci, struct saa7134_pgtable *pt,
35629         ptr = pt->cpu + startpage;
35630         for (i = 0; i < length; i++, list = sg_next(list)) {
35631 -               for (p = 0; p * 4096 < list->length; p++, ptr++)
35632 +               for (p = 0; p * 4096 < sg_dma_len(list); p++, ptr++)
35633                         *ptr = cpu_to_le32(sg_dma_address(list) +
35634                                                 list->offset + p * 4096);
35635         }
35636 diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c
35637 index 11e1eb6a6809..1d1d32e043f1 100644
35638 --- a/drivers/media/pci/saa7164/saa7164-encoder.c
35639 +++ b/drivers/media/pci/saa7164/saa7164-encoder.c
35640 @@ -1008,7 +1008,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
35641                 printk(KERN_ERR "%s() failed (errno = %d), NO PCI configuration\n",
35642                         __func__, result);
35643                 result = -ENOMEM;
35644 -               goto failed;
35645 +               goto fail_pci;
35646         }
35648         /* Establish encoder defaults here */
35649 @@ -1062,7 +1062,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
35650                           100000, ENCODER_DEF_BITRATE);
35651         if (hdl->error) {
35652                 result = hdl->error;
35653 -               goto failed;
35654 +               goto fail_hdl;
35655         }
35657         port->std = V4L2_STD_NTSC_M;
35658 @@ -1080,7 +1080,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
35659                 printk(KERN_INFO "%s: can't allocate mpeg device\n",
35660                         dev->name);
35661                 result = -ENOMEM;
35662 -               goto failed;
35663 +               goto fail_hdl;
35664         }
35666         port->v4l_device->ctrl_handler = hdl;
35667 @@ -1091,10 +1091,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
35668         if (result < 0) {
35669                 printk(KERN_INFO "%s: can't register mpeg device\n",
35670                         dev->name);
35671 -               /* TODO: We're going to leak here if we don't dealloc
35672 -                The buffers above. The unreg function can't deal wit it.
35673 -               */
35674 -               goto failed;
35675 +               goto fail_reg;
35676         }
35678         printk(KERN_INFO "%s: registered device video%d [mpeg]\n",
35679 @@ -1116,9 +1113,14 @@ int saa7164_encoder_register(struct saa7164_port *port)
35681         saa7164_api_set_encoder(port);
35682         saa7164_api_get_encoder(port);
35683 +       return 0;
35685 -       result = 0;
35686 -failed:
35687 +fail_reg:
35688 +       video_device_release(port->v4l_device);
35689 +       port->v4l_device = NULL;
35690 +fail_hdl:
35691 +       v4l2_ctrl_handler_free(hdl);
35692 +fail_pci:
35693         return result;
35696 diff --git a/drivers/media/pci/sta2x11/Kconfig b/drivers/media/pci/sta2x11/Kconfig
35697 index 4dd98f94a91e..27bb78513631 100644
35698 --- a/drivers/media/pci/sta2x11/Kconfig
35699 +++ b/drivers/media/pci/sta2x11/Kconfig
35700 @@ -3,6 +3,7 @@ config STA2X11_VIP
35701         tristate "STA2X11 VIP Video For Linux"
35702         depends on PCI && VIDEO_V4L2 && VIRT_TO_BUS && I2C
35703         depends on STA2X11 || COMPILE_TEST
35704 +       select GPIOLIB if MEDIA_SUBDRV_AUTOSELECT
35705         select VIDEO_ADV7180 if MEDIA_SUBDRV_AUTOSELECT
35706         select VIDEOBUF2_DMA_CONTIG
35707         select MEDIA_CONTROLLER
35708 diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
35709 index fd1831e97b22..1ddb5d6354cf 100644
35710 --- a/drivers/media/platform/Kconfig
35711 +++ b/drivers/media/platform/Kconfig
35712 @@ -244,6 +244,7 @@ config VIDEO_MEDIATEK_JPEG
35713         depends on MTK_IOMMU_V1 || MTK_IOMMU || COMPILE_TEST
35714         depends on VIDEO_DEV && VIDEO_V4L2
35715         depends on ARCH_MEDIATEK || COMPILE_TEST
35716 +       depends on MTK_SMI || (COMPILE_TEST && MTK_SMI=n)
35717         select VIDEOBUF2_DMA_CONTIG
35718         select V4L2_MEM2MEM_DEV
35719         help
35720 @@ -271,6 +272,7 @@ config VIDEO_MEDIATEK_MDP
35721         depends on MTK_IOMMU || COMPILE_TEST
35722         depends on VIDEO_DEV && VIDEO_V4L2
35723         depends on ARCH_MEDIATEK || COMPILE_TEST
35724 +       depends on MTK_SMI || (COMPILE_TEST && MTK_SMI=n)
35725         select VIDEOBUF2_DMA_CONTIG
35726         select V4L2_MEM2MEM_DEV
35727         select VIDEO_MEDIATEK_VPU
35728 @@ -291,6 +293,7 @@ config VIDEO_MEDIATEK_VCODEC
35729         # our dependencies, to avoid missing symbols during link.
35730         depends on VIDEO_MEDIATEK_VPU || !VIDEO_MEDIATEK_VPU
35731         depends on MTK_SCP || !MTK_SCP
35732 +       depends on MTK_SMI || (COMPILE_TEST && MTK_SMI=n)
35733         select VIDEOBUF2_DMA_CONTIG
35734         select V4L2_MEM2MEM_DEV
35735         select VIDEO_MEDIATEK_VCODEC_VPU if VIDEO_MEDIATEK_VPU
35736 diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
35737 index f2c4dadd6a0e..7bb6babdcade 100644
35738 --- a/drivers/media/platform/aspeed-video.c
35739 +++ b/drivers/media/platform/aspeed-video.c
35740 @@ -514,8 +514,8 @@ static void aspeed_video_off(struct aspeed_video *video)
35741         aspeed_video_write(video, VE_INTERRUPT_STATUS, 0xffffffff);
35743         /* Turn off the relevant clocks */
35744 -       clk_disable(video->vclk);
35745         clk_disable(video->eclk);
35746 +       clk_disable(video->vclk);
35748         clear_bit(VIDEO_CLOCKS_ON, &video->flags);
35750 @@ -526,8 +526,8 @@ static void aspeed_video_on(struct aspeed_video *video)
35751                 return;
35753         /* Turn on the relevant clocks */
35754 -       clk_enable(video->eclk);
35755         clk_enable(video->vclk);
35756 +       clk_enable(video->eclk);
35758         set_bit(VIDEO_CLOCKS_ON, &video->flags);
35760 @@ -1719,8 +1719,11 @@ static int aspeed_video_probe(struct platform_device *pdev)
35761                 return rc;
35763         rc = aspeed_video_setup_video(video);
35764 -       if (rc)
35765 +       if (rc) {
35766 +               clk_unprepare(video->vclk);
35767 +               clk_unprepare(video->eclk);
35768                 return rc;
35769 +       }
35771         return 0;
35773 diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
35774 index 995e95272e51..e600764dce96 100644
35775 --- a/drivers/media/platform/coda/coda-common.c
35776 +++ b/drivers/media/platform/coda/coda-common.c
35777 @@ -2062,7 +2062,9 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
35778         if (q_data_dst->fourcc == V4L2_PIX_FMT_JPEG)
35779                 ctx->params.gop_size = 1;
35780         ctx->gopcounter = ctx->params.gop_size - 1;
35781 -       v4l2_ctrl_s_ctrl(ctx->mb_err_cnt_ctrl, 0);
35782 +       /* Only decoders have this control */
35783 +       if (ctx->mb_err_cnt_ctrl)
35784 +               v4l2_ctrl_s_ctrl(ctx->mb_err_cnt_ctrl, 0);
35786         ret = ctx->ops->start_streaming(ctx);
35787         if (ctx->inst_type == CODA_INST_DECODER) {
35788 diff --git a/drivers/media/platform/meson/ge2d/ge2d.c b/drivers/media/platform/meson/ge2d/ge2d.c
35789 index 153612ca96fc..a1393fefa8ae 100644
35790 --- a/drivers/media/platform/meson/ge2d/ge2d.c
35791 +++ b/drivers/media/platform/meson/ge2d/ge2d.c
35792 @@ -757,7 +757,7 @@ static int ge2d_s_ctrl(struct v4l2_ctrl *ctrl)
35794                 if (ctrl->val == 90) {
35795                         ctx->hflip = 0;
35796 -                       ctx->vflip = 0;
35797 +                       ctx->vflip = 1;
35798                         ctx->xy_swap = 1;
35799                 } else if (ctrl->val == 180) {
35800                         ctx->hflip = 1;
35801 @@ -765,7 +765,7 @@ static int ge2d_s_ctrl(struct v4l2_ctrl *ctrl)
35802                         ctx->xy_swap = 0;
35803                 } else if (ctrl->val == 270) {
35804                         ctx->hflip = 1;
35805 -                       ctx->vflip = 1;
35806 +                       ctx->vflip = 0;
35807                         ctx->xy_swap = 1;
35808                 } else {
35809                         ctx->hflip = 0;
35810 diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
35811 index f9896c121fd8..ae374bb2a48f 100644
35812 --- a/drivers/media/platform/qcom/venus/core.c
35813 +++ b/drivers/media/platform/qcom/venus/core.c
35814 @@ -218,18 +218,17 @@ static int venus_probe(struct platform_device *pdev)
35815                 return -ENOMEM;
35817         core->dev = dev;
35818 -       platform_set_drvdata(pdev, core);
35820         r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
35821         core->base = devm_ioremap_resource(dev, r);
35822         if (IS_ERR(core->base))
35823                 return PTR_ERR(core->base);
35825 -       core->video_path = of_icc_get(dev, "video-mem");
35826 +       core->video_path = devm_of_icc_get(dev, "video-mem");
35827         if (IS_ERR(core->video_path))
35828                 return PTR_ERR(core->video_path);
35830 -       core->cpucfg_path = of_icc_get(dev, "cpu-cfg");
35831 +       core->cpucfg_path = devm_of_icc_get(dev, "cpu-cfg");
35832         if (IS_ERR(core->cpucfg_path))
35833                 return PTR_ERR(core->cpucfg_path);
35835 @@ -248,7 +247,7 @@ static int venus_probe(struct platform_device *pdev)
35836                 return -ENODEV;
35838         if (core->pm_ops->core_get) {
35839 -               ret = core->pm_ops->core_get(dev);
35840 +               ret = core->pm_ops->core_get(core);
35841                 if (ret)
35842                         return ret;
35843         }
35844 @@ -273,6 +272,12 @@ static int venus_probe(struct platform_device *pdev)
35845         if (ret)
35846                 goto err_core_put;
35848 +       ret = v4l2_device_register(dev, &core->v4l2_dev);
35849 +       if (ret)
35850 +               goto err_core_deinit;
35852 +       platform_set_drvdata(pdev, core);
35854         pm_runtime_enable(dev);
35856         ret = pm_runtime_get_sync(dev);
35857 @@ -307,10 +312,6 @@ static int venus_probe(struct platform_device *pdev)
35858         if (ret)
35859                 goto err_venus_shutdown;
35861 -       ret = v4l2_device_register(dev, &core->v4l2_dev);
35862 -       if (ret)
35863 -               goto err_core_deinit;
35865         ret = pm_runtime_put_sync(dev);
35866         if (ret) {
35867                 pm_runtime_get_noresume(dev);
35868 @@ -323,8 +324,6 @@ static int venus_probe(struct platform_device *pdev)
35870  err_dev_unregister:
35871         v4l2_device_unregister(&core->v4l2_dev);
35872 -err_core_deinit:
35873 -       hfi_core_deinit(core, false);
35874  err_venus_shutdown:
35875         venus_shutdown(core);
35876  err_runtime_disable:
35877 @@ -332,9 +331,11 @@ static int venus_probe(struct platform_device *pdev)
35878         pm_runtime_set_suspended(dev);
35879         pm_runtime_disable(dev);
35880         hfi_destroy(core);
35881 +err_core_deinit:
35882 +       hfi_core_deinit(core, false);
35883  err_core_put:
35884         if (core->pm_ops->core_put)
35885 -               core->pm_ops->core_put(dev);
35886 +               core->pm_ops->core_put(core);
35887         return ret;
35890 @@ -360,14 +361,14 @@ static int venus_remove(struct platform_device *pdev)
35891         pm_runtime_disable(dev);
35893         if (pm_ops->core_put)
35894 -               pm_ops->core_put(dev);
35895 +               pm_ops->core_put(core);
35897 -       hfi_destroy(core);
35898 +       v4l2_device_unregister(&core->v4l2_dev);
35900 -       icc_put(core->video_path);
35901 -       icc_put(core->cpucfg_path);
35902 +       hfi_destroy(core);
35904         v4l2_device_unregister(&core->v4l2_dev);
35906         mutex_destroy(&core->pm_lock);
35907         mutex_destroy(&core->lock);
35908         venus_dbgfs_deinit(core);
35909 @@ -396,7 +397,7 @@ static __maybe_unused int venus_runtime_suspend(struct device *dev)
35910                 return ret;
35912         if (pm_ops->core_power) {
35913 -               ret = pm_ops->core_power(dev, POWER_OFF);
35914 +               ret = pm_ops->core_power(core, POWER_OFF);
35915                 if (ret)
35916                         return ret;
35917         }
35918 @@ -414,7 +415,7 @@ static __maybe_unused int venus_runtime_suspend(struct device *dev)
35919  err_video_path:
35920         icc_set_bw(core->cpucfg_path, kbps_to_icc(1000), 0);
35921  err_cpucfg_path:
35922 -       pm_ops->core_power(dev, POWER_ON);
35923 +       pm_ops->core_power(core, POWER_ON);
35925         return ret;
35927 @@ -434,7 +435,7 @@ static __maybe_unused int venus_runtime_resume(struct device *dev)
35928                 return ret;
35930         if (pm_ops->core_power) {
35931 -               ret = pm_ops->core_power(dev, POWER_ON);
35932 +               ret = pm_ops->core_power(core, POWER_ON);
35933                 if (ret)
35934                         return ret;
35935         }
35936 diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.c b/drivers/media/platform/qcom/venus/hfi_cmds.c
35937 index 4f7565834469..558510a8dfc8 100644
35938 --- a/drivers/media/platform/qcom/venus/hfi_cmds.c
35939 +++ b/drivers/media/platform/qcom/venus/hfi_cmds.c
35940 @@ -1039,6 +1039,18 @@ static int pkt_session_set_property_1x(struct hfi_session_set_property_pkt *pkt,
35941                 pkt->shdr.hdr.size += sizeof(u32) + sizeof(*hierp);
35942                 break;
35943         }
35944 +       case HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO: {
35945 +               struct hfi_uncompressed_plane_actual_info *in = pdata;
35946 +               struct hfi_uncompressed_plane_actual_info *info = prop_data;
35948 +               info->buffer_type = in->buffer_type;
35949 +               info->num_planes = in->num_planes;
35950 +               info->plane_format[0] = in->plane_format[0];
35951 +               if (in->num_planes > 1)
35952 +                       info->plane_format[1] = in->plane_format[1];
35953 +               pkt->shdr.hdr.size += sizeof(u32) + sizeof(*info);
35954 +               break;
35955 +       }
35957         /* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */
35958         case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
35959 @@ -1205,18 +1217,6 @@ pkt_session_set_property_4xx(struct hfi_session_set_property_pkt *pkt,
35960                 pkt->shdr.hdr.size += sizeof(u32) + sizeof(*cu);
35961                 break;
35962         }
35963 -       case HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO: {
35964 -               struct hfi_uncompressed_plane_actual_info *in = pdata;
35965 -               struct hfi_uncompressed_plane_actual_info *info = prop_data;
35967 -               info->buffer_type = in->buffer_type;
35968 -               info->num_planes = in->num_planes;
35969 -               info->plane_format[0] = in->plane_format[0];
35970 -               if (in->num_planes > 1)
35971 -                       info->plane_format[1] = in->plane_format[1];
35972 -               pkt->shdr.hdr.size += sizeof(u32) + sizeof(*info);
35973 -               break;
35974 -       }
35975         case HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE:
35976         case HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER:
35977         case HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE:
35978 diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c
35979 index 7263c0c32695..5b8389b98299 100644
35980 --- a/drivers/media/platform/qcom/venus/hfi_parser.c
35981 +++ b/drivers/media/platform/qcom/venus/hfi_parser.c
35982 @@ -235,13 +235,13 @@ static int hfi_platform_parser(struct venus_core *core, struct venus_inst *inst)
35983         u32 enc_codecs, dec_codecs, count = 0;
35984         unsigned int entries;
35986 -       if (inst)
35987 -               return 0;
35989         plat = hfi_platform_get(core->res->hfi_version);
35990         if (!plat)
35991                 return -EINVAL;
35993 +       if (inst)
35994 +               return 0;
35996         if (plat->codecs)
35997                 plat->codecs(&enc_codecs, &dec_codecs, &count);
35999 @@ -277,8 +277,10 @@ u32 hfi_parser(struct venus_core *core, struct venus_inst *inst, void *buf,
36001         parser_init(inst, &codecs, &domain);
36003 -       core->codecs_count = 0;
36004 -       memset(core->caps, 0, sizeof(core->caps));
36005 +       if (core->res->hfi_version > HFI_VERSION_1XX) {
36006 +               core->codecs_count = 0;
36007 +               memset(core->caps, 0, sizeof(core->caps));
36008 +       }
36010         while (words_count) {
36011                 data = word + 1;
36012 diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
36013 index 43c4e3d9e281..95b4d40ff6a5 100644
36014 --- a/drivers/media/platform/qcom/venus/pm_helpers.c
36015 +++ b/drivers/media/platform/qcom/venus/pm_helpers.c
36016 @@ -277,16 +277,28 @@ static int load_scale_v1(struct venus_inst *inst)
36017         return 0;
36020 -static int core_get_v1(struct device *dev)
36021 +static int core_get_v1(struct venus_core *core)
36023 -       struct venus_core *core = dev_get_drvdata(dev);
36024 +       int ret;
36026 +       ret = core_clks_get(core);
36027 +       if (ret)
36028 +               return ret;
36030 -       return core_clks_get(core);
36031 +       core->opp_table = dev_pm_opp_set_clkname(core->dev, "core");
36032 +       if (IS_ERR(core->opp_table))
36033 +               return PTR_ERR(core->opp_table);
36035 +       return 0;
36038 -static int core_power_v1(struct device *dev, int on)
36039 +static void core_put_v1(struct venus_core *core)
36041 +       dev_pm_opp_put_clkname(core->opp_table);
36044 +static int core_power_v1(struct venus_core *core, int on)
36046 -       struct venus_core *core = dev_get_drvdata(dev);
36047         int ret = 0;
36049         if (on == POWER_ON)
36050 @@ -299,6 +311,7 @@ static int core_power_v1(struct device *dev, int on)
36052  static const struct venus_pm_ops pm_ops_v1 = {
36053         .core_get = core_get_v1,
36054 +       .core_put = core_put_v1,
36055         .core_power = core_power_v1,
36056         .load_scale = load_scale_v1,
36057  };
36058 @@ -371,6 +384,7 @@ static int venc_power_v3(struct device *dev, int on)
36060  static const struct venus_pm_ops pm_ops_v3 = {
36061         .core_get = core_get_v1,
36062 +       .core_put = core_put_v1,
36063         .core_power = core_power_v1,
36064         .vdec_get = vdec_get_v3,
36065         .vdec_power = vdec_power_v3,
36066 @@ -753,12 +767,12 @@ static int venc_power_v4(struct device *dev, int on)
36067         return ret;
36070 -static int vcodec_domains_get(struct device *dev)
36071 +static int vcodec_domains_get(struct venus_core *core)
36073         int ret;
36074         struct opp_table *opp_table;
36075         struct device **opp_virt_dev;
36076 -       struct venus_core *core = dev_get_drvdata(dev);
36077 +       struct device *dev = core->dev;
36078         const struct venus_resources *res = core->res;
36079         struct device *pd;
36080         unsigned int i;
36081 @@ -809,9 +823,8 @@ static int vcodec_domains_get(struct device *dev)
36082         return ret;
36085 -static void vcodec_domains_put(struct device *dev)
36086 +static void vcodec_domains_put(struct venus_core *core)
36088 -       struct venus_core *core = dev_get_drvdata(dev);
36089         const struct venus_resources *res = core->res;
36090         unsigned int i;
36092 @@ -834,9 +847,9 @@ static void vcodec_domains_put(struct device *dev)
36093         dev_pm_opp_detach_genpd(core->opp_table);
36096 -static int core_get_v4(struct device *dev)
36097 +static int core_get_v4(struct venus_core *core)
36099 -       struct venus_core *core = dev_get_drvdata(dev);
36100 +       struct device *dev = core->dev;
36101         const struct venus_resources *res = core->res;
36102         int ret;
36104 @@ -875,7 +888,7 @@ static int core_get_v4(struct device *dev)
36105                 }
36106         }
36108 -       ret = vcodec_domains_get(dev);
36109 +       ret = vcodec_domains_get(core);
36110         if (ret) {
36111                 if (core->has_opp_table)
36112                         dev_pm_opp_of_remove_table(dev);
36113 @@ -886,14 +899,14 @@ static int core_get_v4(struct device *dev)
36114         return 0;
36117 -static void core_put_v4(struct device *dev)
36118 +static void core_put_v4(struct venus_core *core)
36120 -       struct venus_core *core = dev_get_drvdata(dev);
36121 +       struct device *dev = core->dev;
36123         if (legacy_binding)
36124                 return;
36126 -       vcodec_domains_put(dev);
36127 +       vcodec_domains_put(core);
36129         if (core->has_opp_table)
36130                 dev_pm_opp_of_remove_table(dev);
36131 @@ -901,9 +914,9 @@ static void core_put_v4(struct device *dev)
36135 -static int core_power_v4(struct device *dev, int on)
36136 +static int core_power_v4(struct venus_core *core, int on)
36138 -       struct venus_core *core = dev_get_drvdata(dev);
36139 +       struct device *dev = core->dev;
36140         struct device *pmctrl = core->pmdomains[0];
36141         int ret = 0;
36143 diff --git a/drivers/media/platform/qcom/venus/pm_helpers.h b/drivers/media/platform/qcom/venus/pm_helpers.h
36144 index aa2f6afa2354..a492c50c5543 100644
36145 --- a/drivers/media/platform/qcom/venus/pm_helpers.h
36146 +++ b/drivers/media/platform/qcom/venus/pm_helpers.h
36147 @@ -4,14 +4,15 @@
36148  #define __VENUS_PM_HELPERS_H__
36150  struct device;
36151 +struct venus_core;
36153  #define POWER_ON       1
36154  #define POWER_OFF      0
36156  struct venus_pm_ops {
36157 -       int (*core_get)(struct device *dev);
36158 -       void (*core_put)(struct device *dev);
36159 -       int (*core_power)(struct device *dev, int on);
36160 +       int (*core_get)(struct venus_core *core);
36161 +       void (*core_put)(struct venus_core *core);
36162 +       int (*core_power)(struct venus_core *core, int on);
36164         int (*vdec_get)(struct device *dev);
36165         void (*vdec_put)(struct device *dev);
36166 diff --git a/drivers/media/platform/qcom/venus/venc_ctrls.c b/drivers/media/platform/qcom/venus/venc_ctrls.c
36167 index a52b80055173..abef0037bf55 100644
36168 --- a/drivers/media/platform/qcom/venus/venc_ctrls.c
36169 +++ b/drivers/media/platform/qcom/venus/venc_ctrls.c
36170 @@ -359,7 +359,7 @@ int venc_ctrl_init(struct venus_inst *inst)
36171                 V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
36172                 ~((1 << V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) |
36173                 (1 << V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME)),
36174 -               V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE);
36175 +               V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME);
36177         v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
36178                 V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE,
36179 diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
36180 index 813670ed9577..79deed8adcea 100644
36181 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
36182 +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
36183 @@ -520,14 +520,15 @@ static void rkisp1_rsz_set_src_fmt(struct rkisp1_resizer *rsz,
36184                                    struct v4l2_mbus_framefmt *format,
36185                                    unsigned int which)
36187 -       const struct rkisp1_isp_mbus_info *mbus_info;
36188 -       struct v4l2_mbus_framefmt *src_fmt;
36189 +       const struct rkisp1_isp_mbus_info *sink_mbus_info;
36190 +       struct v4l2_mbus_framefmt *src_fmt, *sink_fmt;
36192 +       sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SINK, which);
36193         src_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SRC, which);
36194 -       mbus_info = rkisp1_isp_mbus_info_get(src_fmt->code);
36195 +       sink_mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code);
36197         /* for YUV formats, userspace can change the mbus code on the src pad if it is supported */
36198 -       if (mbus_info->pixel_enc == V4L2_PIXEL_ENC_YUV &&
36199 +       if (sink_mbus_info->pixel_enc == V4L2_PIXEL_ENC_YUV &&
36200             rkisp1_rsz_get_yuv_mbus_info(format->code))
36201                 src_fmt->code = format->code;
36203 diff --git a/drivers/media/platform/sti/bdisp/bdisp-debug.c b/drivers/media/platform/sti/bdisp/bdisp-debug.c
36204 index 2b270093009c..a27f638df11c 100644
36205 --- a/drivers/media/platform/sti/bdisp/bdisp-debug.c
36206 +++ b/drivers/media/platform/sti/bdisp/bdisp-debug.c
36207 @@ -480,7 +480,7 @@ static int regs_show(struct seq_file *s, void *data)
36208         int ret;
36209         unsigned int i;
36211 -       ret = pm_runtime_get_sync(bdisp->dev);
36212 +       ret = pm_runtime_resume_and_get(bdisp->dev);
36213         if (ret < 0) {
36214                 seq_puts(s, "Cannot wake up IP\n");
36215                 return 0;
36216 diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
36217 index b55de9ab64d8..3181d0781b61 100644
36218 --- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
36219 +++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
36220 @@ -151,8 +151,10 @@ static int sun6i_video_start_streaming(struct vb2_queue *vq, unsigned int count)
36221         }
36223         subdev = sun6i_video_remote_subdev(video, NULL);
36224 -       if (!subdev)
36225 +       if (!subdev) {
36226 +               ret = -EINVAL;
36227                 goto stop_media_pipeline;
36228 +       }
36230         config.pixelformat = video->fmt.fmt.pix.pixelformat;
36231         config.code = video->mbus_code;
36232 diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
36233 index ed863bf5ea80..671e4a928993 100644
36234 --- a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
36235 +++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
36236 @@ -589,7 +589,7 @@ static int deinterlace_start_streaming(struct vb2_queue *vq, unsigned int count)
36237         int ret;
36239         if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
36240 -               ret = pm_runtime_get_sync(dev);
36241 +               ret = pm_runtime_resume_and_get(dev);
36242                 if (ret < 0) {
36243                         dev_err(dev, "Failed to enable module\n");
36245 diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
36246 index cb0437b4c331..163fffc0e1d4 100644
36247 --- a/drivers/media/radio/radio-mr800.c
36248 +++ b/drivers/media/radio/radio-mr800.c
36249 @@ -366,7 +366,7 @@ static int vidioc_s_hw_freq_seek(struct file *file, void *priv,
36250                         retval = -ENODATA;
36251                         break;
36252                 }
36253 -               if (schedule_timeout_interruptible(msecs_to_jiffies(10))) {
36254 +               if (schedule_msec_hrtimeout_interruptible((10))) {
36255                         retval = -ERESTARTSYS;
36256                         break;
36257                 }
36258 diff --git a/drivers/media/radio/radio-tea5777.c b/drivers/media/radio/radio-tea5777.c
36259 index fb9de7bbcd19..e53cf45e7f3f 100644
36260 --- a/drivers/media/radio/radio-tea5777.c
36261 +++ b/drivers/media/radio/radio-tea5777.c
36262 @@ -235,7 +235,7 @@ static int radio_tea5777_update_read_reg(struct radio_tea5777 *tea, int wait)
36263         }
36265         if (wait) {
36266 -               if (schedule_timeout_interruptible(msecs_to_jiffies(wait)))
36267 +               if (schedule_msec_hrtimeout_interruptible((wait)))
36268                         return -ERESTARTSYS;
36269         }
36271 diff --git a/drivers/media/radio/tea575x.c b/drivers/media/radio/tea575x.c
36272 index c37315226c42..e73e6393403c 100644
36273 --- a/drivers/media/radio/tea575x.c
36274 +++ b/drivers/media/radio/tea575x.c
36275 @@ -401,7 +401,7 @@ int snd_tea575x_s_hw_freq_seek(struct file *file, struct snd_tea575x *tea,
36276         for (;;) {
36277                 if (time_after(jiffies, timeout))
36278                         break;
36279 -               if (schedule_timeout_interruptible(msecs_to_jiffies(10))) {
36280 +               if (schedule_msec_hrtimeout_interruptible((10))) {
36281                         /* some signal arrived, stop search */
36282                         tea->val &= ~TEA575X_BIT_SEARCH;
36283                         snd_tea575x_set_freq(tea);
36284 diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
36285 index 0c6229592e13..e5c4a6941d26 100644
36286 --- a/drivers/media/rc/ite-cir.c
36287 +++ b/drivers/media/rc/ite-cir.c
36288 @@ -276,8 +276,14 @@ static irqreturn_t ite_cir_isr(int irq, void *data)
36289         /* read the interrupt flags */
36290         iflags = dev->params.get_irq_causes(dev);
36292 +       /* Check for RX overflow */
36293 +       if (iflags & ITE_IRQ_RX_FIFO_OVERRUN) {
36294 +               dev_warn(&dev->rdev->dev, "receive overflow\n");
36295 +               ir_raw_event_reset(dev->rdev);
36296 +       }
36298         /* check for the receive interrupt */
36299 -       if (iflags & (ITE_IRQ_RX_FIFO | ITE_IRQ_RX_FIFO_OVERRUN)) {
36300 +       if (iflags & ITE_IRQ_RX_FIFO) {
36301                 /* read the FIFO bytes */
36302                 rx_bytes =
36303                         dev->params.get_rx_bytes(dev, rx_buf,
36304 diff --git a/drivers/media/test-drivers/vivid/vivid-core.c b/drivers/media/test-drivers/vivid/vivid-core.c
36305 index 0dc65ef3aa14..ca0ebf6ad9cc 100644
36306 --- a/drivers/media/test-drivers/vivid/vivid-core.c
36307 +++ b/drivers/media/test-drivers/vivid/vivid-core.c
36308 @@ -205,13 +205,13 @@ static const u8 vivid_hdmi_edid[256] = {
36309         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
36310         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x7b,
36312 -       0x02, 0x03, 0x3f, 0xf0, 0x51, 0x61, 0x60, 0x5f,
36313 +       0x02, 0x03, 0x3f, 0xf1, 0x51, 0x61, 0x60, 0x5f,
36314         0x5e, 0x5d, 0x10, 0x1f, 0x04, 0x13, 0x22, 0x21,
36315         0x20, 0x05, 0x14, 0x02, 0x11, 0x01, 0x23, 0x09,
36316         0x07, 0x07, 0x83, 0x01, 0x00, 0x00, 0x6d, 0x03,
36317         0x0c, 0x00, 0x10, 0x00, 0x00, 0x3c, 0x21, 0x00,
36318         0x60, 0x01, 0x02, 0x03, 0x67, 0xd8, 0x5d, 0xc4,
36319 -       0x01, 0x78, 0x00, 0x00, 0xe2, 0x00, 0xea, 0xe3,
36320 +       0x01, 0x78, 0x00, 0x00, 0xe2, 0x00, 0xca, 0xe3,
36321         0x05, 0x00, 0x00, 0xe3, 0x06, 0x01, 0x00, 0x4d,
36322         0xd0, 0x00, 0xa0, 0xf0, 0x70, 0x3e, 0x80, 0x30,
36323         0x20, 0x35, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00,
36324 @@ -220,7 +220,7 @@ static const u8 vivid_hdmi_edid[256] = {
36325         0x00, 0x00, 0x1a, 0x1a, 0x1d, 0x00, 0x80, 0x51,
36326         0xd0, 0x1c, 0x20, 0x40, 0x80, 0x35, 0x00, 0xc0,
36327         0x1c, 0x32, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00,
36328 -       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63,
36329 +       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x82,
36330  };
36332  static int vidioc_querycap(struct file *file, void  *priv,
36333 diff --git a/drivers/media/test-drivers/vivid/vivid-vid-out.c b/drivers/media/test-drivers/vivid/vivid-vid-out.c
36334 index ac1e981e8342..9f731f085179 100644
36335 --- a/drivers/media/test-drivers/vivid/vivid-vid-out.c
36336 +++ b/drivers/media/test-drivers/vivid/vivid-vid-out.c
36337 @@ -1021,7 +1021,7 @@ int vivid_vid_out_s_fbuf(struct file *file, void *fh,
36338                 return -EINVAL;
36339         }
36340         dev->fbuf_out_flags &= ~(chroma_flags | alpha_flags);
36341 -       dev->fbuf_out_flags = a->flags & (chroma_flags | alpha_flags);
36342 +       dev->fbuf_out_flags |= a->flags & (chroma_flags | alpha_flags);
36343         return 0;
36346 diff --git a/drivers/media/tuners/m88rs6000t.c b/drivers/media/tuners/m88rs6000t.c
36347 index b3505f402476..8647c50b66e5 100644
36348 --- a/drivers/media/tuners/m88rs6000t.c
36349 +++ b/drivers/media/tuners/m88rs6000t.c
36350 @@ -525,7 +525,7 @@ static int m88rs6000t_get_rf_strength(struct dvb_frontend *fe, u16 *strength)
36351         PGA2_cri = PGA2_GC >> 2;
36352         PGA2_crf = PGA2_GC & 0x03;
36354 -       for (i = 0; i <= RF_GC; i++)
36355 +       for (i = 0; i <= RF_GC && i < ARRAY_SIZE(RFGS); i++)
36356                 RFG += RFGS[i];
36358         if (RF_GC == 0)
36359 @@ -537,12 +537,12 @@ static int m88rs6000t_get_rf_strength(struct dvb_frontend *fe, u16 *strength)
36360         if (RF_GC == 3)
36361                 RFG += 100;
36363 -       for (i = 0; i <= IF_GC; i++)
36364 +       for (i = 0; i <= IF_GC && i < ARRAY_SIZE(IFGS); i++)
36365                 IFG += IFGS[i];
36367         TIAG = TIA_GC * TIA_GS;
36369 -       for (i = 0; i <= BB_GC; i++)
36370 +       for (i = 0; i <= BB_GC && i < ARRAY_SIZE(BBGS); i++)
36371                 BBG += BBGS[i];
36373         PGA2G = PGA2_cri * PGA2_cri_GS + PGA2_crf * PGA2_crf_GS;
36374 diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c
36375 index c1a7634e27b4..28e1fd64dd3c 100644
36376 --- a/drivers/media/usb/dvb-usb/dvb-usb-init.c
36377 +++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c
36378 @@ -79,11 +79,17 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs)
36379                         }
36380                 }
36382 -               if ((ret = dvb_usb_adapter_stream_init(adap)) ||
36383 -                       (ret = dvb_usb_adapter_dvb_init(adap, adapter_nrs)) ||
36384 -                       (ret = dvb_usb_adapter_frontend_init(adap))) {
36385 +               ret = dvb_usb_adapter_stream_init(adap);
36386 +               if (ret)
36387                         return ret;
36388 -               }
36390 +               ret = dvb_usb_adapter_dvb_init(adap, adapter_nrs);
36391 +               if (ret)
36392 +                       goto dvb_init_err;
36394 +               ret = dvb_usb_adapter_frontend_init(adap);
36395 +               if (ret)
36396 +                       goto frontend_init_err;
36398                 /* use exclusive FE lock if there is multiple shared FEs */
36399                 if (adap->fe_adap[1].fe)
36400 @@ -103,6 +109,12 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs)
36401         }
36403         return 0;
36405 +frontend_init_err:
36406 +       dvb_usb_adapter_dvb_exit(adap);
36407 +dvb_init_err:
36408 +       dvb_usb_adapter_stream_exit(adap);
36409 +       return ret;
36412  static int dvb_usb_adapter_exit(struct dvb_usb_device *d)
36413 @@ -158,22 +170,20 @@ static int dvb_usb_init(struct dvb_usb_device *d, short *adapter_nums)
36415                 if (d->props.priv_init != NULL) {
36416                         ret = d->props.priv_init(d);
36417 -                       if (ret != 0) {
36418 -                               kfree(d->priv);
36419 -                               d->priv = NULL;
36420 -                               return ret;
36421 -                       }
36422 +                       if (ret != 0)
36423 +                               goto err_priv_init;
36424                 }
36425         }
36427         /* check the capabilities and set appropriate variables */
36428         dvb_usb_device_power_ctrl(d, 1);
36430 -       if ((ret = dvb_usb_i2c_init(d)) ||
36431 -               (ret = dvb_usb_adapter_init(d, adapter_nums))) {
36432 -               dvb_usb_exit(d);
36433 -               return ret;
36434 -       }
36435 +       ret = dvb_usb_i2c_init(d);
36436 +       if (ret)
36437 +               goto err_i2c_init;
36438 +       ret = dvb_usb_adapter_init(d, adapter_nums);
36439 +       if (ret)
36440 +               goto err_adapter_init;
36442         if ((ret = dvb_usb_remote_init(d)))
36443                 err("could not initialize remote control.");
36444 @@ -181,6 +191,17 @@ static int dvb_usb_init(struct dvb_usb_device *d, short *adapter_nums)
36445         dvb_usb_device_power_ctrl(d, 0);
36447         return 0;
36449 +err_adapter_init:
36450 +       dvb_usb_adapter_exit(d);
36451 +err_i2c_init:
36452 +       dvb_usb_i2c_exit(d);
36453 +       if (d->priv && d->props.priv_destroy)
36454 +               d->props.priv_destroy(d);
36455 +err_priv_init:
36456 +       kfree(d->priv);
36457 +       d->priv = NULL;
36458 +       return ret;
36461  /* determine the name and the state of the just found USB device */
36462 @@ -255,41 +276,50 @@ int dvb_usb_device_init(struct usb_interface *intf,
36463         if (du != NULL)
36464                 *du = NULL;
36466 -       if ((desc = dvb_usb_find_device(udev, props, &cold)) == NULL) {
36467 +       d = kzalloc(sizeof(*d), GFP_KERNEL);
36468 +       if (!d) {
36469 +               err("no memory for 'struct dvb_usb_device'");
36470 +               return -ENOMEM;
36471 +       }
36473 +       memcpy(&d->props, props, sizeof(struct dvb_usb_device_properties));
36475 +       desc = dvb_usb_find_device(udev, &d->props, &cold);
36476 +       if (!desc) {
36477                 deb_err("something went very wrong, device was not found in current device list - let's see what comes next.\n");
36478 -               return -ENODEV;
36479 +               ret = -ENODEV;
36480 +               goto error;
36481         }
36483         if (cold) {
36484                 info("found a '%s' in cold state, will try to load a firmware", desc->name);
36485                 ret = dvb_usb_download_firmware(udev, props);
36486                 if (!props->no_reconnect || ret != 0)
36487 -                       return ret;
36488 +                       goto error;
36489         }
36491         info("found a '%s' in warm state.", desc->name);
36492 -       d = kzalloc(sizeof(struct dvb_usb_device), GFP_KERNEL);
36493 -       if (d == NULL) {
36494 -               err("no memory for 'struct dvb_usb_device'");
36495 -               return -ENOMEM;
36496 -       }
36498         d->udev = udev;
36499 -       memcpy(&d->props, props, sizeof(struct dvb_usb_device_properties));
36500         d->desc = desc;
36501         d->owner = owner;
36503         usb_set_intfdata(intf, d);
36505 -       if (du != NULL)
36506 +       ret = dvb_usb_init(d, adapter_nums);
36507 +       if (ret) {
36508 +               info("%s error while loading driver (%d)", desc->name, ret);
36509 +               goto error;
36510 +       }
36512 +       if (du)
36513                 *du = d;
36515 -       ret = dvb_usb_init(d, adapter_nums);
36516 +       info("%s successfully initialized and connected.", desc->name);
36517 +       return 0;
36519 -       if (ret == 0)
36520 -               info("%s successfully initialized and connected.", desc->name);
36521 -       else
36522 -               info("%s error while loading driver (%d)", desc->name, ret);
36523 + error:
36524 +       usb_set_intfdata(intf, NULL);
36525 +       kfree(d);
36526         return ret;
36528  EXPORT_SYMBOL(dvb_usb_device_init);
36529 diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h
36530 index 741be0e69447..2b8ad2bde8a4 100644
36531 --- a/drivers/media/usb/dvb-usb/dvb-usb.h
36532 +++ b/drivers/media/usb/dvb-usb/dvb-usb.h
36533 @@ -487,7 +487,7 @@ extern int __must_check
36534  dvb_usb_generic_write(struct dvb_usb_device *, u8 *, u16);
36536  /* commonly used remote control parsing */
36537 -extern int dvb_usb_nec_rc_key_to_event(struct dvb_usb_device *, u8[], u32 *, int *);
36538 +extern int dvb_usb_nec_rc_key_to_event(struct dvb_usb_device *, u8[5], u32 *, int *);
36540  /* commonly used firmware download types and function */
36541  struct hexline {
36542 diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
36543 index 526424279637..471bd74667e3 100644
36544 --- a/drivers/media/usb/em28xx/em28xx-dvb.c
36545 +++ b/drivers/media/usb/em28xx/em28xx-dvb.c
36546 @@ -2010,6 +2010,7 @@ static int em28xx_dvb_init(struct em28xx *dev)
36547         return result;
36549  out_free:
36550 +       em28xx_uninit_usb_xfer(dev, EM28XX_DIGITAL_MODE);
36551         kfree(dvb);
36552         dev->dvb = NULL;
36553         goto ret;
36554 diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
36555 index 158c8e28ed2c..47d8f28bfdfc 100644
36556 --- a/drivers/media/usb/gspca/gspca.c
36557 +++ b/drivers/media/usb/gspca/gspca.c
36558 @@ -1576,6 +1576,8 @@ int gspca_dev_probe2(struct usb_interface *intf,
36559  #endif
36560         v4l2_ctrl_handler_free(gspca_dev->vdev.ctrl_handler);
36561         v4l2_device_unregister(&gspca_dev->v4l2_dev);
36562 +       if (sd_desc->probe_error)
36563 +               sd_desc->probe_error(gspca_dev);
36564         kfree(gspca_dev->usb_buf);
36565         kfree(gspca_dev);
36566         return ret;
36567 diff --git a/drivers/media/usb/gspca/gspca.h b/drivers/media/usb/gspca/gspca.h
36568 index b0ced2e14006..a6554d5e9e1a 100644
36569 --- a/drivers/media/usb/gspca/gspca.h
36570 +++ b/drivers/media/usb/gspca/gspca.h
36571 @@ -105,6 +105,7 @@ struct sd_desc {
36572         cam_cf_op config;       /* called on probe */
36573         cam_op init;            /* called on probe and resume */
36574         cam_op init_controls;   /* called on probe */
36575 +       cam_v_op probe_error;   /* called if probe failed, do cleanup here */
36576         cam_op start;           /* called on stream on after URBs creation */
36577         cam_pkt_op pkt_scan;
36578  /* optional operations */
36579 diff --git a/drivers/media/usb/gspca/sq905.c b/drivers/media/usb/gspca/sq905.c
36580 index 97799cfb832e..949111070971 100644
36581 --- a/drivers/media/usb/gspca/sq905.c
36582 +++ b/drivers/media/usb/gspca/sq905.c
36583 @@ -158,7 +158,7 @@ static int
36584  sq905_read_data(struct gspca_dev *gspca_dev, u8 *data, int size, int need_lock)
36586         int ret;
36587 -       int act_len;
36588 +       int act_len = 0;
36590         gspca_dev->usb_buf[0] = '\0';
36591         if (need_lock)
36592 diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx.c b/drivers/media/usb/gspca/stv06xx/stv06xx.c
36593 index 95673fc0a99c..d9bc2aacc885 100644
36594 --- a/drivers/media/usb/gspca/stv06xx/stv06xx.c
36595 +++ b/drivers/media/usb/gspca/stv06xx/stv06xx.c
36596 @@ -529,12 +529,21 @@ static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
36597  static int stv06xx_config(struct gspca_dev *gspca_dev,
36598                           const struct usb_device_id *id);
36600 +static void stv06xx_probe_error(struct gspca_dev *gspca_dev)
36602 +       struct sd *sd = (struct sd *)gspca_dev;
36604 +       kfree(sd->sensor_priv);
36605 +       sd->sensor_priv = NULL;
36608  /* sub-driver description */
36609  static const struct sd_desc sd_desc = {
36610         .name = MODULE_NAME,
36611         .config = stv06xx_config,
36612         .init = stv06xx_init,
36613         .init_controls = stv06xx_init_controls,
36614 +       .probe_error = stv06xx_probe_error,
36615         .start = stv06xx_start,
36616         .stopN = stv06xx_stopN,
36617         .pkt_scan = stv06xx_pkt_scan,
36618 diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
36619 index 30ef2a3110f7..9a791d8ef200 100644
36620 --- a/drivers/media/usb/uvc/uvc_driver.c
36621 +++ b/drivers/media/usb/uvc/uvc_driver.c
36622 @@ -1712,10 +1712,35 @@ static int uvc_scan_chain_forward(struct uvc_video_chain *chain,
36623                         if (forward->bNrInPins != 1) {
36624                                 uvc_dbg(chain->dev, DESCR,
36625                                         "Extension unit %d has more than 1 input pin\n",
36626 -                                       entity->id);
36627 +                                       forward->id);
36628                                 return -EINVAL;
36629                         }
36631 +                       /*
36632 +                        * Some devices reference an output terminal as the
36633 +                        * source of extension units. This is incorrect, as
36634 +                        * output terminals only have an input pin, and thus
36635 +                        * can't be connected to any entity in the forward
36636 +                        * direction. The resulting topology would cause issues
36637 +                        * when registering the media controller graph. To
36638 +                        * avoid this problem, connect the extension unit to
36639 +                        * the source of the output terminal instead.
36640 +                        */
36641 +                       if (UVC_ENTITY_IS_OTERM(entity)) {
36642 +                               struct uvc_entity *source;
36644 +                               source = uvc_entity_by_id(chain->dev,
36645 +                                                         entity->baSourceID[0]);
36646 +                               if (!source) {
36647 +                                       uvc_dbg(chain->dev, DESCR,
36648 +                                               "Can't connect extension unit %u in chain\n",
36649 +                                               forward->id);
36650 +                                       break;
36651 +                               }
36653 +                               forward->baSourceID[0] = source->id;
36654 +                       }
36656                         list_add_tail(&forward->chain, &chain->entities);
36657                         if (!found)
36658                                 uvc_dbg_cont(PROBE, " (->");
36659 @@ -1735,6 +1760,13 @@ static int uvc_scan_chain_forward(struct uvc_video_chain *chain,
36660                                 return -EINVAL;
36661                         }
36663 +                       if (UVC_ENTITY_IS_OTERM(entity)) {
36664 +                               uvc_dbg(chain->dev, DESCR,
36665 +                                       "Unsupported connection between output terminals %u and %u\n",
36666 +                                       entity->id, forward->id);
36667 +                               break;
36668 +                       }
36670                         list_add_tail(&forward->chain, &chain->entities);
36671                         if (!found)
36672                                 uvc_dbg_cont(PROBE, " (->");
36673 diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
36674 index d29b861367ea..1ef611e08323 100644
36675 --- a/drivers/media/usb/zr364xx/zr364xx.c
36676 +++ b/drivers/media/usb/zr364xx/zr364xx.c
36677 @@ -1430,7 +1430,7 @@ static int zr364xx_probe(struct usb_interface *intf,
36678         if (hdl->error) {
36679                 err = hdl->error;
36680                 dev_err(&udev->dev, "couldn't register control\n");
36681 -               goto unregister;
36682 +               goto free_hdlr_and_unreg_dev;
36683         }
36684         /* save the init method used by this camera */
36685         cam->method = id->driver_info;
36686 @@ -1503,7 +1503,7 @@ static int zr364xx_probe(struct usb_interface *intf,
36687         if (!cam->read_endpoint) {
36688                 err = -ENOMEM;
36689                 dev_err(&intf->dev, "Could not find bulk-in endpoint\n");
36690 -               goto unregister;
36691 +               goto free_hdlr_and_unreg_dev;
36692         }
36694         /* v4l */
36695 @@ -1515,7 +1515,7 @@ static int zr364xx_probe(struct usb_interface *intf,
36696         /* load zr364xx board specific */
36697         err = zr364xx_board_init(cam);
36698         if (err)
36699 -               goto unregister;
36700 +               goto free_hdlr_and_unreg_dev;
36701         err = v4l2_ctrl_handler_setup(hdl);
36702         if (err)
36703                 goto board_uninit;
36704 @@ -1533,7 +1533,7 @@ static int zr364xx_probe(struct usb_interface *intf,
36705         err = video_register_device(&cam->vdev, VFL_TYPE_VIDEO, -1);
36706         if (err) {
36707                 dev_err(&udev->dev, "video_register_device failed\n");
36708 -               goto free_handler;
36709 +               goto board_uninit;
36710         }
36711         cam->v4l2_dev.release = zr364xx_release;
36713 @@ -1541,11 +1541,10 @@ static int zr364xx_probe(struct usb_interface *intf,
36714                  video_device_node_name(&cam->vdev));
36715         return 0;
36717 -free_handler:
36718 -       v4l2_ctrl_handler_free(hdl);
36719  board_uninit:
36720         zr364xx_board_uninit(cam);
36721 -unregister:
36722 +free_hdlr_and_unreg_dev:
36723 +       v4l2_ctrl_handler_free(hdl);
36724         v4l2_device_unregister(&cam->v4l2_dev);
36725  free_cam:
36726         kfree(cam);
36727 diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
36728 index 016cf6204cbb..6219c8185782 100644
36729 --- a/drivers/media/v4l2-core/v4l2-ctrls.c
36730 +++ b/drivers/media/v4l2-core/v4l2-ctrls.c
36731 @@ -1675,6 +1675,8 @@ static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx,
36732                 p_fwht_params->version = V4L2_FWHT_VERSION;
36733                 p_fwht_params->width = 1280;
36734                 p_fwht_params->height = 720;
36735 +               p_fwht_params->flags = V4L2_FWHT_FL_PIXENC_YUV |
36736 +                       (2 << V4L2_FWHT_FL_COMPONENTS_NUM_OFFSET);
36737                 break;
36738         }
36740 @@ -2395,7 +2397,16 @@ static void new_to_req(struct v4l2_ctrl_ref *ref)
36741         if (!ref)
36742                 return;
36743         ptr_to_ptr(ref->ctrl, ref->ctrl->p_new, ref->p_req);
36744 -       ref->req = ref;
36745 +       ref->valid_p_req = true;
36748 +/* Copy the current value to the request value */
36749 +static void cur_to_req(struct v4l2_ctrl_ref *ref)
36751 +       if (!ref)
36752 +               return;
36753 +       ptr_to_ptr(ref->ctrl, ref->ctrl->p_cur, ref->p_req);
36754 +       ref->valid_p_req = true;
36757  /* Copy the request value to the new value */
36758 @@ -2403,8 +2414,8 @@ static void req_to_new(struct v4l2_ctrl_ref *ref)
36760         if (!ref)
36761                 return;
36762 -       if (ref->req)
36763 -               ptr_to_ptr(ref->ctrl, ref->req->p_req, ref->ctrl->p_new);
36764 +       if (ref->valid_p_req)
36765 +               ptr_to_ptr(ref->ctrl, ref->p_req, ref->ctrl->p_new);
36766         else
36767                 ptr_to_ptr(ref->ctrl, ref->ctrl->p_cur, ref->ctrl->p_new);
36769 @@ -2541,7 +2552,15 @@ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
36770         if (hdl == NULL || hdl->buckets == NULL)
36771                 return;
36773 -       if (!hdl->req_obj.req && !list_empty(&hdl->requests)) {
36774 +       /*
36775 +        * If the main handler is freed and it is used by handler objects in
36776 +        * outstanding requests, then unbind and put those objects before
36777 +        * freeing the main handler.
36778 +        *
36779 +        * The main handler can be identified by having a NULL ops pointer in
36780 +        * the request object.
36781 +        */
36782 +       if (!hdl->req_obj.ops && !list_empty(&hdl->requests)) {
36783                 struct v4l2_ctrl_handler *req, *next_req;
36785                 list_for_each_entry_safe(req, next_req, &hdl->requests, requests) {
36786 @@ -3571,39 +3590,8 @@ static void v4l2_ctrl_request_queue(struct media_request_object *obj)
36787         struct v4l2_ctrl_handler *hdl =
36788                 container_of(obj, struct v4l2_ctrl_handler, req_obj);
36789         struct v4l2_ctrl_handler *main_hdl = obj->priv;
36790 -       struct v4l2_ctrl_handler *prev_hdl = NULL;
36791 -       struct v4l2_ctrl_ref *ref_ctrl, *ref_ctrl_prev = NULL;
36793         mutex_lock(main_hdl->lock);
36794 -       if (list_empty(&main_hdl->requests_queued))
36795 -               goto queue;
36797 -       prev_hdl = list_last_entry(&main_hdl->requests_queued,
36798 -                                  struct v4l2_ctrl_handler, requests_queued);
36799 -       /*
36800 -        * Note: prev_hdl and hdl must contain the same list of control
36801 -        * references, so if any differences are detected then that is a
36802 -        * driver bug and the WARN_ON is triggered.
36803 -        */
36804 -       mutex_lock(prev_hdl->lock);
36805 -       ref_ctrl_prev = list_first_entry(&prev_hdl->ctrl_refs,
36806 -                                        struct v4l2_ctrl_ref, node);
36807 -       list_for_each_entry(ref_ctrl, &hdl->ctrl_refs, node) {
36808 -               if (ref_ctrl->req)
36809 -                       continue;
36810 -               while (ref_ctrl_prev->ctrl->id < ref_ctrl->ctrl->id) {
36811 -                       /* Should never happen, but just in case... */
36812 -                       if (list_is_last(&ref_ctrl_prev->node,
36813 -                                        &prev_hdl->ctrl_refs))
36814 -                               break;
36815 -                       ref_ctrl_prev = list_next_entry(ref_ctrl_prev, node);
36816 -               }
36817 -               if (WARN_ON(ref_ctrl_prev->ctrl->id != ref_ctrl->ctrl->id))
36818 -                       break;
36819 -               ref_ctrl->req = ref_ctrl_prev->req;
36820 -       }
36821 -       mutex_unlock(prev_hdl->lock);
36822 -queue:
36823         list_add_tail(&hdl->requests_queued, &main_hdl->requests_queued);
36824         hdl->request_is_queued = true;
36825         mutex_unlock(main_hdl->lock);
36826 @@ -3615,8 +3603,8 @@ static void v4l2_ctrl_request_unbind(struct media_request_object *obj)
36827                 container_of(obj, struct v4l2_ctrl_handler, req_obj);
36828         struct v4l2_ctrl_handler *main_hdl = obj->priv;
36830 -       list_del_init(&hdl->requests);
36831         mutex_lock(main_hdl->lock);
36832 +       list_del_init(&hdl->requests);
36833         if (hdl->request_is_queued) {
36834                 list_del_init(&hdl->requests_queued);
36835                 hdl->request_is_queued = false;
36836 @@ -3660,7 +3648,7 @@ v4l2_ctrl_request_hdl_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id)
36838         struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id);
36840 -       return (ref && ref->req == ref) ? ref->ctrl : NULL;
36841 +       return (ref && ref->valid_p_req) ? ref->ctrl : NULL;
36843  EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_ctrl_find);
36845 @@ -3675,8 +3663,11 @@ static int v4l2_ctrl_request_bind(struct media_request *req,
36846         if (!ret) {
36847                 ret = media_request_object_bind(req, &req_ops,
36848                                                 from, false, &hdl->req_obj);
36849 -               if (!ret)
36850 +               if (!ret) {
36851 +                       mutex_lock(from->lock);
36852                         list_add_tail(&hdl->requests, &from->requests);
36853 +                       mutex_unlock(from->lock);
36854 +               }
36855         }
36856         return ret;
36858 @@ -3846,7 +3837,13 @@ static int class_check(struct v4l2_ctrl_handler *hdl, u32 which)
36859         return find_ref_lock(hdl, which | 1) ? 0 : -EINVAL;
36862 -/* Get extended controls. Allocates the helpers array if needed. */
36864 + * Get extended controls. Allocates the helpers array if needed.
36865 + *
36866 + * Note that v4l2_g_ext_ctrls_common() with 'which' set to
36867 + * V4L2_CTRL_WHICH_REQUEST_VAL is only called if the request was
36868 + * completed, and in that case valid_p_req is true for all controls.
36869 + */
36870  static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
36871                                    struct v4l2_ext_controls *cs,
36872                                    struct video_device *vdev)
36873 @@ -3855,9 +3852,10 @@ static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
36874         struct v4l2_ctrl_helper *helpers = helper;
36875         int ret;
36876         int i, j;
36877 -       bool def_value;
36878 +       bool is_default, is_request;
36880 -       def_value = (cs->which == V4L2_CTRL_WHICH_DEF_VAL);
36881 +       is_default = (cs->which == V4L2_CTRL_WHICH_DEF_VAL);
36882 +       is_request = (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL);
36884         cs->error_idx = cs->count;
36885         cs->which = V4L2_CTRL_ID2WHICH(cs->which);
36886 @@ -3883,11 +3881,9 @@ static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
36887                         ret = -EACCES;
36889         for (i = 0; !ret && i < cs->count; i++) {
36890 -               int (*ctrl_to_user)(struct v4l2_ext_control *c,
36891 -                                   struct v4l2_ctrl *ctrl);
36892                 struct v4l2_ctrl *master;
36894 -               ctrl_to_user = def_value ? def_to_user : cur_to_user;
36895 +               bool is_volatile = false;
36896 +               u32 idx = i;
36898                 if (helpers[i].mref == NULL)
36899                         continue;
36900 @@ -3897,31 +3893,48 @@ static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
36902                 v4l2_ctrl_lock(master);
36904 -               /* g_volatile_ctrl will update the new control values */
36905 -               if (!def_value &&
36906 +               /*
36907 +                * g_volatile_ctrl will update the new control values.
36908 +                * This makes no sense for V4L2_CTRL_WHICH_DEF_VAL and
36909 +                * V4L2_CTRL_WHICH_REQUEST_VAL. In the case of requests
36910 +                * it is v4l2_ctrl_request_complete() that copies the
36911 +                * volatile controls at the time of request completion
36912 +                * to the request, so you don't want to do that again.
36913 +                */
36914 +               if (!is_default && !is_request &&
36915                     ((master->flags & V4L2_CTRL_FLAG_VOLATILE) ||
36916                     (master->has_volatiles && !is_cur_manual(master)))) {
36917                         for (j = 0; j < master->ncontrols; j++)
36918                                 cur_to_new(master->cluster[j]);
36919                         ret = call_op(master, g_volatile_ctrl);
36920 -                       ctrl_to_user = new_to_user;
36921 +                       is_volatile = true;
36922                 }
36923 -               /* If OK, then copy the current (for non-volatile controls)
36924 -                  or the new (for volatile controls) control values to the
36925 -                  caller */
36926 -               if (!ret) {
36927 -                       u32 idx = i;
36929 -                       do {
36930 -                               if (helpers[idx].ref->req)
36931 -                                       ret = req_to_user(cs->controls + idx,
36932 -                                               helpers[idx].ref->req);
36933 -                               else
36934 -                                       ret = ctrl_to_user(cs->controls + idx,
36935 -                                               helpers[idx].ref->ctrl);
36936 -                               idx = helpers[idx].next;
36937 -                       } while (!ret && idx);
36938 +               if (ret) {
36939 +                       v4l2_ctrl_unlock(master);
36940 +                       break;
36941                 }
36943 +               /*
36944 +                * Copy the default value (if is_default is true), the
36945 +                * request value (if is_request is true and p_req is valid),
36946 +                * the new volatile value (if is_volatile is true) or the
36947 +                * current value.
36948 +                */
36949 +               do {
36950 +                       struct v4l2_ctrl_ref *ref = helpers[idx].ref;
36952 +                       if (is_default)
36953 +                               ret = def_to_user(cs->controls + idx, ref->ctrl);
36954 +                       else if (is_request && ref->valid_p_req)
36955 +                               ret = req_to_user(cs->controls + idx, ref);
36956 +                       else if (is_volatile)
36957 +                               ret = new_to_user(cs->controls + idx, ref->ctrl);
36958 +                       else
36959 +                               ret = cur_to_user(cs->controls + idx, ref->ctrl);
36960 +                       idx = helpers[idx].next;
36961 +               } while (!ret && idx);
36963                 v4l2_ctrl_unlock(master);
36964         }
36966 @@ -4564,8 +4577,6 @@ void v4l2_ctrl_request_complete(struct media_request *req,
36967                 unsigned int i;
36969                 if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) {
36970 -                       ref->req = ref;
36972                         v4l2_ctrl_lock(master);
36973                         /* g_volatile_ctrl will update the current control values */
36974                         for (i = 0; i < master->ncontrols; i++)
36975 @@ -4575,21 +4586,12 @@ void v4l2_ctrl_request_complete(struct media_request *req,
36976                         v4l2_ctrl_unlock(master);
36977                         continue;
36978                 }
36979 -               if (ref->req == ref)
36980 +               if (ref->valid_p_req)
36981                         continue;
36983 +               /* Copy the current control value into the request */
36984                 v4l2_ctrl_lock(ctrl);
36985 -               if (ref->req) {
36986 -                       ptr_to_ptr(ctrl, ref->req->p_req, ref->p_req);
36987 -               } else {
36988 -                       ptr_to_ptr(ctrl, ctrl->p_cur, ref->p_req);
36989 -                       /*
36990 -                        * Set ref->req to ensure that when userspace wants to
36991 -                        * obtain the controls of this request it will take
36992 -                        * this value and not the current value of the control.
36993 -                        */
36994 -                       ref->req = ref;
36995 -               }
36996 +               cur_to_req(ref);
36997                 v4l2_ctrl_unlock(ctrl);
36998         }
37000 @@ -4653,7 +4655,7 @@ int v4l2_ctrl_request_setup(struct media_request *req,
37001                                 struct v4l2_ctrl_ref *r =
37002                                         find_ref(hdl, master->cluster[i]->id);
37004 -                               if (r->req && r == r->req) {
37005 +                               if (r->valid_p_req) {
37006                                         have_new_data = true;
37007                                         break;
37008                                 }
37009 diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
37010 index cfa730cfd145..f80c2ea39ca4 100644
37011 --- a/drivers/memory/omap-gpmc.c
37012 +++ b/drivers/memory/omap-gpmc.c
37013 @@ -1009,8 +1009,8 @@ EXPORT_SYMBOL(gpmc_cs_request);
37015  void gpmc_cs_free(int cs)
37017 -       struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
37018 -       struct resource *res = &gpmc->mem;
37019 +       struct gpmc_cs_data *gpmc;
37020 +       struct resource *res;
37022         spin_lock(&gpmc_mem_lock);
37023         if (cs >= gpmc_cs_num || cs < 0 || !gpmc_cs_reserved(cs)) {
37024 @@ -1018,6 +1018,9 @@ void gpmc_cs_free(int cs)
37025                 spin_unlock(&gpmc_mem_lock);
37026                 return;
37027         }
37028 +       gpmc = &gpmc_cs[cs];
37029 +       res = &gpmc->mem;
37031         gpmc_cs_disable_mem(cs);
37032         if (res->flags)
37033                 release_resource(res);
37034 diff --git a/drivers/memory/pl353-smc.c b/drivers/memory/pl353-smc.c
37035 index 3b5b1045edd9..9c0a28416777 100644
37036 --- a/drivers/memory/pl353-smc.c
37037 +++ b/drivers/memory/pl353-smc.c
37038 @@ -63,7 +63,7 @@
37039  /* ECC memory config register specific constants */
37040  #define PL353_SMC_ECC_MEMCFG_MODE_MASK 0xC
37041  #define PL353_SMC_ECC_MEMCFG_MODE_SHIFT        2
37042 -#define PL353_SMC_ECC_MEMCFG_PGSIZE_MASK       0xC
37043 +#define PL353_SMC_ECC_MEMCFG_PGSIZE_MASK       0x3
37045  #define PL353_SMC_DC_UPT_NAND_REGS     ((4 << 23) |    /* CS: NAND chip */ \
37046                                  (2 << 21))     /* UpdateRegs operation */
37047 diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
37048 index 8d36e221def1..45eed659b0c6 100644
37049 --- a/drivers/memory/renesas-rpc-if.c
37050 +++ b/drivers/memory/renesas-rpc-if.c
37051 @@ -192,10 +192,10 @@ int rpcif_sw_init(struct rpcif *rpc, struct device *dev)
37052         }
37054         res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap");
37055 -       rpc->size = resource_size(res);
37056         rpc->dirmap = devm_ioremap_resource(&pdev->dev, res);
37057         if (IS_ERR(rpc->dirmap))
37058                 rpc->dirmap = NULL;
37059 +       rpc->size = resource_size(res);
37061         rpc->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
37063 diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c
37064 index 1dabb509dec3..dee503640e12 100644
37065 --- a/drivers/memory/samsung/exynos5422-dmc.c
37066 +++ b/drivers/memory/samsung/exynos5422-dmc.c
37067 @@ -1298,7 +1298,9 @@ static int exynos5_dmc_init_clks(struct exynos5_dmc *dmc)
37069         dmc->curr_volt = target_volt;
37071 -       clk_set_parent(dmc->mout_mx_mspll_ccore, dmc->mout_spll);
37072 +       ret = clk_set_parent(dmc->mout_mx_mspll_ccore, dmc->mout_spll);
37073 +       if (ret)
37074 +               return ret;
37076         clk_prepare_enable(dmc->fout_bpll);
37077         clk_prepare_enable(dmc->mout_bpll);
37078 diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c
37079 index 077d9ab112b7..d919ae9691e2 100644
37080 --- a/drivers/mfd/arizona-irq.c
37081 +++ b/drivers/mfd/arizona-irq.c
37082 @@ -100,7 +100,7 @@ static irqreturn_t arizona_irq_thread(int irq, void *data)
37083         unsigned int val;
37084         int ret;
37086 -       ret = pm_runtime_get_sync(arizona->dev);
37087 +       ret = pm_runtime_resume_and_get(arizona->dev);
37088         if (ret < 0) {
37089                 dev_err(arizona->dev, "Failed to resume device: %d\n", ret);
37090                 return IRQ_NONE;
37091 diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c
37092 index 3781d0bb7786..783a14af18e2 100644
37093 --- a/drivers/mfd/da9063-i2c.c
37094 +++ b/drivers/mfd/da9063-i2c.c
37095 @@ -442,6 +442,16 @@ static int da9063_i2c_probe(struct i2c_client *i2c,
37096                 return ret;
37097         }
37099 +       /* If SMBus is not available and only I2C is possible, enter I2C mode */
37100 +       if (i2c_check_functionality(i2c->adapter, I2C_FUNC_I2C)) {
37101 +               ret = regmap_clear_bits(da9063->regmap, DA9063_REG_CONFIG_J,
37102 +                                       DA9063_TWOWIRE_TO);
37103 +               if (ret < 0) {
37104 +                       dev_err(da9063->dev, "Failed to set Two-Wire Bus Mode.\n");
37105 +                       return -EIO;
37106 +               }
37107 +       }
37109         return da9063_device_init(da9063, i2c->irq);
37112 diff --git a/drivers/mfd/intel_pmt.c b/drivers/mfd/intel_pmt.c
37113 index 744b230cdcca..65da2b17a204 100644
37114 --- a/drivers/mfd/intel_pmt.c
37115 +++ b/drivers/mfd/intel_pmt.c
37116 @@ -79,19 +79,18 @@ static int pmt_add_dev(struct pci_dev *pdev, struct intel_dvsec_header *header,
37117         case DVSEC_INTEL_ID_WATCHER:
37118                 if (quirks & PMT_QUIRK_NO_WATCHER) {
37119                         dev_info(dev, "Watcher not supported\n");
37120 -                       return 0;
37121 +                       return -EINVAL;
37122                 }
37123                 name = "pmt_watcher";
37124                 break;
37125         case DVSEC_INTEL_ID_CRASHLOG:
37126                 if (quirks & PMT_QUIRK_NO_CRASHLOG) {
37127                         dev_info(dev, "Crashlog not supported\n");
37128 -                       return 0;
37129 +                       return -EINVAL;
37130                 }
37131                 name = "pmt_crashlog";
37132                 break;
37133         default:
37134 -               dev_err(dev, "Unrecognized PMT capability: %d\n", id);
37135                 return -EINVAL;
37136         }
37138 @@ -174,12 +173,8 @@ static int pmt_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
37139                 header.offset = INTEL_DVSEC_TABLE_OFFSET(table);
37141                 ret = pmt_add_dev(pdev, &header, quirks);
37142 -               if (ret) {
37143 -                       dev_warn(&pdev->dev,
37144 -                                "Failed to add device for DVSEC id %d\n",
37145 -                                header.id);
37146 +               if (ret)
37147                         continue;
37148 -               }
37150                 found_devices = true;
37151         } while (true);
37152 diff --git a/drivers/mfd/stm32-timers.c b/drivers/mfd/stm32-timers.c
37153 index add603359124..44ed2fce0319 100644
37154 --- a/drivers/mfd/stm32-timers.c
37155 +++ b/drivers/mfd/stm32-timers.c
37156 @@ -158,13 +158,18 @@ static const struct regmap_config stm32_timers_regmap_cfg = {
37158  static void stm32_timers_get_arr_size(struct stm32_timers *ddata)
37160 +       u32 arr;
37162 +       /* Backup ARR to restore it after getting the maximum value */
37163 +       regmap_read(ddata->regmap, TIM_ARR, &arr);
37165         /*
37166          * Only the available bits will be written so when readback
37167          * we get the maximum value of auto reload register
37168          */
37169         regmap_write(ddata->regmap, TIM_ARR, ~0L);
37170         regmap_read(ddata->regmap, TIM_ARR, &ddata->max_arr);
37171 -       regmap_write(ddata->regmap, TIM_ARR, 0x0);
37172 +       regmap_write(ddata->regmap, TIM_ARR, arr);
37175  static int stm32_timers_dma_probe(struct device *dev,
37176 diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
37177 index 90f3292230c9..1dd39483e7c1 100644
37178 --- a/drivers/mfd/stmpe.c
37179 +++ b/drivers/mfd/stmpe.c
37180 @@ -312,7 +312,7 @@ EXPORT_SYMBOL_GPL(stmpe_set_altfunc);
37181   * GPIO (all variants)
37182   */
37184 -static const struct resource stmpe_gpio_resources[] = {
37185 +static struct resource stmpe_gpio_resources[] = {
37186         /* Start and end filled dynamically */
37187         {
37188                 .flags  = IORESOURCE_IRQ,
37189 @@ -336,7 +336,8 @@ static const struct mfd_cell stmpe_gpio_cell_noirq = {
37190   * Keypad (1601, 2401, 2403)
37191   */
37193 -static const struct resource stmpe_keypad_resources[] = {
37194 +static struct resource stmpe_keypad_resources[] = {
37195 +       /* Start and end filled dynamically */
37196         {
37197                 .name   = "KEYPAD",
37198                 .flags  = IORESOURCE_IRQ,
37199 @@ -357,7 +358,8 @@ static const struct mfd_cell stmpe_keypad_cell = {
37200  /*
37201   * PWM (1601, 2401, 2403)
37202   */
37203 -static const struct resource stmpe_pwm_resources[] = {
37204 +static struct resource stmpe_pwm_resources[] = {
37205 +       /* Start and end filled dynamically */
37206         {
37207                 .name   = "PWM0",
37208                 .flags  = IORESOURCE_IRQ,
37209 @@ -445,7 +447,8 @@ static struct stmpe_variant_info stmpe801_noirq = {
37210   * Touchscreen (STMPE811 or STMPE610)
37211   */
37213 -static const struct resource stmpe_ts_resources[] = {
37214 +static struct resource stmpe_ts_resources[] = {
37215 +       /* Start and end filled dynamically */
37216         {
37217                 .name   = "TOUCH_DET",
37218                 .flags  = IORESOURCE_IRQ,
37219 @@ -467,7 +470,8 @@ static const struct mfd_cell stmpe_ts_cell = {
37220   * ADC (STMPE811)
37221   */
37223 -static const struct resource stmpe_adc_resources[] = {
37224 +static struct resource stmpe_adc_resources[] = {
37225 +       /* Start and end filled dynamically */
37226         {
37227                 .name   = "STMPE_TEMP_SENS",
37228                 .flags  = IORESOURCE_IRQ,
37229 diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
37230 index b690796d24d4..448b13da62b4 100644
37231 --- a/drivers/mfd/ucb1x00-core.c
37232 +++ b/drivers/mfd/ucb1x00-core.c
37233 @@ -250,7 +250,7 @@ unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync)
37234                         break;
37235                 /* yield to other processes */
37236                 set_current_state(TASK_INTERRUPTIBLE);
37237 -               schedule_timeout(1);
37238 +               schedule_min_hrtimeout();
37239         }
37241         return UCB_ADC_DAT(val);
37242 diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
37243 index 945701bce553..2e081a58da6c 100644
37244 --- a/drivers/misc/kgdbts.c
37245 +++ b/drivers/misc/kgdbts.c
37246 @@ -95,19 +95,19 @@
37248  #include <asm/sections.h>
37250 -#define v1printk(a...) do { \
37251 -       if (verbose) \
37252 -               printk(KERN_INFO a); \
37253 -       } while (0)
37254 -#define v2printk(a...) do { \
37255 -       if (verbose > 1) \
37256 -               printk(KERN_INFO a); \
37257 -               touch_nmi_watchdog();   \
37258 -       } while (0)
37259 -#define eprintk(a...) do { \
37260 -               printk(KERN_ERR a); \
37261 -               WARN_ON(1); \
37262 -       } while (0)
37263 +#define v1printk(a...) do {            \
37264 +       if (verbose)                    \
37265 +               printk(KERN_INFO a);    \
37266 +} while (0)
37267 +#define v2printk(a...) do {            \
37268 +       if (verbose > 1)                \
37269 +               printk(KERN_INFO a);    \
37270 +       touch_nmi_watchdog();           \
37271 +} while (0)
37272 +#define eprintk(a...) do {             \
37273 +       printk(KERN_ERR a);             \
37274 +       WARN_ON(1);                     \
37275 +} while (0)
37276  #define MAX_CONFIG_LEN         40
37278  static struct kgdb_io kgdbts_io_ops;
37279 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
37280 index dd65cedf3b12..9d14bf444481 100644
37281 --- a/drivers/misc/lis3lv02d/lis3lv02d.c
37282 +++ b/drivers/misc/lis3lv02d/lis3lv02d.c
37283 @@ -208,7 +208,7 @@ static int lis3_3dc_rates[16] = {0, 1, 10, 25, 50, 100, 200, 400, 1600, 5000};
37284  static int lis3_3dlh_rates[4] = {50, 100, 400, 1000};
37286  /* ODR is Output Data Rate */
37287 -static int lis3lv02d_get_odr(struct lis3lv02d *lis3)
37288 +static int lis3lv02d_get_odr_index(struct lis3lv02d *lis3)
37290         u8 ctrl;
37291         int shift;
37292 @@ -216,15 +216,23 @@ static int lis3lv02d_get_odr(struct lis3lv02d *lis3)
37293         lis3->read(lis3, CTRL_REG1, &ctrl);
37294         ctrl &= lis3->odr_mask;
37295         shift = ffs(lis3->odr_mask) - 1;
37296 -       return lis3->odrs[(ctrl >> shift)];
37297 +       return (ctrl >> shift);
37300  static int lis3lv02d_get_pwron_wait(struct lis3lv02d *lis3)
37302 -       int div = lis3lv02d_get_odr(lis3);
37303 +       int odr_idx = lis3lv02d_get_odr_index(lis3);
37304 +       int div = lis3->odrs[odr_idx];
37306 -       if (WARN_ONCE(div == 0, "device returned spurious data"))
37307 +       if (div == 0) {
37308 +               if (odr_idx == 0) {
37309 +                       /* Power-down mode, not sampling no need to sleep */
37310 +                       return 0;
37311 +               }
37313 +               dev_err(&lis3->pdev->dev, "Error unknown odrs-index: %d\n", odr_idx);
37314                 return -ENXIO;
37315 +       }
37317         /* LIS3 power on delay is quite long */
37318         msleep(lis3->pwron_delay / div);
37319 @@ -816,9 +824,12 @@ static ssize_t lis3lv02d_rate_show(struct device *dev,
37320                         struct device_attribute *attr, char *buf)
37322         struct lis3lv02d *lis3 = dev_get_drvdata(dev);
37323 +       int odr_idx;
37325         lis3lv02d_sysfs_poweron(lis3);
37326 -       return sprintf(buf, "%d\n", lis3lv02d_get_odr(lis3));
37328 +       odr_idx = lis3lv02d_get_odr_index(lis3);
37329 +       return sprintf(buf, "%d\n", lis3->odrs[odr_idx]);
37332  static ssize_t lis3lv02d_rate_set(struct device *dev,
37333 diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
37334 index 14be76d4c2e6..cb34925e10f1 100644
37335 --- a/drivers/misc/mei/hw-me-regs.h
37336 +++ b/drivers/misc/mei/hw-me-regs.h
37337 @@ -105,6 +105,7 @@
37339  #define MEI_DEV_ID_ADP_S      0x7AE8  /* Alder Lake Point S */
37340  #define MEI_DEV_ID_ADP_LP     0x7A60  /* Alder Lake Point LP */
37341 +#define MEI_DEV_ID_ADP_P      0x51E0  /* Alder Lake Point P */
37343  /*
37344   * MEI HW Section
37345 diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
37346 index a7e179626b63..c3393b383e59 100644
37347 --- a/drivers/misc/mei/pci-me.c
37348 +++ b/drivers/misc/mei/pci-me.c
37349 @@ -111,6 +111,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
37351         {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_S, MEI_ME_PCH15_CFG)},
37352         {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_LP, MEI_ME_PCH15_CFG)},
37353 +       {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
37355         /* required last entry */
37356         {0, }
37357 diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
37358 index 8e6607fc8a67..b9ab770bbdb5 100644
37359 --- a/drivers/misc/sgi-xp/xpc_channel.c
37360 +++ b/drivers/misc/sgi-xp/xpc_channel.c
37361 @@ -834,7 +834,7 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
37363         atomic_inc(&ch->n_on_msg_allocate_wq);
37364         prepare_to_wait(&ch->msg_allocate_wq, &wait, TASK_INTERRUPTIBLE);
37365 -       ret = schedule_timeout(1);
37366 +       ret = schedule_min_hrtimeout();
37367         finish_wait(&ch->msg_allocate_wq, &wait);
37368         atomic_dec(&ch->n_on_msg_allocate_wq);
37370 diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
37371 index 345addd9306d..fa8a7fce4481 100644
37372 --- a/drivers/misc/vmw_vmci/vmci_doorbell.c
37373 +++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
37374 @@ -326,7 +326,7 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
37375  bool vmci_dbell_register_notification_bitmap(u64 bitmap_ppn)
37377         int result;
37378 -       struct vmci_notify_bm_set_msg bitmap_set_msg;
37379 +       struct vmci_notify_bm_set_msg bitmap_set_msg = { };
37381         bitmap_set_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
37382                                                   VMCI_SET_NOTIFY_BITMAP);
37383 diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
37384 index cc8eeb361fcd..1018dc77269d 100644
37385 --- a/drivers/misc/vmw_vmci/vmci_guest.c
37386 +++ b/drivers/misc/vmw_vmci/vmci_guest.c
37387 @@ -168,7 +168,7 @@ static int vmci_check_host_caps(struct pci_dev *pdev)
37388                                 VMCI_UTIL_NUM_RESOURCES * sizeof(u32);
37389         struct vmci_datagram *check_msg;
37391 -       check_msg = kmalloc(msg_size, GFP_KERNEL);
37392 +       check_msg = kzalloc(msg_size, GFP_KERNEL);
37393         if (!check_msg) {
37394                 dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__);
37395                 return -ENOMEM;
37396 diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
37397 index d666e24fbe0e..a4c06ef67394 100644
37398 --- a/drivers/mmc/core/block.c
37399 +++ b/drivers/mmc/core/block.c
37400 @@ -572,6 +572,18 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
37401                 main_md->part_curr = value & EXT_CSD_PART_CONFIG_ACC_MASK;
37402         }
37404 +       /*
37405 +        * Make sure to update CACHE_CTRL in case it was changed. The cache
37406 +        * will get turned back on if the card is re-initialized, e.g.
37407 +        * suspend/resume or hw reset in recovery.
37408 +        */
37409 +       if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_CACHE_CTRL) &&
37410 +           (cmd.opcode == MMC_SWITCH)) {
37411 +               u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg) & 1;
37413 +               card->ext_csd.cache_ctrl = value;
37414 +       }
37416         /*
37417          * According to the SD specs, some commands require a delay after
37418          * issuing the command.
37419 @@ -2224,6 +2236,10 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
37420         case MMC_ISSUE_ASYNC:
37421                 switch (req_op(req)) {
37422                 case REQ_OP_FLUSH:
37423 +                       if (!mmc_cache_enabled(host)) {
37424 +                               blk_mq_end_request(req, BLK_STS_OK);
37425 +                               return MMC_REQ_FINISHED;
37426 +                       }
37427                         ret = mmc_blk_cqe_issue_flush(mq, req);
37428                         break;
37429                 case REQ_OP_READ:
37430 diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
37431 index 1136b859ddd8..e30c4e88e404 100644
37432 --- a/drivers/mmc/core/core.c
37433 +++ b/drivers/mmc/core/core.c
37434 @@ -1207,7 +1207,7 @@ int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
37436         err = mmc_wait_for_cmd(host, &cmd, 0);
37437         if (err)
37438 -               return err;
37439 +               goto power_cycle;
37441         if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
37442                 return -EIO;
37443 @@ -2369,80 +2369,6 @@ void mmc_stop_host(struct mmc_host *host)
37444         mmc_release_host(host);
37447 -#ifdef CONFIG_PM_SLEEP
37448 -/* Do the card removal on suspend if card is assumed removeable
37449 - * Do that in pm notifier while userspace isn't yet frozen, so we will be able
37450 -   to sync the card.
37452 -static int mmc_pm_notify(struct notifier_block *notify_block,
37453 -                       unsigned long mode, void *unused)
37455 -       struct mmc_host *host = container_of(
37456 -               notify_block, struct mmc_host, pm_notify);
37457 -       unsigned long flags;
37458 -       int err = 0;
37460 -       switch (mode) {
37461 -       case PM_HIBERNATION_PREPARE:
37462 -       case PM_SUSPEND_PREPARE:
37463 -       case PM_RESTORE_PREPARE:
37464 -               spin_lock_irqsave(&host->lock, flags);
37465 -               host->rescan_disable = 1;
37466 -               spin_unlock_irqrestore(&host->lock, flags);
37467 -               cancel_delayed_work_sync(&host->detect);
37469 -               if (!host->bus_ops)
37470 -                       break;
37472 -               /* Validate prerequisites for suspend */
37473 -               if (host->bus_ops->pre_suspend)
37474 -                       err = host->bus_ops->pre_suspend(host);
37475 -               if (!err)
37476 -                       break;
37478 -               if (!mmc_card_is_removable(host)) {
37479 -                       dev_warn(mmc_dev(host),
37480 -                                "pre_suspend failed for non-removable host: "
37481 -                                "%d\n", err);
37482 -                       /* Avoid removing non-removable hosts */
37483 -                       break;
37484 -               }
37486 -               /* Calling bus_ops->remove() with a claimed host can deadlock */
37487 -               host->bus_ops->remove(host);
37488 -               mmc_claim_host(host);
37489 -               mmc_detach_bus(host);
37490 -               mmc_power_off(host);
37491 -               mmc_release_host(host);
37492 -               host->pm_flags = 0;
37493 -               break;
37495 -       case PM_POST_SUSPEND:
37496 -       case PM_POST_HIBERNATION:
37497 -       case PM_POST_RESTORE:
37499 -               spin_lock_irqsave(&host->lock, flags);
37500 -               host->rescan_disable = 0;
37501 -               spin_unlock_irqrestore(&host->lock, flags);
37502 -               _mmc_detect_change(host, 0, false);
37504 -       }
37506 -       return 0;
37509 -void mmc_register_pm_notifier(struct mmc_host *host)
37511 -       host->pm_notify.notifier_call = mmc_pm_notify;
37512 -       register_pm_notifier(&host->pm_notify);
37515 -void mmc_unregister_pm_notifier(struct mmc_host *host)
37517 -       unregister_pm_notifier(&host->pm_notify);
37519 -#endif
37521  static int __init mmc_init(void)
37523         int ret;
37524 diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
37525 index 575ac0257af2..db3c9c68875d 100644
37526 --- a/drivers/mmc/core/core.h
37527 +++ b/drivers/mmc/core/core.h
37528 @@ -29,6 +29,7 @@ struct mmc_bus_ops {
37529         int (*shutdown)(struct mmc_host *);
37530         int (*hw_reset)(struct mmc_host *);
37531         int (*sw_reset)(struct mmc_host *);
37532 +       bool (*cache_enabled)(struct mmc_host *);
37533  };
37535  void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
37536 @@ -93,14 +94,6 @@ int mmc_execute_tuning(struct mmc_card *card);
37537  int mmc_hs200_to_hs400(struct mmc_card *card);
37538  int mmc_hs400_to_hs200(struct mmc_card *card);
37540 -#ifdef CONFIG_PM_SLEEP
37541 -void mmc_register_pm_notifier(struct mmc_host *host);
37542 -void mmc_unregister_pm_notifier(struct mmc_host *host);
37543 -#else
37544 -static inline void mmc_register_pm_notifier(struct mmc_host *host) { }
37545 -static inline void mmc_unregister_pm_notifier(struct mmc_host *host) { }
37546 -#endif
37548  void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq);
37549  bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq);
37551 @@ -171,4 +164,12 @@ static inline void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
37552                 host->ops->post_req(host, mrq, err);
37555 +static inline bool mmc_cache_enabled(struct mmc_host *host)
37557 +       if (host->bus_ops->cache_enabled)
37558 +               return host->bus_ops->cache_enabled(host);
37560 +       return false;
37563  #endif
37564 diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
37565 index 9b89a91b6b47..fe05b3645fe9 100644
37566 --- a/drivers/mmc/core/host.c
37567 +++ b/drivers/mmc/core/host.c
37568 @@ -35,6 +35,42 @@
37570  static DEFINE_IDA(mmc_host_ida);
37572 +#ifdef CONFIG_PM_SLEEP
37573 +static int mmc_host_class_prepare(struct device *dev)
37575 +       struct mmc_host *host = cls_dev_to_mmc_host(dev);
37577 +       /*
37578 +        * It's safe to access the bus_ops pointer, as both userspace and the
37579 +        * workqueue for detecting cards are frozen at this point.
37580 +        */
37581 +       if (!host->bus_ops)
37582 +               return 0;
37584 +       /* Validate conditions for system suspend. */
37585 +       if (host->bus_ops->pre_suspend)
37586 +               return host->bus_ops->pre_suspend(host);
37588 +       return 0;
37591 +static void mmc_host_class_complete(struct device *dev)
37593 +       struct mmc_host *host = cls_dev_to_mmc_host(dev);
37595 +       _mmc_detect_change(host, 0, false);
37598 +static const struct dev_pm_ops mmc_host_class_dev_pm_ops = {
37599 +       .prepare = mmc_host_class_prepare,
37600 +       .complete = mmc_host_class_complete,
37603 +#define MMC_HOST_CLASS_DEV_PM_OPS (&mmc_host_class_dev_pm_ops)
37604 +#else
37605 +#define MMC_HOST_CLASS_DEV_PM_OPS NULL
37606 +#endif
37608  static void mmc_host_classdev_release(struct device *dev)
37610         struct mmc_host *host = cls_dev_to_mmc_host(dev);
37611 @@ -46,6 +82,7 @@ static void mmc_host_classdev_release(struct device *dev)
37612  static struct class mmc_host_class = {
37613         .name           = "mmc_host",
37614         .dev_release    = mmc_host_classdev_release,
37615 +       .pm             = MMC_HOST_CLASS_DEV_PM_OPS,
37616  };
37618  int mmc_register_host_class(void)
37619 @@ -538,8 +575,6 @@ int mmc_add_host(struct mmc_host *host)
37620  #endif
37622         mmc_start_host(host);
37623 -       mmc_register_pm_notifier(host);
37625         return 0;
37628 @@ -555,7 +590,6 @@ EXPORT_SYMBOL(mmc_add_host);
37629   */
37630  void mmc_remove_host(struct mmc_host *host)
37632 -       mmc_unregister_pm_notifier(host);
37633         mmc_stop_host(host);
37635  #ifdef CONFIG_DEBUG_FS
37636 diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
37637 index 8741271d3971..4d2b4b0da93c 100644
37638 --- a/drivers/mmc/core/mmc.c
37639 +++ b/drivers/mmc/core/mmc.c
37640 @@ -2029,6 +2029,12 @@ static void mmc_detect(struct mmc_host *host)
37641         }
37644 +static bool _mmc_cache_enabled(struct mmc_host *host)
37646 +       return host->card->ext_csd.cache_size > 0 &&
37647 +              host->card->ext_csd.cache_ctrl & 1;
37650  static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
37652         int err = 0;
37653 @@ -2208,6 +2214,7 @@ static const struct mmc_bus_ops mmc_ops = {
37654         .alive = mmc_alive,
37655         .shutdown = mmc_shutdown,
37656         .hw_reset = _mmc_hw_reset,
37657 +       .cache_enabled = _mmc_cache_enabled,
37658  };
37660  /*
37661 diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
37662 index 265d95ec82ce..c458f6b626a2 100644
37663 --- a/drivers/mmc/core/mmc_ops.c
37664 +++ b/drivers/mmc/core/mmc_ops.c
37665 @@ -988,9 +988,7 @@ int mmc_flush_cache(struct mmc_card *card)
37667         int err = 0;
37669 -       if (mmc_card_mmc(card) &&
37670 -                       (card->ext_csd.cache_size > 0) &&
37671 -                       (card->ext_csd.cache_ctrl & 1)) {
37672 +       if (mmc_cache_enabled(card->host)) {
37673                 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
37674                                  EXT_CSD_FLUSH_CACHE, 1,
37675                                  MMC_CACHE_FLUSH_TIMEOUT_MS);
37676 diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
37677 index 6fa51a6ed058..2c48d6504101 100644
37678 --- a/drivers/mmc/core/sd.c
37679 +++ b/drivers/mmc/core/sd.c
37680 @@ -135,6 +135,9 @@ static int mmc_decode_csd(struct mmc_card *card)
37681                         csd->erase_size = UNSTUFF_BITS(resp, 39, 7) + 1;
37682                         csd->erase_size <<= csd->write_blkbits - 9;
37683                 }
37685 +               if (UNSTUFF_BITS(resp, 13, 1))
37686 +                       mmc_card_set_readonly(card);
37687                 break;
37688         case 1:
37689                 /*
37690 @@ -169,6 +172,9 @@ static int mmc_decode_csd(struct mmc_card *card)
37691                 csd->write_blkbits = 9;
37692                 csd->write_partial = 0;
37693                 csd->erase_size = 1;
37695 +               if (UNSTUFF_BITS(resp, 13, 1))
37696 +                       mmc_card_set_readonly(card);
37697                 break;
37698         default:
37699                 pr_err("%s: unrecognised CSD structure version %d\n",
37700 diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
37701 index 0fda7784cab2..3eb94ac2712e 100644
37702 --- a/drivers/mmc/core/sdio.c
37703 +++ b/drivers/mmc/core/sdio.c
37704 @@ -985,21 +985,37 @@ static void mmc_sdio_detect(struct mmc_host *host)
37705   */
37706  static int mmc_sdio_pre_suspend(struct mmc_host *host)
37708 -       int i, err = 0;
37709 +       int i;
37711         for (i = 0; i < host->card->sdio_funcs; i++) {
37712                 struct sdio_func *func = host->card->sdio_func[i];
37713                 if (func && sdio_func_present(func) && func->dev.driver) {
37714                         const struct dev_pm_ops *pmops = func->dev.driver->pm;
37715 -                       if (!pmops || !pmops->suspend || !pmops->resume) {
37716 +                       if (!pmops || !pmops->suspend || !pmops->resume)
37717                                 /* force removal of entire card in that case */
37718 -                               err = -ENOSYS;
37719 -                               break;
37720 -                       }
37721 +                               goto remove;
37722                 }
37723         }
37725 -       return err;
37726 +       return 0;
37728 +remove:
37729 +       if (!mmc_card_is_removable(host)) {
37730 +               dev_warn(mmc_dev(host),
37731 +                        "missing suspend/resume ops for non-removable SDIO card\n");
37732 +               /* Don't remove a non-removable card - we can't re-detect it. */
37733 +               return 0;
37734 +       }
37736 +       /* Remove the SDIO card and let it be re-detected later on. */
37737 +       mmc_sdio_remove(host);
37738 +       mmc_claim_host(host);
37739 +       mmc_detach_bus(host);
37740 +       mmc_power_off(host);
37741 +       mmc_release_host(host);
37742 +       host->pm_flags = 0;
37744 +       return 0;
37747  /*
37748 diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
37749 index f9780c65ebe9..f24623aac2db 100644
37750 --- a/drivers/mmc/host/sdhci-brcmstb.c
37751 +++ b/drivers/mmc/host/sdhci-brcmstb.c
37752 @@ -199,7 +199,6 @@ static int sdhci_brcmstb_add_host(struct sdhci_host *host,
37753         if (dma64) {
37754                 dev_dbg(mmc_dev(host->mmc), "Using 64 bit DMA\n");
37755                 cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
37756 -               cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ;
37757         }
37759         ret = cqhci_init(cq_host, host->mmc, dma64);
37760 diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
37761 index a20459744d21..94327988da91 100644
37762 --- a/drivers/mmc/host/sdhci-esdhc-imx.c
37763 +++ b/drivers/mmc/host/sdhci-esdhc-imx.c
37764 @@ -1488,7 +1488,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
37766         mmc_of_parse_voltage(np, &host->ocr_mask);
37768 -       if (esdhc_is_usdhc(imx_data)) {
37769 +       if (esdhc_is_usdhc(imx_data) && !IS_ERR(imx_data->pinctrl)) {
37770                 imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
37771                                                 ESDHC_PINCTRL_STATE_100MHZ);
37772                 imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
37773 diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
37774 index 9552708846ca..bf04a08eeba1 100644
37775 --- a/drivers/mmc/host/sdhci-pci-core.c
37776 +++ b/drivers/mmc/host/sdhci-pci-core.c
37777 @@ -516,6 +516,7 @@ struct intel_host {
37778         int     drv_strength;
37779         bool    d3_retune;
37780         bool    rpm_retune_ok;
37781 +       bool    needs_pwr_off;
37782         u32     glk_rx_ctrl1;
37783         u32     glk_tun_val;
37784         u32     active_ltr;
37785 @@ -643,9 +644,25 @@ static int bxt_get_cd(struct mmc_host *mmc)
37786  static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
37787                                   unsigned short vdd)
37789 +       struct sdhci_pci_slot *slot = sdhci_priv(host);
37790 +       struct intel_host *intel_host = sdhci_pci_priv(slot);
37791         int cntr;
37792         u8 reg;
37794 +       /*
37795 +        * Bus power may control card power, but a full reset still may not
37796 +        * reset the power, whereas a direct write to SDHCI_POWER_CONTROL can.
37797 +        * That might be needed to initialize correctly, if the card was left
37798 +        * powered on previously.
37799 +        */
37800 +       if (intel_host->needs_pwr_off) {
37801 +               intel_host->needs_pwr_off = false;
37802 +               if (mode != MMC_POWER_OFF) {
37803 +                       sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
37804 +                       usleep_range(10000, 12500);
37805 +               }
37806 +       }
37808         sdhci_set_power(host, mode, vdd);
37810         if (mode == MMC_POWER_OFF)
37811 @@ -1135,6 +1152,14 @@ static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
37812         return 0;
37815 +static void byt_needs_pwr_off(struct sdhci_pci_slot *slot)
37817 +       struct intel_host *intel_host = sdhci_pci_priv(slot);
37818 +       u8 reg = sdhci_readb(slot->host, SDHCI_POWER_CONTROL);
37820 +       intel_host->needs_pwr_off = reg  & SDHCI_POWER_ON;
37823  static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
37825         byt_probe_slot(slot);
37826 @@ -1152,6 +1177,8 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
37827             slot->chip->pdev->subsystem_device == PCI_SUBDEVICE_ID_NI_78E3)
37828                 slot->host->mmc->caps2 |= MMC_CAP2_AVOID_3_3V;
37830 +       byt_needs_pwr_off(slot);
37832         return 0;
37835 @@ -1903,6 +1930,8 @@ static const struct pci_device_id pci_ids[] = {
37836         SDHCI_PCI_DEVICE(INTEL, CMLH_SD,   intel_byt_sd),
37837         SDHCI_PCI_DEVICE(INTEL, JSL_EMMC,  intel_glk_emmc),
37838         SDHCI_PCI_DEVICE(INTEL, JSL_SD,    intel_byt_sd),
37839 +       SDHCI_PCI_DEVICE(INTEL, LKF_EMMC,  intel_glk_emmc),
37840 +       SDHCI_PCI_DEVICE(INTEL, LKF_SD,    intel_byt_sd),
37841         SDHCI_PCI_DEVICE(O2, 8120,     o2),
37842         SDHCI_PCI_DEVICE(O2, 8220,     o2),
37843         SDHCI_PCI_DEVICE(O2, 8221,     o2),
37844 diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
37845 index d0ed232af0eb..8f90c4163bb5 100644
37846 --- a/drivers/mmc/host/sdhci-pci.h
37847 +++ b/drivers/mmc/host/sdhci-pci.h
37848 @@ -57,6 +57,8 @@
37849  #define PCI_DEVICE_ID_INTEL_CMLH_SD    0x06f5
37850  #define PCI_DEVICE_ID_INTEL_JSL_EMMC   0x4dc4
37851  #define PCI_DEVICE_ID_INTEL_JSL_SD     0x4df8
37852 +#define PCI_DEVICE_ID_INTEL_LKF_EMMC   0x98c4
37853 +#define PCI_DEVICE_ID_INTEL_LKF_SD     0x98f8
37855  #define PCI_DEVICE_ID_SYSKONNECT_8000  0x8000
37856  #define PCI_DEVICE_ID_VIA_95D0         0x95d0
37857 diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
37858 index 41d193fa77bb..8ea9132ebca4 100644
37859 --- a/drivers/mmc/host/sdhci-tegra.c
37860 +++ b/drivers/mmc/host/sdhci-tegra.c
37861 @@ -119,6 +119,10 @@
37862  /* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
37863  #define SDHCI_TEGRA_CQE_BASE_ADDR                      0xF000
37865 +#define SDHCI_TEGRA_CQE_TRNS_MODE      (SDHCI_TRNS_MULTI | \
37866 +                                        SDHCI_TRNS_BLK_CNT_EN | \
37867 +                                        SDHCI_TRNS_DMA)
37869  struct sdhci_tegra_soc_data {
37870         const struct sdhci_pltfm_data *pdata;
37871         u64 dma_mask;
37872 @@ -1156,6 +1160,7 @@ static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
37873  static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
37875         struct mmc_host *mmc = cq_host->mmc;
37876 +       struct sdhci_host *host = mmc_priv(mmc);
37877         u8 ctrl;
37878         ktime_t timeout;
37879         bool timed_out;
37880 @@ -1170,6 +1175,7 @@ static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
37881          */
37882         if (reg == CQHCI_CTL && !(val & CQHCI_HALT) &&
37883             cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
37884 +               sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
37885                 sdhci_cqe_enable(mmc);
37886                 writel(val, cq_host->mmio + reg);
37887                 timeout = ktime_add_us(ktime_get(), 50);
37888 @@ -1205,6 +1211,7 @@ static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc,
37889  static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
37891         struct cqhci_host *cq_host = mmc->cqe_private;
37892 +       struct sdhci_host *host = mmc_priv(mmc);
37893         u32 val;
37895         /*
37896 @@ -1218,6 +1225,7 @@ static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
37897                 if (val & CQHCI_ENABLE)
37898                         cqhci_writel(cq_host, (val & ~CQHCI_ENABLE),
37899                                      CQHCI_CFG);
37900 +               sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
37901                 sdhci_cqe_enable(mmc);
37902                 if (val & CQHCI_ENABLE)
37903                         cqhci_writel(cq_host, val, CQHCI_CFG);
37904 @@ -1281,12 +1289,36 @@ static void tegra_sdhci_set_timeout(struct sdhci_host *host,
37905         __sdhci_set_timeout(host, cmd);
37908 +static void sdhci_tegra_cqe_pre_enable(struct mmc_host *mmc)
37910 +       struct cqhci_host *cq_host = mmc->cqe_private;
37911 +       u32 reg;
37913 +       reg = cqhci_readl(cq_host, CQHCI_CFG);
37914 +       reg |= CQHCI_ENABLE;
37915 +       cqhci_writel(cq_host, reg, CQHCI_CFG);
37918 +static void sdhci_tegra_cqe_post_disable(struct mmc_host *mmc)
37920 +       struct cqhci_host *cq_host = mmc->cqe_private;
37921 +       struct sdhci_host *host = mmc_priv(mmc);
37922 +       u32 reg;
37924 +       reg = cqhci_readl(cq_host, CQHCI_CFG);
37925 +       reg &= ~CQHCI_ENABLE;
37926 +       cqhci_writel(cq_host, reg, CQHCI_CFG);
37927 +       sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
37930  static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
37931         .write_l    = tegra_cqhci_writel,
37932         .enable = sdhci_tegra_cqe_enable,
37933         .disable = sdhci_cqe_disable,
37934         .dumpregs = sdhci_tegra_dumpregs,
37935         .update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
37936 +       .pre_enable = sdhci_tegra_cqe_pre_enable,
37937 +       .post_disable = sdhci_tegra_cqe_post_disable,
37938  };
37940  static int tegra_sdhci_set_dma_mask(struct sdhci_host *host)
37941 diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
37942 index 2d73407ee52e..a9e20818ff3a 100644
37943 --- a/drivers/mmc/host/sdhci.c
37944 +++ b/drivers/mmc/host/sdhci.c
37945 @@ -2996,6 +2996,37 @@ static bool sdhci_request_done(struct sdhci_host *host)
37946                 return true;
37947         }
37949 +       /*
37950 +        * The controller needs a reset of internal state machines
37951 +        * upon error conditions.
37952 +        */
37953 +       if (sdhci_needs_reset(host, mrq)) {
37954 +               /*
37955 +                * Do not finish until command and data lines are available for
37956 +                * reset. Note there can only be one other mrq, so it cannot
37957 +                * also be in mrqs_done, otherwise host->cmd and host->data_cmd
37958 +                * would both be null.
37959 +                */
37960 +               if (host->cmd || host->data_cmd) {
37961 +                       spin_unlock_irqrestore(&host->lock, flags);
37962 +                       return true;
37963 +               }
37965 +               /* Some controllers need this kick or reset won't work here */
37966 +               if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
37967 +                       /* This is to force an update */
37968 +                       host->ops->set_clock(host, host->clock);
37970 +               /*
37971 +                * Spec says we should do both at the same time, but Ricoh
37972 +                * controllers do not like that.
37973 +                */
37974 +               sdhci_do_reset(host, SDHCI_RESET_CMD);
37975 +               sdhci_do_reset(host, SDHCI_RESET_DATA);
37977 +               host->pending_reset = false;
37978 +       }
37980         /*
37981          * Always unmap the data buffers if they were mapped by
37982          * sdhci_prepare_data() whenever we finish with a request.
37983 @@ -3059,35 +3090,6 @@ static bool sdhci_request_done(struct sdhci_host *host)
37984                 }
37985         }
37987 -       /*
37988 -        * The controller needs a reset of internal state machines
37989 -        * upon error conditions.
37990 -        */
37991 -       if (sdhci_needs_reset(host, mrq)) {
37992 -               /*
37993 -                * Do not finish until command and data lines are available for
37994 -                * reset. Note there can only be one other mrq, so it cannot
37995 -                * also be in mrqs_done, otherwise host->cmd and host->data_cmd
37996 -                * would both be null.
37997 -                */
37998 -               if (host->cmd || host->data_cmd) {
37999 -                       spin_unlock_irqrestore(&host->lock, flags);
38000 -                       return true;
38001 -               }
38003 -               /* Some controllers need this kick or reset won't work here */
38004 -               if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
38005 -                       /* This is to force an update */
38006 -                       host->ops->set_clock(host, host->clock);
38008 -               /* Spec says we should do both at the same time, but Ricoh
38009 -                  controllers do not like that. */
38010 -               sdhci_do_reset(host, SDHCI_RESET_CMD);
38011 -               sdhci_do_reset(host, SDHCI_RESET_DATA);
38013 -               host->pending_reset = false;
38014 -       }
38016         host->mrqs_done[i] = NULL;
38018         spin_unlock_irqrestore(&host->lock, flags);
38019 diff --git a/drivers/mmc/host/uniphier-sd.c b/drivers/mmc/host/uniphier-sd.c
38020 index 2413b6750cec..ccbf9885a52b 100644
38021 --- a/drivers/mmc/host/uniphier-sd.c
38022 +++ b/drivers/mmc/host/uniphier-sd.c
38023 @@ -635,7 +635,7 @@ static int uniphier_sd_probe(struct platform_device *pdev)
38025         ret = tmio_mmc_host_probe(host);
38026         if (ret)
38027 -               goto free_host;
38028 +               goto disable_clk;
38030         ret = devm_request_irq(dev, irq, tmio_mmc_irq, IRQF_SHARED,
38031                                dev_name(dev), host);
38032 @@ -646,6 +646,8 @@ static int uniphier_sd_probe(struct platform_device *pdev)
38034  remove_host:
38035         tmio_mmc_host_remove(host);
38036 +disable_clk:
38037 +       uniphier_sd_clk_disable(host);
38038  free_host:
38039         tmio_mmc_host_free(host);
38041 @@ -658,6 +660,7 @@ static int uniphier_sd_remove(struct platform_device *pdev)
38043         tmio_mmc_host_remove(host);
38044         uniphier_sd_clk_disable(host);
38045 +       tmio_mmc_host_free(host);
38047         return 0;
38049 diff --git a/drivers/mtd/maps/physmap-bt1-rom.c b/drivers/mtd/maps/physmap-bt1-rom.c
38050 index a35450002284..58782cfaf71c 100644
38051 --- a/drivers/mtd/maps/physmap-bt1-rom.c
38052 +++ b/drivers/mtd/maps/physmap-bt1-rom.c
38053 @@ -79,7 +79,7 @@ static void __xipram bt1_rom_map_copy_from(struct map_info *map,
38054         if (shift) {
38055                 chunk = min_t(ssize_t, 4 - shift, len);
38056                 data = readl_relaxed(src - shift);
38057 -               memcpy(to, &data + shift, chunk);
38058 +               memcpy(to, (char *)&data + shift, chunk);
38059                 src += chunk;
38060                 to += chunk;
38061                 len -= chunk;
38062 diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
38063 index 001ed5deb622..4f63b8430c71 100644
38064 --- a/drivers/mtd/maps/physmap-core.c
38065 +++ b/drivers/mtd/maps/physmap-core.c
38066 @@ -69,8 +69,10 @@ static int physmap_flash_remove(struct platform_device *dev)
38067         int i, err = 0;
38069         info = platform_get_drvdata(dev);
38070 -       if (!info)
38071 +       if (!info) {
38072 +               err = -EINVAL;
38073                 goto out;
38074 +       }
38076         if (info->cmtd) {
38077                 err = mtd_device_unregister(info->cmtd);
38078 diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
38079 index 323035d4f2d0..688de663cabf 100644
38080 --- a/drivers/mtd/mtdchar.c
38081 +++ b/drivers/mtd/mtdchar.c
38082 @@ -651,16 +651,12 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
38083         case MEMGETINFO:
38084         case MEMREADOOB:
38085         case MEMREADOOB64:
38086 -       case MEMLOCK:
38087 -       case MEMUNLOCK:
38088         case MEMISLOCKED:
38089         case MEMGETOOBSEL:
38090         case MEMGETBADBLOCK:
38091 -       case MEMSETBADBLOCK:
38092         case OTPSELECT:
38093         case OTPGETREGIONCOUNT:
38094         case OTPGETREGIONINFO:
38095 -       case OTPLOCK:
38096         case ECCGETLAYOUT:
38097         case ECCGETSTATS:
38098         case MTDFILEMODE:
38099 @@ -671,9 +667,13 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
38100         /* "dangerous" commands */
38101         case MEMERASE:
38102         case MEMERASE64:
38103 +       case MEMLOCK:
38104 +       case MEMUNLOCK:
38105 +       case MEMSETBADBLOCK:
38106         case MEMWRITEOOB:
38107         case MEMWRITEOOB64:
38108         case MEMWRITE:
38109 +       case OTPLOCK:
38110                 if (!(file->f_mode & FMODE_WRITE))
38111                         return -EPERM;
38112                 break;
38113 diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
38114 index 2d6423d89a17..d97ddc65b5d4 100644
38115 --- a/drivers/mtd/mtdcore.c
38116 +++ b/drivers/mtd/mtdcore.c
38117 @@ -820,6 +820,9 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
38119         /* Prefer parsed partitions over driver-provided fallback */
38120         ret = parse_mtd_partitions(mtd, types, parser_data);
38121 +       if (ret == -EPROBE_DEFER)
38122 +               goto out;
38124         if (ret > 0)
38125                 ret = 0;
38126         else if (nr_parts)
38127 diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
38128 index 12ca4f19cb14..665fd9020b76 100644
38129 --- a/drivers/mtd/mtdpart.c
38130 +++ b/drivers/mtd/mtdpart.c
38131 @@ -331,7 +331,7 @@ static int __del_mtd_partitions(struct mtd_info *mtd)
38133         list_for_each_entry_safe(child, next, &mtd->partitions, part.node) {
38134                 if (mtd_has_partitions(child))
38135 -                       del_mtd_partitions(child);
38136 +                       __del_mtd_partitions(child);
38138                 pr_info("Deleting %s MTD partition\n", child->name);
38139                 ret = del_mtd_device(child);
38140 diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
38141 index e6ceec8f50dc..8aab1017b460 100644
38142 --- a/drivers/mtd/nand/raw/atmel/nand-controller.c
38143 +++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
38144 @@ -883,10 +883,12 @@ static int atmel_nand_pmecc_correct_data(struct nand_chip *chip, void *buf,
38145                                                           NULL, 0,
38146                                                           chip->ecc.strength);
38148 -               if (ret >= 0)
38149 +               if (ret >= 0) {
38150 +                       mtd->ecc_stats.corrected += ret;
38151                         max_bitflips = max(ret, max_bitflips);
38152 -               else
38153 +               } else {
38154                         mtd->ecc_stats.failed++;
38155 +               }
38157                 databuf += chip->ecc.size;
38158                 eccbuf += chip->ecc.bytes;
38159 diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
38160 index 659eaa6f0980..5ff4291380c5 100644
38161 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
38162 +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
38163 @@ -2688,6 +2688,12 @@ static int brcmnand_attach_chip(struct nand_chip *chip)
38165         ret = brcmstb_choose_ecc_layout(host);
38167 +       /* If OOB is written with ECC enabled it will cause ECC errors */
38168 +       if (is_hamming_ecc(host->ctrl, &host->hwcfg)) {
38169 +               chip->ecc.write_oob = brcmnand_write_oob_raw;
38170 +               chip->ecc.read_oob = brcmnand_read_oob_raw;
38171 +       }
38173         return ret;
38176 diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
38177 index 0101c0fab50a..a24e2f57fa68 100644
38178 --- a/drivers/mtd/nand/raw/fsmc_nand.c
38179 +++ b/drivers/mtd/nand/raw/fsmc_nand.c
38180 @@ -1077,11 +1077,13 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
38181                 host->read_dma_chan = dma_request_channel(mask, filter, NULL);
38182                 if (!host->read_dma_chan) {
38183                         dev_err(&pdev->dev, "Unable to get read dma channel\n");
38184 +                       ret = -ENODEV;
38185                         goto disable_clk;
38186                 }
38187                 host->write_dma_chan = dma_request_channel(mask, filter, NULL);
38188                 if (!host->write_dma_chan) {
38189                         dev_err(&pdev->dev, "Unable to get write dma channel\n");
38190 +                       ret = -ENODEV;
38191                         goto release_dma_read_chan;
38192                 }
38193         }
38194 diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
38195 index 3fa8c22d3f36..4d08e4ab5c1b 100644
38196 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
38197 +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
38198 @@ -2449,7 +2449,7 @@ static int gpmi_nand_init(struct gpmi_nand_data *this)
38199         this->bch_geometry.auxiliary_size = 128;
38200         ret = gpmi_alloc_dma_buffer(this);
38201         if (ret)
38202 -               goto err_out;
38203 +               return ret;
38205         nand_controller_init(&this->base);
38206         this->base.ops = &gpmi_nand_controller_ops;
38207 diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
38208 index fd4c318b520f..87c23bb320bf 100644
38209 --- a/drivers/mtd/nand/raw/qcom_nandc.c
38210 +++ b/drivers/mtd/nand/raw/qcom_nandc.c
38211 @@ -2898,7 +2898,7 @@ static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
38212         struct device *dev = nandc->dev;
38213         struct device_node *dn = dev->of_node, *child;
38214         struct qcom_nand_host *host;
38215 -       int ret;
38216 +       int ret = -ENODEV;
38218         for_each_available_child_of_node(dn, child) {
38219                 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
38220 @@ -2916,10 +2916,7 @@ static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
38221                 list_add_tail(&host->node, &nandc->host_list);
38222         }
38224 -       if (list_empty(&nandc->host_list))
38225 -               return -ENODEV;
38227 -       return 0;
38228 +       return ret;
38231  /* parse custom DT properties here */
38232 diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
38233 index 61d932c1b718..17f63f95f4a2 100644
38234 --- a/drivers/mtd/nand/spi/core.c
38235 +++ b/drivers/mtd/nand/spi/core.c
38236 @@ -1263,12 +1263,14 @@ static const struct spi_device_id spinand_ids[] = {
38237         { .name = "spi-nand" },
38238         { /* sentinel */ },
38239  };
38240 +MODULE_DEVICE_TABLE(spi, spinand_ids);
38242  #ifdef CONFIG_OF
38243  static const struct of_device_id spinand_of_ids[] = {
38244         { .compatible = "spi-nand" },
38245         { /* sentinel */ },
38246  };
38247 +MODULE_DEVICE_TABLE(of, spinand_of_ids);
38248  #endif
38250  static struct spi_mem_driver spinand_drv = {
38251 diff --git a/drivers/mtd/parsers/qcomsmempart.c b/drivers/mtd/parsers/qcomsmempart.c
38252 index 808cb33d71f8..d9083308f6ba 100644
38253 --- a/drivers/mtd/parsers/qcomsmempart.c
38254 +++ b/drivers/mtd/parsers/qcomsmempart.c
38255 @@ -65,6 +65,13 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
38256         int ret, i, numparts;
38257         char *name, *c;
38259 +       if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_4K_SECTORS)
38260 +                       && mtd->type == MTD_NORFLASH) {
38261 +               pr_err("%s: SMEM partition parser is incompatible with 4K sectors\n",
38262 +                               mtd->name);
38263 +               return -EINVAL;
38264 +       }
38266         pr_debug("Parsing partition table info from SMEM\n");
38267         ptable = qcom_smem_get(SMEM_APPS, SMEM_AARM_PARTITION_TABLE, &len);
38268         if (IS_ERR(ptable)) {
38269 @@ -104,7 +111,7 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
38270          * complete partition table
38271          */
38272         ptable = qcom_smem_get(SMEM_APPS, SMEM_AARM_PARTITION_TABLE, &len);
38273 -       if (IS_ERR_OR_NULL(ptable)) {
38274 +       if (IS_ERR(ptable)) {
38275                 pr_err("Error reading partition table\n");
38276                 return PTR_ERR(ptable);
38277         }
38278 diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
38279 index 0522304f52fa..72bc1342c3ff 100644
38280 --- a/drivers/mtd/spi-nor/core.c
38281 +++ b/drivers/mtd/spi-nor/core.c
38282 @@ -3301,6 +3301,37 @@ static void spi_nor_resume(struct mtd_info *mtd)
38283                 dev_err(dev, "resume() failed\n");
38286 +static int spi_nor_get_device(struct mtd_info *mtd)
38288 +       struct mtd_info *master = mtd_get_master(mtd);
38289 +       struct spi_nor *nor = mtd_to_spi_nor(master);
38290 +       struct device *dev;
38292 +       if (nor->spimem)
38293 +               dev = nor->spimem->spi->controller->dev.parent;
38294 +       else
38295 +               dev = nor->dev;
38297 +       if (!try_module_get(dev->driver->owner))
38298 +               return -ENODEV;
38300 +       return 0;
38303 +static void spi_nor_put_device(struct mtd_info *mtd)
38305 +       struct mtd_info *master = mtd_get_master(mtd);
38306 +       struct spi_nor *nor = mtd_to_spi_nor(master);
38307 +       struct device *dev;
38309 +       if (nor->spimem)
38310 +               dev = nor->spimem->spi->controller->dev.parent;
38311 +       else
38312 +               dev = nor->dev;
38314 +       module_put(dev->driver->owner);
38317  void spi_nor_restore(struct spi_nor *nor)
38319         /* restore the addressing mode */
38320 @@ -3495,6 +3526,8 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
38321         mtd->_read = spi_nor_read;
38322         mtd->_suspend = spi_nor_suspend;
38323         mtd->_resume = spi_nor_resume;
38324 +       mtd->_get_device = spi_nor_get_device;
38325 +       mtd->_put_device = spi_nor_put_device;
38327         if (nor->params->locking_ops) {
38328                 mtd->_lock = spi_nor_lock;
38329 diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
38330 index 9203abaac229..662b212787d4 100644
38331 --- a/drivers/mtd/spi-nor/macronix.c
38332 +++ b/drivers/mtd/spi-nor/macronix.c
38333 @@ -73,9 +73,6 @@ static const struct flash_info macronix_parts[] = {
38334                               SECT_4K | SPI_NOR_DUAL_READ |
38335                               SPI_NOR_QUAD_READ) },
38336         { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
38337 -       { "mx25l51245g", INFO(0xc2201a, 0, 64 * 1024, 1024,
38338 -                             SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
38339 -                             SPI_NOR_4B_OPCODES) },
38340         { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024,
38341                               SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
38342                               SPI_NOR_4B_OPCODES) },
38343 diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
38344 index 3d63b15bbaa1..164071e9d457 100644
38345 --- a/drivers/net/caif/caif_hsi.c
38346 +++ b/drivers/net/caif/caif_hsi.c
38347 @@ -924,7 +924,7 @@ static void cfhsi_wake_down(struct work_struct *work)
38348                         break;
38350                 set_current_state(TASK_INTERRUPTIBLE);
38351 -               schedule_timeout(1);
38352 +               schedule_min_hrtimeout();
38353                 retry--;
38354         }
38356 diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c
38357 index 6a64fe410987..c3508109263e 100644
38358 --- a/drivers/net/can/dev/skb.c
38359 +++ b/drivers/net/can/dev/skb.c
38360 @@ -151,7 +151,11 @@ void can_free_echo_skb(struct net_device *dev, unsigned int idx)
38362         struct can_priv *priv = netdev_priv(dev);
38364 -       BUG_ON(idx >= priv->echo_skb_max);
38365 +       if (idx >= priv->echo_skb_max) {
38366 +               netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
38367 +                          __func__, idx, priv->echo_skb_max);
38368 +               return;
38369 +       }
38371         if (priv->echo_skb[idx]) {
38372                 dev_kfree_skb_any(priv->echo_skb[idx]);
38373 diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
38374 index 0c8d36bc668c..f71127229caf 100644
38375 --- a/drivers/net/can/m_can/m_can.c
38376 +++ b/drivers/net/can/m_can/m_can.c
38377 @@ -1455,6 +1455,8 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
38378         int i;
38379         int putidx;
38381 +       cdev->tx_skb = NULL;
38383         /* Generate ID field for TX buffer Element */
38384         /* Common to all supported M_CAN versions */
38385         if (cf->can_id & CAN_EFF_FLAG) {
38386 @@ -1571,7 +1573,6 @@ static void m_can_tx_work_queue(struct work_struct *ws)
38387                                                    tx_work);
38389         m_can_tx_handler(cdev);
38390 -       cdev->tx_skb = NULL;
38393  static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
38394 diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
38395 index a57da43680d8..bd7d0251be10 100644
38396 --- a/drivers/net/can/spi/mcp251x.c
38397 +++ b/drivers/net/can/spi/mcp251x.c
38398 @@ -956,8 +956,6 @@ static int mcp251x_stop(struct net_device *net)
38400         priv->force_quit = 1;
38401         free_irq(spi->irq, priv);
38402 -       destroy_workqueue(priv->wq);
38403 -       priv->wq = NULL;
38405         mutex_lock(&priv->mcp_lock);
38407 @@ -1224,24 +1222,15 @@ static int mcp251x_open(struct net_device *net)
38408                 goto out_close;
38409         }
38411 -       priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
38412 -                                  0);
38413 -       if (!priv->wq) {
38414 -               ret = -ENOMEM;
38415 -               goto out_clean;
38416 -       }
38417 -       INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
38418 -       INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
38420         ret = mcp251x_hw_wake(spi);
38421         if (ret)
38422 -               goto out_free_wq;
38423 +               goto out_free_irq;
38424         ret = mcp251x_setup(net, spi);
38425         if (ret)
38426 -               goto out_free_wq;
38427 +               goto out_free_irq;
38428         ret = mcp251x_set_normal_mode(spi);
38429         if (ret)
38430 -               goto out_free_wq;
38431 +               goto out_free_irq;
38433         can_led_event(net, CAN_LED_EVENT_OPEN);
38435 @@ -1250,9 +1239,7 @@ static int mcp251x_open(struct net_device *net)
38437         return 0;
38439 -out_free_wq:
38440 -       destroy_workqueue(priv->wq);
38441 -out_clean:
38442 +out_free_irq:
38443         free_irq(spi->irq, priv);
38444         mcp251x_hw_sleep(spi);
38445  out_close:
38446 @@ -1373,6 +1360,15 @@ static int mcp251x_can_probe(struct spi_device *spi)
38447         if (ret)
38448                 goto out_clk;
38450 +       priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
38451 +                                  0);
38452 +       if (!priv->wq) {
38453 +               ret = -ENOMEM;
38454 +               goto out_clk;
38455 +       }
38456 +       INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
38457 +       INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
38459         priv->spi = spi;
38460         mutex_init(&priv->mcp_lock);
38462 @@ -1417,6 +1413,8 @@ static int mcp251x_can_probe(struct spi_device *spi)
38463         return 0;
38465  error_probe:
38466 +       destroy_workqueue(priv->wq);
38467 +       priv->wq = NULL;
38468         mcp251x_power_enable(priv->power, 0);
38470  out_clk:
38471 @@ -1438,6 +1436,9 @@ static int mcp251x_can_remove(struct spi_device *spi)
38473         mcp251x_power_enable(priv->power, 0);
38475 +       destroy_workqueue(priv->wq);
38476 +       priv->wq = NULL;
38478         clk_disable_unprepare(priv->clk);
38480         free_candev(net);
38481 diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
38482 index 799e9d5d3481..4a742aa5c417 100644
38483 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
38484 +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
38485 @@ -2856,8 +2856,8 @@ static int mcp251xfd_probe(struct spi_device *spi)
38487         clk = devm_clk_get(&spi->dev, NULL);
38488         if (IS_ERR(clk))
38489 -               dev_err_probe(&spi->dev, PTR_ERR(clk),
38490 -                             "Failed to get Oscillator (clock)!\n");
38491 +               return dev_err_probe(&spi->dev, PTR_ERR(clk),
38492 +                                    "Failed to get Oscillator (clock)!\n");
38493         freq = clk_get_rate(clk);
38495         /* Sanity check */
38496 @@ -2957,10 +2957,12 @@ static int mcp251xfd_probe(struct spi_device *spi)
38498         err = mcp251xfd_register(priv);
38499         if (err)
38500 -               goto out_free_candev;
38501 +               goto out_can_rx_offload_del;
38503         return 0;
38505 + out_can_rx_offload_del:
38506 +       can_rx_offload_del(&priv->offload);
38507   out_free_candev:
38508         spi->max_speed_hz = priv->spi_max_speed_hz_orig;
38510 diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
38511 index e393e8457d77..4274f78682d9 100644
38512 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c
38513 +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
38514 @@ -288,7 +288,7 @@ static int pcan_usb_write_mode(struct peak_usb_device *dev, u8 onoff)
38515         } else {
38516                 /* the PCAN-USB needs time to init */
38517                 set_current_state(TASK_INTERRUPTIBLE);
38518 -               schedule_timeout(msecs_to_jiffies(PCAN_USB_STARTUP_TIMEOUT));
38519 +               schedule_msec_hrtimeout((PCAN_USB_STARTUP_TIMEOUT));
38520         }
38522         return err;
38523 diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
38524 index ba5d546d06aa..9c86cacc4a72 100644
38525 --- a/drivers/net/dsa/bcm_sf2.c
38526 +++ b/drivers/net/dsa/bcm_sf2.c
38527 @@ -32,6 +32,36 @@
38528  #include "b53/b53_priv.h"
38529  #include "b53/b53_regs.h"
38531 +static u16 bcm_sf2_reg_rgmii_cntrl(struct bcm_sf2_priv *priv, int port)
38533 +       switch (priv->type) {
38534 +       case BCM4908_DEVICE_ID:
38535 +               switch (port) {
38536 +               case 7:
38537 +                       return REG_RGMII_11_CNTRL;
38538 +               default:
38539 +                       break;
38540 +               }
38541 +               break;
38542 +       default:
38543 +               switch (port) {
38544 +               case 0:
38545 +                       return REG_RGMII_0_CNTRL;
38546 +               case 1:
38547 +                       return REG_RGMII_1_CNTRL;
38548 +               case 2:
38549 +                       return REG_RGMII_2_CNTRL;
38550 +               default:
38551 +                       break;
38552 +               }
38553 +       }
38555 +       WARN_ONCE(1, "Unsupported port %d\n", port);
38557 +       /* RO fallback reg */
38558 +       return REG_SWITCH_STATUS;
38561  /* Return the number of active ports, not counting the IMP (CPU) port */
38562  static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
38564 @@ -647,6 +677,7 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
38566         struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
38567         u32 id_mode_dis = 0, port_mode;
38568 +       u32 reg_rgmii_ctrl;
38569         u32 reg;
38571         if (port == core_readl(priv, CORE_IMP0_PRT_ID))
38572 @@ -670,10 +701,12 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
38573                 return;
38574         }
38576 +       reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
38578         /* Clear id_mode_dis bit, and the existing port mode, let
38579          * RGMII_MODE_EN bet set by mac_link_{up,down}
38580          */
38581 -       reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
38582 +       reg = reg_readl(priv, reg_rgmii_ctrl);
38583         reg &= ~ID_MODE_DIS;
38584         reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
38586 @@ -681,13 +714,14 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
38587         if (id_mode_dis)
38588                 reg |= ID_MODE_DIS;
38590 -       reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
38591 +       reg_writel(priv, reg, reg_rgmii_ctrl);
38594  static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port,
38595                                     phy_interface_t interface, bool link)
38597         struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
38598 +       u32 reg_rgmii_ctrl;
38599         u32 reg;
38601         if (!phy_interface_mode_is_rgmii(interface) &&
38602 @@ -695,13 +729,15 @@ static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port,
38603             interface != PHY_INTERFACE_MODE_REVMII)
38604                 return;
38606 +       reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
38608         /* If the link is down, just disable the interface to conserve power */
38609 -       reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
38610 +       reg = reg_readl(priv, reg_rgmii_ctrl);
38611         if (link)
38612                 reg |= RGMII_MODE_EN;
38613         else
38614                 reg &= ~RGMII_MODE_EN;
38615 -       reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
38616 +       reg_writel(priv, reg, reg_rgmii_ctrl);
38619  static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
38620 @@ -735,11 +771,15 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
38622         struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
38623         struct ethtool_eee *p = &priv->dev->ports[port].eee;
38624 -       u32 reg, offset;
38626         bcm_sf2_sw_mac_link_set(ds, port, interface, true);
38628         if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
38629 +               u32 reg_rgmii_ctrl;
38630 +               u32 reg, offset;
38632 +               reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
38634                 if (priv->type == BCM4908_DEVICE_ID ||
38635                     priv->type == BCM7445_DEVICE_ID)
38636                         offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
38637 @@ -750,7 +790,7 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
38638                     interface == PHY_INTERFACE_MODE_RGMII_TXID ||
38639                     interface == PHY_INTERFACE_MODE_MII ||
38640                     interface == PHY_INTERFACE_MODE_REVMII) {
38641 -                       reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
38642 +                       reg = reg_readl(priv, reg_rgmii_ctrl);
38643                         reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
38645                         if (tx_pause)
38646 @@ -758,7 +798,7 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
38647                         if (rx_pause)
38648                                 reg |= RX_PAUSE_EN;
38650 -                       reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
38651 +                       reg_writel(priv, reg, reg_rgmii_ctrl);
38652                 }
38654                 reg = SW_OVERRIDE | LINK_STS;
38655 @@ -1144,9 +1184,7 @@ static const u16 bcm_sf2_4908_reg_offsets[] = {
38656         [REG_PHY_REVISION]      = 0x14,
38657         [REG_SPHY_CNTRL]        = 0x24,
38658         [REG_CROSSBAR]          = 0xc8,
38659 -       [REG_RGMII_0_CNTRL]     = 0xe0,
38660 -       [REG_RGMII_1_CNTRL]     = 0xec,
38661 -       [REG_RGMII_2_CNTRL]     = 0xf8,
38662 +       [REG_RGMII_11_CNTRL]    = 0x014c,
38663         [REG_LED_0_CNTRL]       = 0x40,
38664         [REG_LED_1_CNTRL]       = 0x4c,
38665         [REG_LED_2_CNTRL]       = 0x58,
38666 diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h
38667 index 1d2d55c9f8aa..9e141d1a0b07 100644
38668 --- a/drivers/net/dsa/bcm_sf2_regs.h
38669 +++ b/drivers/net/dsa/bcm_sf2_regs.h
38670 @@ -21,6 +21,7 @@ enum bcm_sf2_reg_offs {
38671         REG_RGMII_0_CNTRL,
38672         REG_RGMII_1_CNTRL,
38673         REG_RGMII_2_CNTRL,
38674 +       REG_RGMII_11_CNTRL,
38675         REG_LED_0_CNTRL,
38676         REG_LED_1_CNTRL,
38677         REG_LED_2_CNTRL,
38678 @@ -48,8 +49,6 @@ enum bcm_sf2_reg_offs {
38679  #define  PHY_PHYAD_SHIFT               8
38680  #define  PHY_PHYAD_MASK                        0x1F
38682 -#define REG_RGMII_CNTRL_P(x)           (REG_RGMII_0_CNTRL + (x))
38684  /* Relative to REG_RGMII_CNTRL */
38685  #define  RGMII_MODE_EN                 (1 << 0)
38686  #define  ID_MODE_DIS                   (1 << 1)
38687 diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c
38688 index 21953d6d484c..ada7a38d4d31 100644
38689 --- a/drivers/net/dsa/mv88e6xxx/devlink.c
38690 +++ b/drivers/net/dsa/mv88e6xxx/devlink.c
38691 @@ -678,7 +678,7 @@ static int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds,
38692                                 sizeof(struct mv88e6xxx_devlink_atu_entry);
38693                         break;
38694                 case MV88E6XXX_REGION_VTU:
38695 -                       size = mv88e6xxx_max_vid(chip) *
38696 +                       size = (mv88e6xxx_max_vid(chip) + 1) *
38697                                 sizeof(struct mv88e6xxx_devlink_vtu_entry);
38698                         break;
38699                 }
38700 diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
38701 index 3195936dc5be..2ce04fef698d 100644
38702 --- a/drivers/net/dsa/mv88e6xxx/serdes.c
38703 +++ b/drivers/net/dsa/mv88e6xxx/serdes.c
38704 @@ -443,15 +443,15 @@ int mv88e6185_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane,
38705  u8 mv88e6185_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
38707         /* There are no configurable serdes lanes on this switch chip but we
38708 -        * need to return non-zero so that callers of
38709 +        * need to return a non-negative lane number so that callers of
38710          * mv88e6xxx_serdes_get_lane() know this is a serdes port.
38711          */
38712         switch (chip->ports[port].cmode) {
38713         case MV88E6185_PORT_STS_CMODE_SERDES:
38714         case MV88E6185_PORT_STS_CMODE_1000BASE_X:
38715 -               return 0xff;
38716 -       default:
38717                 return 0;
38718 +       default:
38719 +               return -ENODEV;
38720         }
38723 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
38724 index b53a0d87371a..cf4249d59383 100644
38725 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
38726 +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
38727 @@ -122,7 +122,10 @@ enum board_idx {
38728         NETXTREME_E_VF,
38729         NETXTREME_C_VF,
38730         NETXTREME_S_VF,
38731 +       NETXTREME_C_VF_HV,
38732 +       NETXTREME_E_VF_HV,
38733         NETXTREME_E_P5_VF,
38734 +       NETXTREME_E_P5_VF_HV,
38735  };
38737  /* indexed by enum above */
38738 @@ -170,7 +173,10 @@ static const struct {
38739         [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
38740         [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
38741         [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
38742 +       [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
38743 +       [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
38744         [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
38745 +       [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
38746  };
38748  static const struct pci_device_id bnxt_pci_tbl[] = {
38749 @@ -222,15 +228,25 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
38750         { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
38751  #ifdef CONFIG_BNXT_SRIOV
38752         { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
38753 +       { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
38754 +       { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
38755         { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
38756 +       { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
38757         { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
38758 +       { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
38759 +       { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
38760 +       { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
38761 +       { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
38762         { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
38763         { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
38764         { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
38765         { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
38766         { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
38767 +       { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
38768         { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
38769         { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
38770 +       { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
38771 +       { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
38772         { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
38773  #endif
38774         { 0 }
38775 @@ -265,7 +281,9 @@ static struct workqueue_struct *bnxt_pf_wq;
38776  static bool bnxt_vf_pciid(enum board_idx idx)
38778         return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
38779 -               idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
38780 +               idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
38781 +               idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
38782 +               idx == NETXTREME_E_P5_VF_HV);
38784  #define DB_CP_REARM_FLAGS      (DB_KEY_CP | DB_IDX_VALID)
38785 @@ -1732,14 +1749,16 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
38787         cons = rxcmp->rx_cmp_opaque;
38788         if (unlikely(cons != rxr->rx_next_cons)) {
38789 -               int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
38790 +               int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
38792                 /* 0xffff is forced error, don't print it */
38793                 if (rxr->rx_next_cons != 0xffff)
38794                         netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
38795                                     cons, rxr->rx_next_cons);
38796                 bnxt_sched_reset(bp, rxr);
38797 -               return rc1;
38798 +               if (rc1)
38799 +                       return rc1;
38800 +               goto next_rx_no_prod_no_len;
38801         }
38802         rx_buf = &rxr->rx_buf_ring[cons];
38803         data = rx_buf->data;
38804 @@ -9736,7 +9755,9 @@ static ssize_t bnxt_show_temp(struct device *dev,
38805         if (!rc)
38806                 len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
38807         mutex_unlock(&bp->hwrm_cmd_lock);
38808 -       return rc ?: len;
38809 +       if (rc)
38810 +               return rc;
38811 +       return len;
38813  static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
38815 diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
38816 index e6d4ad99cc38..3f1c189646f4 100644
38817 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
38818 +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
38819 @@ -521,7 +521,7 @@
38820  #define    CN23XX_BAR1_INDEX_OFFSET                3
38822  #define    CN23XX_PEM_BAR1_INDEX_REG(port, idx)                \
38823 -               (CN23XX_PEM_BAR1_INDEX_START + ((port) << CN23XX_PEM_OFFSET) + \
38824 +               (CN23XX_PEM_BAR1_INDEX_START + (((u64)port) << CN23XX_PEM_OFFSET) + \
38825                  ((idx) << CN23XX_BAR1_INDEX_OFFSET))
38827  /*############################ DPI #########################*/
38828 diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
38829 index f782e6af45e9..50bbe79fb93d 100644
38830 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
38831 +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
38832 @@ -776,7 +776,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
38833         mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
38834         mbx.rq.qs_num = qs->vnic_id;
38835         mbx.rq.rq_num = qidx;
38836 -       mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
38837 +       mbx.rq.cfg = ((u64)rq->caching << 26) | (rq->cq_qs << 19) |
38838                           (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
38839                           (rq->cont_qs_rbdr_idx << 8) |
38840                           (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
38841 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
38842 index 83b46440408b..bde8494215c4 100644
38843 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
38844 +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
38845 @@ -174,31 +174,31 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
38846                                       WORD_MASK, f->fs.nat_lip[15] |
38847                                       f->fs.nat_lip[14] << 8 |
38848                                       f->fs.nat_lip[13] << 16 |
38849 -                                     f->fs.nat_lip[12] << 24, 1);
38850 +                                     (u64)f->fs.nat_lip[12] << 24, 1);
38852                         set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 1,
38853                                       WORD_MASK, f->fs.nat_lip[11] |
38854                                       f->fs.nat_lip[10] << 8 |
38855                                       f->fs.nat_lip[9] << 16 |
38856 -                                     f->fs.nat_lip[8] << 24, 1);
38857 +                                     (u64)f->fs.nat_lip[8] << 24, 1);
38859                         set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 2,
38860                                       WORD_MASK, f->fs.nat_lip[7] |
38861                                       f->fs.nat_lip[6] << 8 |
38862                                       f->fs.nat_lip[5] << 16 |
38863 -                                     f->fs.nat_lip[4] << 24, 1);
38864 +                                     (u64)f->fs.nat_lip[4] << 24, 1);
38866                         set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 3,
38867                                       WORD_MASK, f->fs.nat_lip[3] |
38868                                       f->fs.nat_lip[2] << 8 |
38869                                       f->fs.nat_lip[1] << 16 |
38870 -                                     f->fs.nat_lip[0] << 24, 1);
38871 +                                     (u64)f->fs.nat_lip[0] << 24, 1);
38872                 } else {
38873                         set_tcb_field(adap, f, tid, TCB_RX_FRAG3_LEN_RAW_W,
38874                                       WORD_MASK, f->fs.nat_lip[3] |
38875                                       f->fs.nat_lip[2] << 8 |
38876                                       f->fs.nat_lip[1] << 16 |
38877 -                                     f->fs.nat_lip[0] << 24, 1);
38878 +                                     (u64)f->fs.nat_lip[0] << 24, 1);
38879                 }
38880         }
38882 @@ -208,25 +208,25 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
38883                                       WORD_MASK, f->fs.nat_fip[15] |
38884                                       f->fs.nat_fip[14] << 8 |
38885                                       f->fs.nat_fip[13] << 16 |
38886 -                                     f->fs.nat_fip[12] << 24, 1);
38887 +                                     (u64)f->fs.nat_fip[12] << 24, 1);
38889                         set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 1,
38890                                       WORD_MASK, f->fs.nat_fip[11] |
38891                                       f->fs.nat_fip[10] << 8 |
38892                                       f->fs.nat_fip[9] << 16 |
38893 -                                     f->fs.nat_fip[8] << 24, 1);
38894 +                                     (u64)f->fs.nat_fip[8] << 24, 1);
38896                         set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 2,
38897                                       WORD_MASK, f->fs.nat_fip[7] |
38898                                       f->fs.nat_fip[6] << 8 |
38899                                       f->fs.nat_fip[5] << 16 |
38900 -                                     f->fs.nat_fip[4] << 24, 1);
38901 +                                     (u64)f->fs.nat_fip[4] << 24, 1);
38903                         set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 3,
38904                                       WORD_MASK, f->fs.nat_fip[3] |
38905                                       f->fs.nat_fip[2] << 8 |
38906                                       f->fs.nat_fip[1] << 16 |
38907 -                                     f->fs.nat_fip[0] << 24, 1);
38908 +                                     (u64)f->fs.nat_fip[0] << 24, 1);
38910                 } else {
38911                         set_tcb_field(adap, f, tid,
38912 @@ -234,13 +234,13 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
38913                                       WORD_MASK, f->fs.nat_fip[3] |
38914                                       f->fs.nat_fip[2] << 8 |
38915                                       f->fs.nat_fip[1] << 16 |
38916 -                                     f->fs.nat_fip[0] << 24, 1);
38917 +                                     (u64)f->fs.nat_fip[0] << 24, 1);
38918                 }
38919         }
38921         set_tcb_field(adap, f, tid, TCB_PDU_HDR_LEN_W, WORD_MASK,
38922                       (dp ? (nat_lp[1] | nat_lp[0] << 8) : 0) |
38923 -                     (sp ? (nat_fp[1] << 16 | nat_fp[0] << 24) : 0),
38924 +                     (sp ? (nat_fp[1] << 16 | (u64)nat_fp[0] << 24) : 0),
38925                       1);
38928 diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
38929 index 256fae15e032..1e5f2edb70cf 100644
38930 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
38931 +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
38932 @@ -2563,12 +2563,12 @@ int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
38933         spin_lock_bh(&eosw_txq->lock);
38934         if (tc != FW_SCHED_CLS_NONE) {
38935                 if (eosw_txq->state != CXGB4_EO_STATE_CLOSED)
38936 -                       goto out_unlock;
38937 +                       goto out_free_skb;
38939                 next_state = CXGB4_EO_STATE_FLOWC_OPEN_SEND;
38940         } else {
38941                 if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE)
38942 -                       goto out_unlock;
38943 +                       goto out_free_skb;
38945                 next_state = CXGB4_EO_STATE_FLOWC_CLOSE_SEND;
38946         }
38947 @@ -2604,17 +2604,19 @@ int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
38948                 eosw_txq_flush_pending_skbs(eosw_txq);
38950         ret = eosw_txq_enqueue(eosw_txq, skb);
38951 -       if (ret) {
38952 -               dev_consume_skb_any(skb);
38953 -               goto out_unlock;
38954 -       }
38955 +       if (ret)
38956 +               goto out_free_skb;
38958         eosw_txq->state = next_state;
38959         eosw_txq->flowc_idx = eosw_txq->pidx;
38960         eosw_txq_advance(eosw_txq, 1);
38961         ethofld_xmit(dev, eosw_txq);
38963 -out_unlock:
38964 +       spin_unlock_bh(&eosw_txq->lock);
38965 +       return 0;
38967 +out_free_skb:
38968 +       dev_consume_skb_any(skb);
38969         spin_unlock_bh(&eosw_txq->lock);
38970         return ret;
38972 diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
38973 index f04ec53544ae..b1443ff439de 100644
38974 --- a/drivers/net/ethernet/cisco/enic/enic_main.c
38975 +++ b/drivers/net/ethernet/cisco/enic/enic_main.c
38976 @@ -768,7 +768,7 @@ static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq,
38977         return err;
38980 -static inline void enic_queue_wq_skb(struct enic *enic,
38981 +static inline int enic_queue_wq_skb(struct enic *enic,
38982         struct vnic_wq *wq, struct sk_buff *skb)
38984         unsigned int mss = skb_shinfo(skb)->gso_size;
38985 @@ -814,6 +814,7 @@ static inline void enic_queue_wq_skb(struct enic *enic,
38986                 wq->to_use = buf->next;
38987                 dev_kfree_skb(skb);
38988         }
38989 +       return err;
38992  /* netif_tx_lock held, process context with BHs disabled, or BH */
38993 @@ -857,7 +858,8 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
38994                 return NETDEV_TX_BUSY;
38995         }
38997 -       enic_queue_wq_skb(enic, wq, skb);
38998 +       if (enic_queue_wq_skb(enic, wq, skb))
38999 +               goto error;
39001         if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
39002                 netif_tx_stop_queue(txq);
39003 @@ -865,6 +867,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
39004         if (!netdev_xmit_more() || netif_xmit_stopped(txq))
39005                 vnic_wq_doorbell(wq);
39007 +error:
39008         spin_unlock(&enic->wq_lock[txq_map]);
39010         return NETDEV_TX_OK;
39011 diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
39012 index 67c436400352..de7b31842233 100644
39013 --- a/drivers/net/ethernet/freescale/Makefile
39014 +++ b/drivers/net/ethernet/freescale/Makefile
39015 @@ -24,6 +24,4 @@ obj-$(CONFIG_FSL_DPAA_ETH) += dpaa/
39017  obj-$(CONFIG_FSL_DPAA2_ETH) += dpaa2/
39019 -obj-$(CONFIG_FSL_ENETC) += enetc/
39020 -obj-$(CONFIG_FSL_ENETC_MDIO) += enetc/
39021 -obj-$(CONFIG_FSL_ENETC_VF) += enetc/
39022 +obj-y += enetc/
39023 diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
39024 index 3db882322b2b..70aea9c274fe 100644
39025 --- a/drivers/net/ethernet/freescale/fec_main.c
39026 +++ b/drivers/net/ethernet/freescale/fec_main.c
39027 @@ -2048,6 +2048,8 @@ static int fec_enet_mii_probe(struct net_device *ndev)
39028         fep->link = 0;
39029         fep->full_duplex = 0;
39031 +       phy_dev->mac_managed_pm = 1;
39033         phy_attached_info(phy_dev);
39035         return 0;
39036 @@ -3864,6 +3866,7 @@ static int __maybe_unused fec_resume(struct device *dev)
39037                 netif_device_attach(ndev);
39038                 netif_tx_unlock_bh(ndev);
39039                 napi_enable(&fep->napi);
39040 +               phy_init_hw(ndev->phydev);
39041                 phy_start(ndev->phydev);
39042         }
39043         rtnl_unlock();
39044 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
39045 index bf4302a5cf95..0f70158c2551 100644
39046 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
39047 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
39048 @@ -576,8 +576,8 @@ static int hns3_nic_net_stop(struct net_device *netdev)
39049         if (h->ae_algo->ops->set_timer_task)
39050                 h->ae_algo->ops->set_timer_task(priv->ae_handle, false);
39052 -       netif_tx_stop_all_queues(netdev);
39053         netif_carrier_off(netdev);
39054 +       netif_tx_disable(netdev);
39056         hns3_nic_net_down(netdev);
39058 @@ -823,7 +823,7 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
39059   * and it is udp packet, which has a dest port as the IANA assigned.
39060   * the hardware is expected to do the checksum offload, but the
39061   * hardware will not do the checksum offload when udp dest port is
39062 - * 4789 or 6081.
39063 + * 4789, 4790 or 6081.
39064   */
39065  static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
39067 @@ -841,7 +841,8 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
39069         if (!(!skb->encapsulation &&
39070               (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) ||
39071 -             l4.udp->dest == htons(GENEVE_UDP_PORT))))
39072 +             l4.udp->dest == htons(GENEVE_UDP_PORT) ||
39073 +             l4.udp->dest == htons(4790))))
39074                 return false;
39076         skb_checksum_help(skb);
39077 @@ -1277,23 +1278,21 @@ static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size,
39080  static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size,
39081 -                                  u8 max_non_tso_bd_num)
39082 +                                  u8 max_non_tso_bd_num, unsigned int bd_num,
39083 +                                  unsigned int recursion_level)
39085 +#define HNS3_MAX_RECURSION_LEVEL       24
39087         struct sk_buff *frag_skb;
39088 -       unsigned int bd_num = 0;
39090         /* If the total len is within the max bd limit */
39091 -       if (likely(skb->len <= HNS3_MAX_BD_SIZE && !skb_has_frag_list(skb) &&
39092 +       if (likely(skb->len <= HNS3_MAX_BD_SIZE && !recursion_level &&
39093 +                  !skb_has_frag_list(skb) &&
39094                    skb_shinfo(skb)->nr_frags < max_non_tso_bd_num))
39095                 return skb_shinfo(skb)->nr_frags + 1U;
39097 -       /* The below case will always be linearized, return
39098 -        * HNS3_MAX_BD_NUM_TSO + 1U to make sure it is linearized.
39099 -        */
39100 -       if (unlikely(skb->len > HNS3_MAX_TSO_SIZE ||
39101 -                    (!skb_is_gso(skb) && skb->len >
39102 -                     HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))))
39103 -               return HNS3_MAX_TSO_BD_NUM + 1U;
39104 +       if (unlikely(recursion_level >= HNS3_MAX_RECURSION_LEVEL))
39105 +               return UINT_MAX;
39107         bd_num = hns3_skb_bd_num(skb, bd_size, bd_num);
39109 @@ -1301,7 +1300,8 @@ static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size,
39110                 return bd_num;
39112         skb_walk_frags(skb, frag_skb) {
39113 -               bd_num = hns3_skb_bd_num(frag_skb, bd_size, bd_num);
39114 +               bd_num = hns3_tx_bd_num(frag_skb, bd_size, max_non_tso_bd_num,
39115 +                                       bd_num, recursion_level + 1);
39116                 if (bd_num > HNS3_MAX_TSO_BD_NUM)
39117                         return bd_num;
39118         }
39119 @@ -1361,6 +1361,43 @@ void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
39120                 size[i] = skb_frag_size(&shinfo->frags[i]);
39123 +static int hns3_skb_linearize(struct hns3_enet_ring *ring,
39124 +                             struct sk_buff *skb,
39125 +                             u8 max_non_tso_bd_num,
39126 +                             unsigned int bd_num)
39128 +       /* 'bd_num == UINT_MAX' means the skb' fraglist has a
39129 +        * recursion level of over HNS3_MAX_RECURSION_LEVEL.
39130 +        */
39131 +       if (bd_num == UINT_MAX) {
39132 +               u64_stats_update_begin(&ring->syncp);
39133 +               ring->stats.over_max_recursion++;
39134 +               u64_stats_update_end(&ring->syncp);
39135 +               return -ENOMEM;
39136 +       }
39138 +       /* The skb->len has exceeded the hw limitation, linearization
39139 +        * will not help.
39140 +        */
39141 +       if (skb->len > HNS3_MAX_TSO_SIZE ||
39142 +           (!skb_is_gso(skb) && skb->len >
39143 +            HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))) {
39144 +               u64_stats_update_begin(&ring->syncp);
39145 +               ring->stats.hw_limitation++;
39146 +               u64_stats_update_end(&ring->syncp);
39147 +               return -ENOMEM;
39148 +       }
39150 +       if (__skb_linearize(skb)) {
39151 +               u64_stats_update_begin(&ring->syncp);
39152 +               ring->stats.sw_err_cnt++;
39153 +               u64_stats_update_end(&ring->syncp);
39154 +               return -ENOMEM;
39155 +       }
39157 +       return 0;
39160  static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
39161                                   struct net_device *netdev,
39162                                   struct sk_buff *skb)
39163 @@ -1370,7 +1407,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
39164         unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U];
39165         unsigned int bd_num;
39167 -       bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num);
39168 +       bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num, 0, 0);
39169         if (unlikely(bd_num > max_non_tso_bd_num)) {
39170                 if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
39171                     !hns3_skb_need_linearized(skb, bd_size, bd_num,
39172 @@ -1379,16 +1416,11 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
39173                         goto out;
39174                 }
39176 -               if (__skb_linearize(skb))
39177 +               if (hns3_skb_linearize(ring, skb, max_non_tso_bd_num,
39178 +                                      bd_num))
39179                         return -ENOMEM;
39181                 bd_num = hns3_tx_bd_count(skb->len);
39182 -               if ((skb_is_gso(skb) && bd_num > HNS3_MAX_TSO_BD_NUM) ||
39183 -                   (!skb_is_gso(skb) &&
39184 -                    bd_num > max_non_tso_bd_num)) {
39185 -                       trace_hns3_over_max_bd(skb);
39186 -                       return -ENOMEM;
39187 -               }
39189                 u64_stats_update_begin(&ring->syncp);
39190                 ring->stats.tx_copy++;
39191 @@ -1412,6 +1444,10 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
39192                 return bd_num;
39193         }
39195 +       u64_stats_update_begin(&ring->syncp);
39196 +       ring->stats.tx_busy++;
39197 +       u64_stats_update_end(&ring->syncp);
39199         return -EBUSY;
39202 @@ -1459,6 +1495,7 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
39203                                  struct sk_buff *skb, enum hns_desc_type type)
39205         unsigned int size = skb_headlen(skb);
39206 +       struct sk_buff *frag_skb;
39207         int i, ret, bd_num = 0;
39209         if (size) {
39210 @@ -1483,6 +1520,15 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
39211                 bd_num += ret;
39212         }
39214 +       skb_walk_frags(skb, frag_skb) {
39215 +               ret = hns3_fill_skb_to_desc(ring, frag_skb,
39216 +                                           DESC_TYPE_FRAGLIST_SKB);
39217 +               if (unlikely(ret < 0))
39218 +                       return ret;
39220 +               bd_num += ret;
39221 +       }
39223         return bd_num;
39226 @@ -1513,8 +1559,6 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
39227         struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping];
39228         struct netdev_queue *dev_queue;
39229         int pre_ntu, next_to_use_head;
39230 -       struct sk_buff *frag_skb;
39231 -       int bd_num = 0;
39232         bool doorbell;
39233         int ret;
39235 @@ -1530,15 +1574,8 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
39236         ret = hns3_nic_maybe_stop_tx(ring, netdev, skb);
39237         if (unlikely(ret <= 0)) {
39238                 if (ret == -EBUSY) {
39239 -                       u64_stats_update_begin(&ring->syncp);
39240 -                       ring->stats.tx_busy++;
39241 -                       u64_stats_update_end(&ring->syncp);
39242                         hns3_tx_doorbell(ring, 0, true);
39243                         return NETDEV_TX_BUSY;
39244 -               } else if (ret == -ENOMEM) {
39245 -                       u64_stats_update_begin(&ring->syncp);
39246 -                       ring->stats.sw_err_cnt++;
39247 -                       u64_stats_update_end(&ring->syncp);
39248                 }
39250                 hns3_rl_err(netdev, "xmit error: %d!\n", ret);
39251 @@ -1551,21 +1588,14 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
39252         if (unlikely(ret < 0))
39253                 goto fill_err;
39255 +       /* 'ret < 0' means filling error, 'ret == 0' means skb->len is
39256 +        * zero, which is unlikely, and 'ret > 0' means how many tx desc
39257 +        * need to be notified to the hw.
39258 +        */
39259         ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
39260 -       if (unlikely(ret < 0))
39261 +       if (unlikely(ret <= 0))
39262                 goto fill_err;
39264 -       bd_num += ret;
39266 -       skb_walk_frags(skb, frag_skb) {
39267 -               ret = hns3_fill_skb_to_desc(ring, frag_skb,
39268 -                                           DESC_TYPE_FRAGLIST_SKB);
39269 -               if (unlikely(ret < 0))
39270 -                       goto fill_err;
39272 -               bd_num += ret;
39273 -       }
39275         pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
39276                                         (ring->desc_num - 1);
39277         ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
39278 @@ -1576,7 +1606,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
39279         dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
39280         doorbell = __netdev_tx_sent_queue(dev_queue, skb->len,
39281                                           netdev_xmit_more());
39282 -       hns3_tx_doorbell(ring, bd_num, doorbell);
39283 +       hns3_tx_doorbell(ring, ret, doorbell);
39285         return NETDEV_TX_OK;
39287 @@ -1748,11 +1778,15 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
39288                         tx_drop += ring->stats.tx_l4_proto_err;
39289                         tx_drop += ring->stats.tx_l2l3l4_err;
39290                         tx_drop += ring->stats.tx_tso_err;
39291 +                       tx_drop += ring->stats.over_max_recursion;
39292 +                       tx_drop += ring->stats.hw_limitation;
39293                         tx_errors += ring->stats.sw_err_cnt;
39294                         tx_errors += ring->stats.tx_vlan_err;
39295                         tx_errors += ring->stats.tx_l4_proto_err;
39296                         tx_errors += ring->stats.tx_l2l3l4_err;
39297                         tx_errors += ring->stats.tx_tso_err;
39298 +                       tx_errors += ring->stats.over_max_recursion;
39299 +                       tx_errors += ring->stats.hw_limitation;
39300                 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
39302                 /* fetch the rx stats */
39303 @@ -3704,7 +3738,6 @@ static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
39305  static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
39307 -       struct hnae3_ring_chain_node vector_ring_chain;
39308         struct hnae3_handle *h = priv->ae_handle;
39309         struct hns3_enet_tqp_vector *tqp_vector;
39310         int ret;
39311 @@ -3736,6 +3769,8 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
39312         }
39314         for (i = 0; i < priv->vector_num; i++) {
39315 +               struct hnae3_ring_chain_node vector_ring_chain;
39317                 tqp_vector = &priv->tqp_vector[i];
39319                 tqp_vector->rx_group.total_bytes = 0;
39320 @@ -4554,6 +4589,11 @@ static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
39321         struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
39322         int ret = 0;
39324 +       if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
39325 +               netdev_err(kinfo->netdev, "device is not initialized yet\n");
39326 +               return -EFAULT;
39327 +       }
39329         clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
39331         if (netif_running(kinfo->netdev)) {
39332 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
39333 index d069b04ee587..e44224e23315 100644
39334 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
39335 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
39336 @@ -376,6 +376,8 @@ struct ring_stats {
39337                         u64 tx_l4_proto_err;
39338                         u64 tx_l2l3l4_err;
39339                         u64 tx_tso_err;
39340 +                       u64 over_max_recursion;
39341 +                       u64 hw_limitation;
39342                 };
39343                 struct {
39344                         u64 rx_pkts;
39345 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
39346 index adcec4ea7cb9..d20f2e246017 100644
39347 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
39348 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
39349 @@ -44,6 +44,8 @@ static const struct hns3_stats hns3_txq_stats[] = {
39350         HNS3_TQP_STAT("l4_proto_err", tx_l4_proto_err),
39351         HNS3_TQP_STAT("l2l3l4_err", tx_l2l3l4_err),
39352         HNS3_TQP_STAT("tso_err", tx_tso_err),
39353 +       HNS3_TQP_STAT("over_max_recursion", over_max_recursion),
39354 +       HNS3_TQP_STAT("hw_limitation", hw_limitation),
39355  };
39357  #define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats)
39358 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
39359 index 0ca7f1b984bf..78d3eb142df8 100644
39360 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
39361 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
39362 @@ -753,8 +753,9 @@ static int hclge_config_igu_egu_hw_err_int(struct hclge_dev *hdev, bool en)
39364         /* configure IGU,EGU error interrupts */
39365         hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_COMMON_INT_EN, false);
39366 +       desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_TYPE);
39367         if (en)
39368 -               desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN);
39369 +               desc.data[0] |= cpu_to_le32(HCLGE_IGU_ERR_INT_EN);
39371         desc.data[1] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN_MASK);
39373 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
39374 index 608fe26fc3fe..d647f3c84134 100644
39375 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
39376 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
39377 @@ -32,7 +32,8 @@
39378  #define HCLGE_TQP_ECC_ERR_INT_EN_MASK  0x0FFF
39379  #define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN_MASK    0x0F000000
39380  #define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN 0x0F000000
39381 -#define HCLGE_IGU_ERR_INT_EN   0x0000066F
39382 +#define HCLGE_IGU_ERR_INT_EN   0x0000000F
39383 +#define HCLGE_IGU_ERR_INT_TYPE 0x00000660
39384  #define HCLGE_IGU_ERR_INT_EN_MASK      0x000F
39385  #define HCLGE_IGU_TNL_ERR_INT_EN    0x0002AABF
39386  #define HCLGE_IGU_TNL_ERR_INT_EN_MASK  0x003F
39387 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
39388 index b0dbe6dcaa7b..7a560d0e19b9 100644
39389 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
39390 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
39391 @@ -11379,7 +11379,6 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
39392  #define REG_LEN_PER_LINE       (REG_NUM_PER_LINE * sizeof(u32))
39393  #define REG_SEPARATOR_LINE     1
39394  #define REG_NUM_REMAIN_MASK    3
39395 -#define BD_LIST_MAX_NUM                30
39397  int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
39399 @@ -11473,15 +11472,19 @@ static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
39401         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
39402         int data_len_per_desc, bd_num, i;
39403 -       int bd_num_list[BD_LIST_MAX_NUM];
39404 +       int *bd_num_list;
39405         u32 data_len;
39406         int ret;
39408 +       bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
39409 +       if (!bd_num_list)
39410 +               return -ENOMEM;
39412         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
39413         if (ret) {
39414                 dev_err(&hdev->pdev->dev,
39415                         "Get dfx reg bd num fail, status is %d.\n", ret);
39416 -               return ret;
39417 +               goto out;
39418         }
39420         data_len_per_desc = sizeof_field(struct hclge_desc, data);
39421 @@ -11492,6 +11495,8 @@ static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
39422                 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
39423         }
39425 +out:
39426 +       kfree(bd_num_list);
39427         return ret;
39430 @@ -11499,16 +11504,20 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
39432         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
39433         int bd_num, bd_num_max, buf_len, i;
39434 -       int bd_num_list[BD_LIST_MAX_NUM];
39435         struct hclge_desc *desc_src;
39436 +       int *bd_num_list;
39437         u32 *reg = data;
39438         int ret;
39440 +       bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
39441 +       if (!bd_num_list)
39442 +               return -ENOMEM;
39444         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
39445         if (ret) {
39446                 dev_err(&hdev->pdev->dev,
39447                         "Get dfx reg bd num fail, status is %d.\n", ret);
39448 -               return ret;
39449 +               goto out;
39450         }
39452         bd_num_max = bd_num_list[0];
39453 @@ -11517,8 +11526,10 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
39455         buf_len = sizeof(*desc_src) * bd_num_max;
39456         desc_src = kzalloc(buf_len, GFP_KERNEL);
39457 -       if (!desc_src)
39458 -               return -ENOMEM;
39459 +       if (!desc_src) {
39460 +               ret = -ENOMEM;
39461 +               goto out;
39462 +       }
39464         for (i = 0; i < dfx_reg_type_num; i++) {
39465                 bd_num = bd_num_list[i];
39466 @@ -11534,6 +11545,8 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
39467         }
39469         kfree(desc_src);
39470 +out:
39471 +       kfree(bd_num_list);
39472         return ret;
39475 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
39476 index 51a36e74f088..c3bb16b1f060 100644
39477 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
39478 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
39479 @@ -535,7 +535,7 @@ static void hclge_get_link_mode(struct hclge_vport *vport,
39480         unsigned long advertising;
39481         unsigned long supported;
39482         unsigned long send_data;
39483 -       u8 msg_data[10];
39484 +       u8 msg_data[10] = {};
39485         u8 dest_vfid;
39487         advertising = hdev->hw.mac.advertising[0];
39488 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
39489 index e89820702540..c194bba187d6 100644
39490 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
39491 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
39492 @@ -255,6 +255,8 @@ void hclge_mac_start_phy(struct hclge_dev *hdev)
39493         if (!phydev)
39494                 return;
39496 +       phy_loopback(phydev, false);
39498         phy_start(phydev);
39501 diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
39502 index 15f93b355099..5069f690cf0b 100644
39503 --- a/drivers/net/ethernet/intel/i40e/i40e.h
39504 +++ b/drivers/net/ethernet/intel/i40e/i40e.h
39505 @@ -1142,7 +1142,6 @@ static inline bool i40e_is_sw_dcb(struct i40e_pf *pf)
39506         return !!(pf->flags & I40E_FLAG_DISABLE_FW_LLDP);
39509 -void i40e_set_lldp_forwarding(struct i40e_pf *pf, bool enable);
39510  #ifdef CONFIG_I40E_DCB
39511  void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
39512                            struct i40e_dcbx_config *old_cfg,
39513 diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
39514 index ce626eace692..140b677f114d 100644
39515 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
39516 +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
39517 @@ -1566,8 +1566,10 @@ enum i40e_aq_phy_type {
39518         I40E_PHY_TYPE_25GBASE_LR                = 0x22,
39519         I40E_PHY_TYPE_25GBASE_AOC               = 0x23,
39520         I40E_PHY_TYPE_25GBASE_ACC               = 0x24,
39521 -       I40E_PHY_TYPE_2_5GBASE_T                = 0x30,
39522 -       I40E_PHY_TYPE_5GBASE_T                  = 0x31,
39523 +       I40E_PHY_TYPE_2_5GBASE_T                = 0x26,
39524 +       I40E_PHY_TYPE_5GBASE_T                  = 0x27,
39525 +       I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS    = 0x30,
39526 +       I40E_PHY_TYPE_5GBASE_T_LINK_STATUS      = 0x31,
39527         I40E_PHY_TYPE_MAX,
39528         I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP   = 0xFD,
39529         I40E_PHY_TYPE_EMPTY                     = 0xFE,
39530 diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
39531 index a2dba32383f6..32f3facbed1a 100644
39532 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c
39533 +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
39534 @@ -375,6 +375,7 @@ void i40e_client_subtask(struct i40e_pf *pf)
39535                                 clear_bit(__I40E_CLIENT_INSTANCE_OPENED,
39536                                           &cdev->state);
39537                                 i40e_client_del_instance(pf);
39538 +                               return;
39539                         }
39540                 }
39541         }
39542 diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
39543 index ec19e18305ec..ce35e064cf60 100644
39544 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c
39545 +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
39546 @@ -1154,8 +1154,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
39547                 break;
39548         case I40E_PHY_TYPE_100BASE_TX:
39549         case I40E_PHY_TYPE_1000BASE_T:
39550 -       case I40E_PHY_TYPE_2_5GBASE_T:
39551 -       case I40E_PHY_TYPE_5GBASE_T:
39552 +       case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
39553 +       case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
39554         case I40E_PHY_TYPE_10GBASE_T:
39555                 media = I40E_MEDIA_TYPE_BASET;
39556                 break;
39557 diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
39558 index 0e92668012e3..93dd58fda272 100644
39559 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
39560 +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
39561 @@ -841,8 +841,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
39562                                                              10000baseT_Full);
39563                 break;
39564         case I40E_PHY_TYPE_10GBASE_T:
39565 -       case I40E_PHY_TYPE_5GBASE_T:
39566 -       case I40E_PHY_TYPE_2_5GBASE_T:
39567 +       case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
39568 +       case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
39569         case I40E_PHY_TYPE_1000BASE_T:
39570         case I40E_PHY_TYPE_100BASE_TX:
39571                 ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
39572 @@ -1409,7 +1409,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
39574                 memset(&config, 0, sizeof(config));
39575                 config.phy_type = abilities.phy_type;
39576 -               config.abilities = abilities.abilities;
39577 +               config.abilities = abilities.abilities |
39578 +                                  I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
39579                 config.phy_type_ext = abilities.phy_type_ext;
39580                 config.link_speed = abilities.link_speed;
39581                 config.eee_capability = abilities.eee_capability;
39582 @@ -5287,7 +5288,6 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
39583                         i40e_aq_cfg_lldp_mib_change_event(&pf->hw, false, NULL);
39584                         i40e_aq_stop_lldp(&pf->hw, true, false, NULL);
39585                 } else {
39586 -                       i40e_set_lldp_forwarding(pf, false);
39587                         status = i40e_aq_start_lldp(&pf->hw, false, NULL);
39588                         if (status) {
39589                                 adq_err = pf->hw.aq.asq_last_status;
39590 diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
39591 index 527023ee4c07..ac4b44fc19f1 100644
39592 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c
39593 +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
39594 @@ -6878,40 +6878,6 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
39596  #endif /* CONFIG_I40E_DCB */
39598 -/**
39599 - * i40e_set_lldp_forwarding - set forwarding of lldp frames
39600 - * @pf: PF being configured
39601 - * @enable: if forwarding to OS shall be enabled
39602 - *
39603 - * Toggle forwarding of lldp frames behavior,
39604 - * When passing DCB control from firmware to software
39605 - * lldp frames must be forwarded to the software based
39606 - * lldp agent.
39607 - */
39608 -void i40e_set_lldp_forwarding(struct i40e_pf *pf, bool enable)
39610 -       if (pf->lan_vsi == I40E_NO_VSI)
39611 -               return;
39613 -       if (!pf->vsi[pf->lan_vsi])
39614 -               return;
39616 -       /* No need to check the outcome, commands may fail
39617 -        * if desired value is already set
39618 -        */
39619 -       i40e_aq_add_rem_control_packet_filter(&pf->hw, NULL, ETH_P_LLDP,
39620 -                                             I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX |
39621 -                                             I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC,
39622 -                                             pf->vsi[pf->lan_vsi]->seid, 0,
39623 -                                             enable, NULL, NULL);
39625 -       i40e_aq_add_rem_control_packet_filter(&pf->hw, NULL, ETH_P_LLDP,
39626 -                                             I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX |
39627 -                                             I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC,
39628 -                                             pf->vsi[pf->lan_vsi]->seid, 0,
39629 -                                             enable, NULL, NULL);
39632  /**
39633   * i40e_print_link_message - print link up or down
39634   * @vsi: the VSI for which link needs a message
39635 @@ -10735,10 +10701,6 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
39636          */
39637         i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
39638                                                        pf->main_vsi_seid);
39639 -#ifdef CONFIG_I40E_DCB
39640 -       if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)
39641 -               i40e_set_lldp_forwarding(pf, true);
39642 -#endif /* CONFIG_I40E_DCB */
39644         /* restart the VSIs that were rebuilt and running before the reset */
39645         i40e_pf_unquiesce_all_vsi(pf);
39646 @@ -15753,10 +15715,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
39647          */
39648         i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
39649                                                        pf->main_vsi_seid);
39650 -#ifdef CONFIG_I40E_DCB
39651 -       if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)
39652 -               i40e_set_lldp_forwarding(pf, true);
39653 -#endif /* CONFIG_I40E_DCB */
39655         if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
39656                 (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
39657 diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
39658 index 06b4271219b1..70b515049540 100644
39659 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
39660 +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
39661 @@ -1961,10 +1961,6 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
39662                                  union i40e_rx_desc *rx_desc)
39665 -       /* XDP packets use error pointer so abort at this point */
39666 -       if (IS_ERR(skb))
39667 -               return true;
39669         /* ERR_MASK will only have valid bits if EOP set, and
39670          * what we are doing here is actually checking
39671          * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
39672 @@ -2534,7 +2530,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
39673                 }
39675                 /* exit if we failed to retrieve a buffer */
39676 -               if (!skb) {
39677 +               if (!xdp_res && !skb) {
39678                         rx_ring->rx_stats.alloc_buff_failed++;
39679                         rx_buffer->pagecnt_bias++;
39680                         break;
39681 @@ -2547,7 +2543,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
39682                 if (i40e_is_non_eop(rx_ring, rx_desc))
39683                         continue;
39685 -               if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
39686 +               if (xdp_res || i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
39687                         skb = NULL;
39688                         continue;
39689                 }
39690 diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
39691 index 5c10faaca790..c81109a63e90 100644
39692 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h
39693 +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
39694 @@ -239,11 +239,8 @@ struct i40e_phy_info {
39695  #define I40E_CAP_PHY_TYPE_25GBASE_ACC BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC + \
39696                                              I40E_PHY_TYPE_OFFSET)
39697  /* Offset for 2.5G/5G PHY Types value to bit number conversion */
39698 -#define I40E_PHY_TYPE_OFFSET2 (-10)
39699 -#define I40E_CAP_PHY_TYPE_2_5GBASE_T BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T + \
39700 -                                            I40E_PHY_TYPE_OFFSET2)
39701 -#define I40E_CAP_PHY_TYPE_5GBASE_T BIT_ULL(I40E_PHY_TYPE_5GBASE_T + \
39702 -                                            I40E_PHY_TYPE_OFFSET2)
39703 +#define I40E_CAP_PHY_TYPE_2_5GBASE_T BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T)
39704 +#define I40E_CAP_PHY_TYPE_5GBASE_T BIT_ULL(I40E_PHY_TYPE_5GBASE_T)
39705  #define I40E_HW_CAP_MAX_GPIO                   30
39706  /* Capabilities of a PF or a VF or the whole device */
39707  struct i40e_hw_capabilities {
39708 diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
39709 index dc5b3c06d1e0..ebd08543791b 100644
39710 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c
39711 +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
39712 @@ -3899,8 +3899,6 @@ static void iavf_remove(struct pci_dev *pdev)
39714         iounmap(hw->hw_addr);
39715         pci_release_regions(pdev);
39716 -       iavf_free_all_tx_resources(adapter);
39717 -       iavf_free_all_rx_resources(adapter);
39718         iavf_free_queues(adapter);
39719         kfree(adapter->vf_res);
39720         spin_lock_bh(&adapter->mac_vlan_list_lock);
39721 diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
39722 index d13c7fc8fb0a..195d122c9cb2 100644
39723 --- a/drivers/net/ethernet/intel/ice/ice_lib.c
39724 +++ b/drivers/net/ethernet/intel/ice/ice_lib.c
39725 @@ -2818,38 +2818,46 @@ int ice_vsi_release(struct ice_vsi *vsi)
39728  /**
39729 - * ice_vsi_rebuild_update_coalesce - set coalesce for a q_vector
39730 + * ice_vsi_rebuild_update_coalesce_intrl - set interrupt rate limit for a q_vector
39731   * @q_vector: pointer to q_vector which is being updated
39732 - * @coalesce: pointer to array of struct with stored coalesce
39733 + * @stored_intrl_setting: original INTRL setting
39734   *
39735   * Set coalesce param in q_vector and update these parameters in HW.
39736   */
39737  static void
39738 -ice_vsi_rebuild_update_coalesce(struct ice_q_vector *q_vector,
39739 -                               struct ice_coalesce_stored *coalesce)
39740 +ice_vsi_rebuild_update_coalesce_intrl(struct ice_q_vector *q_vector,
39741 +                                     u16 stored_intrl_setting)
39743 -       struct ice_ring_container *rx_rc = &q_vector->rx;
39744 -       struct ice_ring_container *tx_rc = &q_vector->tx;
39745         struct ice_hw *hw = &q_vector->vsi->back->hw;
39747 -       tx_rc->itr_setting = coalesce->itr_tx;
39748 -       rx_rc->itr_setting = coalesce->itr_rx;
39750 -       /* dynamic ITR values will be updated during Tx/Rx */
39751 -       if (!ITR_IS_DYNAMIC(tx_rc->itr_setting))
39752 -               wr32(hw, GLINT_ITR(tx_rc->itr_idx, q_vector->reg_idx),
39753 -                    ITR_REG_ALIGN(tx_rc->itr_setting) >>
39754 -                    ICE_ITR_GRAN_S);
39755 -       if (!ITR_IS_DYNAMIC(rx_rc->itr_setting))
39756 -               wr32(hw, GLINT_ITR(rx_rc->itr_idx, q_vector->reg_idx),
39757 -                    ITR_REG_ALIGN(rx_rc->itr_setting) >>
39758 -                    ICE_ITR_GRAN_S);
39760 -       q_vector->intrl = coalesce->intrl;
39761 +       q_vector->intrl = stored_intrl_setting;
39762         wr32(hw, GLINT_RATE(q_vector->reg_idx),
39763              ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
39766 +/**
39767 + * ice_vsi_rebuild_update_coalesce_itr - set coalesce for a q_vector
39768 + * @q_vector: pointer to q_vector which is being updated
39769 + * @rc: pointer to ring container
39770 + * @stored_itr_setting: original ITR setting
39771 + *
39772 + * Set coalesce param in q_vector and update these parameters in HW.
39773 + */
39774 +static void
39775 +ice_vsi_rebuild_update_coalesce_itr(struct ice_q_vector *q_vector,
39776 +                                   struct ice_ring_container *rc,
39777 +                                   u16 stored_itr_setting)
39779 +       struct ice_hw *hw = &q_vector->vsi->back->hw;
39781 +       rc->itr_setting = stored_itr_setting;
39783 +       /* dynamic ITR values will be updated during Tx/Rx */
39784 +       if (!ITR_IS_DYNAMIC(rc->itr_setting))
39785 +               wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
39786 +                    ITR_REG_ALIGN(rc->itr_setting) >> ICE_ITR_GRAN_S);
39789  /**
39790   * ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors
39791   * @vsi: VSI connected with q_vectors
39792 @@ -2869,6 +2877,11 @@ ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
39793                 coalesce[i].itr_tx = q_vector->tx.itr_setting;
39794                 coalesce[i].itr_rx = q_vector->rx.itr_setting;
39795                 coalesce[i].intrl = q_vector->intrl;
39797 +               if (i < vsi->num_txq)
39798 +                       coalesce[i].tx_valid = true;
39799 +               if (i < vsi->num_rxq)
39800 +                       coalesce[i].rx_valid = true;
39801         }
39803         return vsi->num_q_vectors;
39804 @@ -2893,17 +2906,59 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
39805         if ((size && !coalesce) || !vsi)
39806                 return;
39808 -       for (i = 0; i < size && i < vsi->num_q_vectors; i++)
39809 -               ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
39810 -                                               &coalesce[i]);
39812 -       /* number of q_vectors increased, so assume coalesce settings were
39813 -        * changed globally (i.e. ethtool -C eth0 instead of per-queue) and use
39814 -        * the previous settings from q_vector 0 for all of the new q_vectors
39815 +       /* There are a couple of cases that have to be handled here:
39816 +        *   1. The case where the number of queue vectors stays the same, but
39817 +        *      the number of Tx or Rx rings changes (the first for loop)
39818 +        *   2. The case where the number of queue vectors increased (the
39819 +        *      second for loop)
39820          */
39821 -       for (; i < vsi->num_q_vectors; i++)
39822 -               ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
39823 -                                               &coalesce[0]);
39824 +       for (i = 0; i < size && i < vsi->num_q_vectors; i++) {
39825 +               /* There are 2 cases to handle here and they are the same for
39826 +                * both Tx and Rx:
39827 +                *   if the entry was valid previously (coalesce[i].[tr]x_valid
39828 +                *   and the loop variable is less than the number of rings
39829 +                *   allocated, then write the previous values
39830 +                *
39831 +                *   if the entry was not valid previously, but the number of
39832 +                *   rings is less than are allocated (this means the number of
39833 +                *   rings increased from previously), then write out the
39834 +                *   values in the first element
39835 +                */
39836 +               if (i < vsi->alloc_rxq && coalesce[i].rx_valid)
39837 +                       ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
39838 +                                                           &vsi->q_vectors[i]->rx,
39839 +                                                           coalesce[i].itr_rx);
39840 +               else if (i < vsi->alloc_rxq)
39841 +                       ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
39842 +                                                           &vsi->q_vectors[i]->rx,
39843 +                                                           coalesce[0].itr_rx);
39845 +               if (i < vsi->alloc_txq && coalesce[i].tx_valid)
39846 +                       ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
39847 +                                                           &vsi->q_vectors[i]->tx,
39848 +                                                           coalesce[i].itr_tx);
39849 +               else if (i < vsi->alloc_txq)
39850 +                       ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
39851 +                                                           &vsi->q_vectors[i]->tx,
39852 +                                                           coalesce[0].itr_tx);
39854 +               ice_vsi_rebuild_update_coalesce_intrl(vsi->q_vectors[i],
39855 +                                                     coalesce[i].intrl);
39856 +       }
39858 +       /* the number of queue vectors increased so write whatever is in
39859 +        * the first element
39860 +        */
39861 +       for (; i < vsi->num_q_vectors; i++) {
39862 +               ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
39863 +                                                   &vsi->q_vectors[i]->tx,
39864 +                                                   coalesce[0].itr_tx);
39865 +               ice_vsi_rebuild_update_coalesce_itr(vsi->q_vectors[i],
39866 +                                                   &vsi->q_vectors[i]->rx,
39867 +                                                   coalesce[0].itr_rx);
39868 +               ice_vsi_rebuild_update_coalesce_intrl(vsi->q_vectors[i],
39869 +                                                     coalesce[0].intrl);
39870 +       }
39873  /**
39874 @@ -2932,9 +2987,11 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
39876         coalesce = kcalloc(vsi->num_q_vectors,
39877                            sizeof(struct ice_coalesce_stored), GFP_KERNEL);
39878 -       if (coalesce)
39879 -               prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi,
39880 -                                                                 coalesce);
39881 +       if (!coalesce)
39882 +               return -ENOMEM;
39884 +       prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
39886         ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
39887         ice_vsi_free_q_vectors(vsi);
39889 diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
39890 index 5dab77504fa5..672a7ff0ee36 100644
39891 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h
39892 +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
39893 @@ -351,6 +351,8 @@ struct ice_coalesce_stored {
39894         u16 itr_tx;
39895         u16 itr_rx;
39896         u8 intrl;
39897 +       u8 tx_valid;
39898 +       u8 rx_valid;
39899  };
39901  /* iterator for handling rings in ring container */
39902 diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
39903 index 25dd903a3e92..d849b0f65de2 100644
39904 --- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
39905 +++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
39906 @@ -431,7 +431,8 @@ static void prestera_port_handle_event(struct prestera_switch *sw,
39907                         netif_carrier_on(port->dev);
39908                         if (!delayed_work_pending(caching_dw))
39909                                 queue_delayed_work(prestera_wq, caching_dw, 0);
39910 -               } else {
39911 +               } else if (netif_running(port->dev) &&
39912 +                          netif_carrier_ok(port->dev)) {
39913                         netif_carrier_off(port->dev);
39914                         if (delayed_work_pending(caching_dw))
39915                                 cancel_delayed_work(caching_dw);
39916 diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
39917 index 01d3ee4b5829..bcd5e7ae8482 100644
39918 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
39919 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
39920 @@ -1319,7 +1319,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
39921                 skb->protocol = eth_type_trans(skb, netdev);
39923                 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
39924 -                   RX_DMA_VID(trxd.rxd3))
39925 +                   (trxd.rxd2 & RX_DMA_VTAG))
39926                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
39927                                                RX_DMA_VID(trxd.rxd3));
39928                 skb_record_rx_queue(skb, 0);
39929 diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
39930 index fd3cec8f06ba..c47272100615 100644
39931 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
39932 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
39933 @@ -296,6 +296,7 @@
39934  #define RX_DMA_LSO             BIT(30)
39935  #define RX_DMA_PLEN0(_x)       (((_x) & 0x3fff) << 16)
39936  #define RX_DMA_GET_PLEN0(_x)   (((_x) >> 16) & 0x3fff)
39937 +#define RX_DMA_VTAG            BIT(15)
39939  /* QDMA descriptor rxd3 */
39940  #define RX_DMA_VID(_x)         ((_x) & 0xfff)
39941 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
39942 index bdbffe484fce..d2efe2455955 100644
39943 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
39944 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
39945 @@ -576,7 +576,7 @@ static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
39947         pi = mlx5e_txqsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS);
39948         wqe = MLX5E_TX_FETCH_WQE(sq, pi);
39949 -       prefetchw(wqe->data);
39950 +       net_prefetchw(wqe->data);
39952         *session = (struct mlx5e_tx_mpwqe) {
39953                 .wqe = wqe,
39954 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
39955 index 22bee4990232..bb61f52d782d 100644
39956 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
39957 +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
39958 @@ -850,7 +850,7 @@ mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx)
39959                 return;
39960         }
39962 -       if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action &
39963 +       if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action ==
39964             MLX5_ACCEL_ESP_ACTION_DECRYPT)
39965                 ida_simple_remove(&fipsec->halloc, sa_ctx->sa_handle);
39967 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
39968 index 9143ec326ebf..f146c618a78e 100644
39969 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
39970 +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
39971 @@ -1532,6 +1532,7 @@ static void dr_ste_v1_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *val
39973         DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_gvmi, misc_mask, source_port);
39974         DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_qp, misc_mask, source_sqn);
39975 +       misc_mask->source_eswitch_owner_vhca_id = 0;
39978  static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
39979 diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
39980 index 7846a21555ef..1f6bc0c7e91d 100644
39981 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
39982 +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
39983 @@ -535,6 +535,16 @@ mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
39984         u16 erif_index = 0;
39985         int err;
39987 +       /* Add the eRIF */
39988 +       if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
39989 +               erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
39990 +               err = mr->mr_ops->route_erif_add(mlxsw_sp,
39991 +                                                rve->mr_route->route_priv,
39992 +                                                erif_index);
39993 +               if (err)
39994 +                       return err;
39995 +       }
39997         /* Update the route action, as the new eVIF can be a tunnel or a pimreg
39998          * device which will require updating the action.
39999          */
40000 @@ -544,17 +554,7 @@ mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
40001                                                       rve->mr_route->route_priv,
40002                                                       route_action);
40003                 if (err)
40004 -                       return err;
40005 -       }
40007 -       /* Add the eRIF */
40008 -       if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
40009 -               erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
40010 -               err = mr->mr_ops->route_erif_add(mlxsw_sp,
40011 -                                                rve->mr_route->route_priv,
40012 -                                                erif_index);
40013 -               if (err)
40014 -                       goto err_route_erif_add;
40015 +                       goto err_route_action_update;
40016         }
40018         /* Update the minimum MTU */
40019 @@ -572,14 +572,14 @@ mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
40020         return 0;
40022  err_route_min_mtu_update:
40023 -       if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
40024 -               mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv,
40025 -                                          erif_index);
40026 -err_route_erif_add:
40027         if (route_action != rve->mr_route->route_action)
40028                 mr->mr_ops->route_action_update(mlxsw_sp,
40029                                                 rve->mr_route->route_priv,
40030                                                 rve->mr_route->route_action);
40031 +err_route_action_update:
40032 +       if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
40033 +               mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv,
40034 +                                          erif_index);
40035         return err;
40038 diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
40039 index 713ee3041d49..bea978df7713 100644
40040 --- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
40041 +++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
40042 @@ -364,6 +364,7 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
40044         attrs.split = eth_port.is_split;
40045         attrs.splittable = !attrs.split;
40046 +       attrs.lanes = eth_port.port_lanes;
40047         attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
40048         attrs.phys.port_number = eth_port.label_port;
40049         attrs.phys.split_subport_number = eth_port.label_subport;
40050 diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
40051 index 117188e3c7de..87b8c032195d 100644
40052 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
40053 +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
40054 @@ -1437,6 +1437,7 @@ netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt,
40056         struct emac_tpd tpd;
40057         u32 prod_idx;
40058 +       int len;
40060         memset(&tpd, 0, sizeof(tpd));
40062 @@ -1456,9 +1457,10 @@ netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt,
40063         if (skb_network_offset(skb) != ETH_HLEN)
40064                 TPD_TYP_SET(&tpd, 1);
40066 +       len = skb->len;
40067         emac_tx_fill_tpd(adpt, tx_q, skb, &tpd);
40069 -       netdev_sent_queue(adpt->netdev, skb->len);
40070 +       netdev_sent_queue(adpt->netdev, len);
40072         /* Make sure the are enough free descriptors to hold one
40073          * maximum-sized SKB.  We need one desc for each fragment,
40074 diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
40075 index eb0c03bdb12d..cad57d58d764 100644
40076 --- a/drivers/net/ethernet/renesas/ravb_main.c
40077 +++ b/drivers/net/ethernet/renesas/ravb_main.c
40078 @@ -911,31 +911,20 @@ static int ravb_poll(struct napi_struct *napi, int budget)
40079         int q = napi - priv->napi;
40080         int mask = BIT(q);
40081         int quota = budget;
40082 -       u32 ris0, tis;
40084 -       for (;;) {
40085 -               tis = ravb_read(ndev, TIS);
40086 -               ris0 = ravb_read(ndev, RIS0);
40087 -               if (!((ris0 & mask) || (tis & mask)))
40088 -                       break;
40089 +       /* Processing RX Descriptor Ring */
40090 +       /* Clear RX interrupt */
40091 +       ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
40092 +       if (ravb_rx(ndev, &quota, q))
40093 +               goto out;
40095 -               /* Processing RX Descriptor Ring */
40096 -               if (ris0 & mask) {
40097 -                       /* Clear RX interrupt */
40098 -                       ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
40099 -                       if (ravb_rx(ndev, &quota, q))
40100 -                               goto out;
40101 -               }
40102 -               /* Processing TX Descriptor Ring */
40103 -               if (tis & mask) {
40104 -                       spin_lock_irqsave(&priv->lock, flags);
40105 -                       /* Clear TX interrupt */
40106 -                       ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
40107 -                       ravb_tx_free(ndev, q, true);
40108 -                       netif_wake_subqueue(ndev, q);
40109 -                       spin_unlock_irqrestore(&priv->lock, flags);
40110 -               }
40111 -       }
40112 +       /* Processing RX Descriptor Ring */
40113 +       spin_lock_irqsave(&priv->lock, flags);
40114 +       /* Clear TX interrupt */
40115 +       ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
40116 +       ravb_tx_free(ndev, q, true);
40117 +       netif_wake_subqueue(ndev, q);
40118 +       spin_unlock_irqrestore(&priv->lock, flags);
40120         napi_complete(napi);
40122 diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
40123 index da6886dcac37..4fa72b573c17 100644
40124 --- a/drivers/net/ethernet/sfc/ef10.c
40125 +++ b/drivers/net/ethernet/sfc/ef10.c
40126 @@ -2928,8 +2928,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
40128         /* Get the transmit queue */
40129         tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
40130 -       tx_queue = efx_channel_get_tx_queue(channel,
40131 -                                           tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
40132 +       tx_queue = channel->tx_queue + (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
40134         if (!tx_queue->timestamping) {
40135                 /* Transmit completion */
40136 diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
40137 index 1bfeee283ea9..a3ca406a3561 100644
40138 --- a/drivers/net/ethernet/sfc/efx_channels.c
40139 +++ b/drivers/net/ethernet/sfc/efx_channels.c
40140 @@ -914,6 +914,8 @@ int efx_set_channels(struct efx_nic *efx)
40141                         }
40142                 }
40143         }
40144 +       if (xdp_queue_number)
40145 +               efx->xdp_tx_queue_count = xdp_queue_number;
40147         rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
40148         if (rc)
40149 diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
40150 index d75cf5ff5686..49df02ecee91 100644
40151 --- a/drivers/net/ethernet/sfc/farch.c
40152 +++ b/drivers/net/ethernet/sfc/farch.c
40153 @@ -835,14 +835,14 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
40154                 /* Transmit completion */
40155                 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
40156                 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
40157 -               tx_queue = efx_channel_get_tx_queue(
40158 -                       channel, tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
40159 +               tx_queue = channel->tx_queue +
40160 +                               (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
40161                 efx_xmit_done(tx_queue, tx_ev_desc_ptr);
40162         } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
40163                 /* Rewrite the FIFO write pointer */
40164                 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
40165 -               tx_queue = efx_channel_get_tx_queue(
40166 -                       channel, tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
40167 +               tx_queue = channel->tx_queue +
40168 +                               (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
40170                 netif_tx_lock(efx->net_dev);
40171                 efx_farch_notify_tx_desc(tx_queue);
40172 @@ -1081,16 +1081,16 @@ static void
40173  efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
40175         struct efx_tx_queue *tx_queue;
40176 +       struct efx_channel *channel;
40177         int qid;
40179         qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
40180         if (qid < EFX_MAX_TXQ_PER_CHANNEL * (efx->n_tx_channels + efx->n_extra_tx_channels)) {
40181 -               tx_queue = efx_get_tx_queue(efx, qid / EFX_MAX_TXQ_PER_CHANNEL,
40182 -                                           qid % EFX_MAX_TXQ_PER_CHANNEL);
40183 -               if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
40184 +               channel = efx_get_tx_channel(efx, qid / EFX_MAX_TXQ_PER_CHANNEL);
40185 +               tx_queue = channel->tx_queue + (qid % EFX_MAX_TXQ_PER_CHANNEL);
40186 +               if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0))
40187                         efx_farch_magic_event(tx_queue->channel,
40188                                               EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
40189 -               }
40190         }
40193 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
40194 index bf3250e0e59c..749585fe6fc9 100644
40195 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
40196 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
40197 @@ -352,6 +352,8 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
40198         plat_dat->bsp_priv = gmac;
40199         plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
40200         plat_dat->multicast_filter_bins = 0;
40201 +       plat_dat->tx_fifo_size = 8192;
40202 +       plat_dat->rx_fifo_size = 8192;
40204         err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
40205         if (err)
40206 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
40207 index 29f765a246a0..aaf37598cbd3 100644
40208 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
40209 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
40210 @@ -638,6 +638,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
40211         value &= ~GMAC_PACKET_FILTER_PCF;
40212         value &= ~GMAC_PACKET_FILTER_PM;
40213         value &= ~GMAC_PACKET_FILTER_PR;
40214 +       value &= ~GMAC_PACKET_FILTER_RA;
40215         if (dev->flags & IFF_PROMISC) {
40216                 /* VLAN Tag Filter Fail Packets Queuing */
40217                 if (hw->vlan_fail_q_en) {
40218 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
40219 index 62aa0e95beb7..a7249e4071f1 100644
40220 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
40221 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
40222 @@ -222,7 +222,7 @@ static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
40223                                        u32 channel, int fifosz, u8 qmode)
40225         unsigned int rqs = fifosz / 256 - 1;
40226 -       u32 mtl_rx_op, mtl_rx_int;
40227 +       u32 mtl_rx_op;
40229         mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
40231 @@ -283,11 +283,6 @@ static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
40232         }
40234         writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
40236 -       /* Enable MTL RX overflow */
40237 -       mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel));
40238 -       writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN,
40239 -              ioaddr + MTL_CHAN_INT_CTRL(channel));
40242  static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
40243 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
40244 index 4749bd0af160..369d7cde3993 100644
40245 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
40246 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
40247 @@ -2757,8 +2757,15 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
40249         /* Enable TSO */
40250         if (priv->tso) {
40251 -               for (chan = 0; chan < tx_cnt; chan++)
40252 +               for (chan = 0; chan < tx_cnt; chan++) {
40253 +                       struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
40255 +                       /* TSO and TBS cannot co-exist */
40256 +                       if (tx_q->tbs & STMMAC_TBS_AVAIL)
40257 +                               continue;
40259                         stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
40260 +               }
40261         }
40263         /* Enable Split Header */
40264 @@ -2850,9 +2857,8 @@ static int stmmac_open(struct net_device *dev)
40265                 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
40266                 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
40268 +               /* Setup per-TXQ tbs flag before TX descriptor alloc */
40269                 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
40270 -               if (stmmac_enable_tbs(priv, priv->ioaddr, tbs_en, chan))
40271 -                       tx_q->tbs &= ~STMMAC_TBS_AVAIL;
40272         }
40274         ret = alloc_dma_desc_resources(priv);
40275 @@ -4162,7 +4168,6 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
40276         /* To handle GMAC own interrupts */
40277         if ((priv->plat->has_gmac) || xmac) {
40278                 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
40279 -               int mtl_status;
40281                 if (unlikely(status)) {
40282                         /* For LPI we need to save the tx status */
40283 @@ -4173,17 +4178,8 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
40284                 }
40286                 for (queue = 0; queue < queues_count; queue++) {
40287 -                       struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
40289 -                       mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
40290 -                                                               queue);
40291 -                       if (mtl_status != -EINVAL)
40292 -                               status |= mtl_status;
40294 -                       if (status & CORE_IRQ_MTL_RX_OVERFLOW)
40295 -                               stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
40296 -                                                      rx_q->rx_tail_addr,
40297 -                                                      queue);
40298 +                       status = stmmac_host_mtl_irq_status(priv, priv->hw,
40299 +                                                           queue);
40300                 }
40302                 /* PCS link status */
40303 diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
40304 index c7031e1960d4..03055c96f076 100644
40305 --- a/drivers/net/ethernet/ti/davinci_emac.c
40306 +++ b/drivers/net/ethernet/ti/davinci_emac.c
40307 @@ -169,11 +169,11 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
40308  /* EMAC mac_status register */
40309  #define EMAC_MACSTATUS_TXERRCODE_MASK  (0xF00000)
40310  #define EMAC_MACSTATUS_TXERRCODE_SHIFT (20)
40311 -#define EMAC_MACSTATUS_TXERRCH_MASK    (0x7)
40312 +#define EMAC_MACSTATUS_TXERRCH_MASK    (0x70000)
40313  #define EMAC_MACSTATUS_TXERRCH_SHIFT   (16)
40314  #define EMAC_MACSTATUS_RXERRCODE_MASK  (0xF000)
40315  #define EMAC_MACSTATUS_RXERRCODE_SHIFT (12)
40316 -#define EMAC_MACSTATUS_RXERRCH_MASK    (0x7)
40317 +#define EMAC_MACSTATUS_RXERRCH_MASK    (0x700)
40318  #define EMAC_MACSTATUS_RXERRCH_SHIFT   (8)
40320  /* EMAC RX register masks */
40321 diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
40322 index c6eb7f2368aa..911b5ef9e680 100644
40323 --- a/drivers/net/ethernet/xilinx/Kconfig
40324 +++ b/drivers/net/ethernet/xilinx/Kconfig
40325 @@ -18,12 +18,14 @@ if NET_VENDOR_XILINX
40327  config XILINX_EMACLITE
40328         tristate "Xilinx 10/100 Ethernet Lite support"
40329 +       depends on HAS_IOMEM
40330         select PHYLIB
40331         help
40332           This driver supports the 10/100 Ethernet Lite from Xilinx.
40334  config XILINX_AXI_EMAC
40335         tristate "Xilinx 10/100/1000 AXI Ethernet support"
40336 +       depends on HAS_IOMEM
40337         select PHYLINK
40338         help
40339           This driver supports the 10/100/1000 Ethernet from Xilinx for the
40340 @@ -31,6 +33,7 @@ config XILINX_AXI_EMAC
40342  config XILINX_LL_TEMAC
40343         tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
40344 +       depends on HAS_IOMEM
40345         select PHYLIB
40346         help
40347           This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
40348 diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
40349 index 0152f1e70783..9defaa21a1a9 100644
40350 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
40351 +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
40352 @@ -1085,7 +1085,7 @@ static int init_queues(struct port *port)
40353         int i;
40355         if (!ports_open) {
40356 -               dma_pool = dma_pool_create(DRV_NAME, port->netdev->dev.parent,
40357 +               dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
40358                                            POOL_ALLOC_SIZE, 32, 0);
40359                 if (!dma_pool)
40360                         return -ENOMEM;
40361 @@ -1435,6 +1435,9 @@ static int ixp4xx_eth_probe(struct platform_device *pdev)
40362         ndev->netdev_ops = &ixp4xx_netdev_ops;
40363         ndev->ethtool_ops = &ixp4xx_ethtool_ops;
40364         ndev->tx_queue_len = 100;
40365 +       /* Inherit the DMA masks from the platform device */
40366 +       ndev->dev.dma_mask = dev->dma_mask;
40367 +       ndev->dev.coherent_dma_mask = dev->coherent_dma_mask;
40369         netif_napi_add(ndev, &port->napi, eth_poll, NAPI_WEIGHT);
40371 diff --git a/drivers/net/fddi/Kconfig b/drivers/net/fddi/Kconfig
40372 index f722079dfb6a..f99c1048c97e 100644
40373 --- a/drivers/net/fddi/Kconfig
40374 +++ b/drivers/net/fddi/Kconfig
40375 @@ -40,17 +40,20 @@ config DEFXX
40377  config DEFXX_MMIO
40378         bool
40379 -       prompt "Use MMIO instead of PIO" if PCI || EISA
40380 +       prompt "Use MMIO instead of IOP" if PCI || EISA
40381         depends on DEFXX
40382 -       default n if PCI || EISA
40383 +       default n if EISA
40384         default y
40385         help
40386           This instructs the driver to use EISA or PCI memory-mapped I/O
40387 -         (MMIO) as appropriate instead of programmed I/O ports (PIO).
40388 +         (MMIO) as appropriate instead of programmed I/O ports (IOP).
40389           Enabling this gives an improvement in processing time in parts
40390 -         of the driver, but it may cause problems with EISA (DEFEA)
40391 -         adapters.  TURBOchannel does not have the concept of I/O ports,
40392 -         so MMIO is always used for these (DEFTA) adapters.
40393 +         of the driver, but it requires a memory window to be configured
40394 +         for EISA (DEFEA) adapters that may not always be available.
40395 +         Conversely some PCIe host bridges do not support IOP, so MMIO
40396 +         may be required to access PCI (DEFPA) adapters on downstream PCI
40397 +         buses with some systems.  TURBOchannel does not have the concept
40398 +         of I/O ports, so MMIO is always used for these (DEFTA) adapters.
40400           If unsure, say N.
40402 diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
40403 index 077c68498f04..c7ce6d5491af 100644
40404 --- a/drivers/net/fddi/defxx.c
40405 +++ b/drivers/net/fddi/defxx.c
40406 @@ -495,6 +495,25 @@ static const struct net_device_ops dfx_netdev_ops = {
40407         .ndo_set_mac_address    = dfx_ctl_set_mac_address,
40408  };
40410 +static void dfx_register_res_alloc_err(const char *print_name, bool mmio,
40411 +                                      bool eisa)
40413 +       pr_err("%s: Cannot use %s, no address set, aborting\n",
40414 +              print_name, mmio ? "MMIO" : "I/O");
40415 +       pr_err("%s: Recompile driver with \"CONFIG_DEFXX_MMIO=%c\"\n",
40416 +              print_name, mmio ? 'n' : 'y');
40417 +       if (eisa && mmio)
40418 +               pr_err("%s: Or run ECU and set adapter's MMIO location\n",
40419 +                      print_name);
40422 +static void dfx_register_res_err(const char *print_name, bool mmio,
40423 +                                unsigned long start, unsigned long len)
40425 +       pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, aborting\n",
40426 +              print_name, mmio ? "MMIO" : "I/O", len, start);
40429  /*
40430   * ================
40431   * = dfx_register =
40432 @@ -568,15 +587,12 @@ static int dfx_register(struct device *bdev)
40433         dev_set_drvdata(bdev, dev);
40435         dfx_get_bars(bdev, bar_start, bar_len);
40436 -       if (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0) {
40437 -               pr_err("%s: Cannot use MMIO, no address set, aborting\n",
40438 -                      print_name);
40439 -               pr_err("%s: Run ECU and set adapter's MMIO location\n",
40440 -                      print_name);
40441 -               pr_err("%s: Or recompile driver with \"CONFIG_DEFXX_MMIO=n\""
40442 -                      "\n", print_name);
40443 +       if (bar_len[0] == 0 ||
40444 +           (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0)) {
40445 +               dfx_register_res_alloc_err(print_name, dfx_use_mmio,
40446 +                                          dfx_bus_eisa);
40447                 err = -ENXIO;
40448 -               goto err_out;
40449 +               goto err_out_disable;
40450         }
40452         if (dfx_use_mmio)
40453 @@ -585,18 +601,16 @@ static int dfx_register(struct device *bdev)
40454         else
40455                 region = request_region(bar_start[0], bar_len[0], print_name);
40456         if (!region) {
40457 -               pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, "
40458 -                      "aborting\n", dfx_use_mmio ? "MMIO" : "I/O", print_name,
40459 -                      (long)bar_len[0], (long)bar_start[0]);
40460 +               dfx_register_res_err(print_name, dfx_use_mmio,
40461 +                                    bar_start[0], bar_len[0]);
40462                 err = -EBUSY;
40463                 goto err_out_disable;
40464         }
40465         if (bar_start[1] != 0) {
40466                 region = request_region(bar_start[1], bar_len[1], print_name);
40467                 if (!region) {
40468 -                       pr_err("%s: Cannot reserve I/O resource "
40469 -                              "0x%lx @ 0x%lx, aborting\n", print_name,
40470 -                              (long)bar_len[1], (long)bar_start[1]);
40471 +                       dfx_register_res_err(print_name, 0,
40472 +                                            bar_start[1], bar_len[1]);
40473                         err = -EBUSY;
40474                         goto err_out_csr_region;
40475                 }
40476 @@ -604,9 +618,8 @@ static int dfx_register(struct device *bdev)
40477         if (bar_start[2] != 0) {
40478                 region = request_region(bar_start[2], bar_len[2], print_name);
40479                 if (!region) {
40480 -                       pr_err("%s: Cannot reserve I/O resource "
40481 -                              "0x%lx @ 0x%lx, aborting\n", print_name,
40482 -                              (long)bar_len[2], (long)bar_start[2]);
40483 +                       dfx_register_res_err(print_name, 0,
40484 +                                            bar_start[2], bar_len[2]);
40485                         err = -EBUSY;
40486                         goto err_out_bh_region;
40487                 }
40488 diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
40489 index 42f31c681846..61cd3dd4deab 100644
40490 --- a/drivers/net/geneve.c
40491 +++ b/drivers/net/geneve.c
40492 @@ -891,7 +891,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
40493         __be16 sport;
40494         int err;
40496 -       if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
40497 +       if (!pskb_inet_may_pull(skb))
40498                 return -EINVAL;
40500         sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
40501 @@ -988,7 +988,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
40502         __be16 sport;
40503         int err;
40505 -       if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
40506 +       if (!pskb_inet_may_pull(skb))
40507                 return -EINVAL;
40509         sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
40510 diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
40511 index 390d3403386a..144892060718 100644
40512 --- a/drivers/net/ipa/gsi.c
40513 +++ b/drivers/net/ipa/gsi.c
40514 @@ -211,8 +211,8 @@ static void gsi_irq_setup(struct gsi *gsi)
40515         iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
40517         /* The inter-EE registers are in the non-adjusted address range */
40518 -       iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);
40519 -       iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET);
40520 +       iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET);
40521 +       iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET);
40523         iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
40525 diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
40526 index 1622d8cf8dea..48ef04afab79 100644
40527 --- a/drivers/net/ipa/gsi_reg.h
40528 +++ b/drivers/net/ipa/gsi_reg.h
40529 @@ -53,15 +53,15 @@
40530  #define GSI_EE_REG_ADJUST                      0x0000d000      /* IPA v4.5+ */
40532  /* The two inter-EE IRQ register offsets are relative to gsi->virt_raw */
40533 -#define GSI_INTER_EE_SRC_CH_IRQ_OFFSET \
40534 -                       GSI_INTER_EE_N_SRC_CH_IRQ_OFFSET(GSI_EE_AP)
40535 -#define GSI_INTER_EE_N_SRC_CH_IRQ_OFFSET(ee) \
40536 -                       (0x0000c018 + 0x1000 * (ee))
40538 -#define GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET \
40539 -                       GSI_INTER_EE_N_SRC_EV_CH_IRQ_OFFSET(GSI_EE_AP)
40540 -#define GSI_INTER_EE_N_SRC_EV_CH_IRQ_OFFSET(ee) \
40541 -                       (0x0000c01c + 0x1000 * (ee))
40542 +#define GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET \
40543 +                       GSI_INTER_EE_N_SRC_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
40544 +#define GSI_INTER_EE_N_SRC_CH_IRQ_MSK_OFFSET(ee) \
40545 +                       (0x0000c020 + 0x1000 * (ee))
40547 +#define GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET \
40548 +                       GSI_INTER_EE_N_SRC_EV_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
40549 +#define GSI_INTER_EE_N_SRC_EV_CH_IRQ_MSK_OFFSET(ee) \
40550 +                       (0x0000c024 + 0x1000 * (ee))
40552  /* All other register offsets are relative to gsi->virt */
40553  #define GSI_CH_C_CNTXT_0_OFFSET(ch) \
40554 diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
40555 index 6eac50d4b42f..d453ec016168 100644
40556 --- a/drivers/net/phy/intel-xway.c
40557 +++ b/drivers/net/phy/intel-xway.c
40558 @@ -11,6 +11,18 @@
40560  #define XWAY_MDIO_IMASK                        0x19    /* interrupt mask */
40561  #define XWAY_MDIO_ISTAT                        0x1A    /* interrupt status */
40562 +#define XWAY_MDIO_LED                  0x1B    /* led control */
40564 +/* bit 15:12 are reserved */
40565 +#define XWAY_MDIO_LED_LED3_EN          BIT(11) /* Enable the integrated function of LED3 */
40566 +#define XWAY_MDIO_LED_LED2_EN          BIT(10) /* Enable the integrated function of LED2 */
40567 +#define XWAY_MDIO_LED_LED1_EN          BIT(9)  /* Enable the integrated function of LED1 */
40568 +#define XWAY_MDIO_LED_LED0_EN          BIT(8)  /* Enable the integrated function of LED0 */
40569 +/* bit 7:4 are reserved */
40570 +#define XWAY_MDIO_LED_LED3_DA          BIT(3)  /* Direct Access to LED3 */
40571 +#define XWAY_MDIO_LED_LED2_DA          BIT(2)  /* Direct Access to LED2 */
40572 +#define XWAY_MDIO_LED_LED1_DA          BIT(1)  /* Direct Access to LED1 */
40573 +#define XWAY_MDIO_LED_LED0_DA          BIT(0)  /* Direct Access to LED0 */
40575  #define XWAY_MDIO_INIT_WOL             BIT(15) /* Wake-On-LAN */
40576  #define XWAY_MDIO_INIT_MSRE            BIT(14)
40577 @@ -159,6 +171,15 @@ static int xway_gphy_config_init(struct phy_device *phydev)
40578         /* Clear all pending interrupts */
40579         phy_read(phydev, XWAY_MDIO_ISTAT);
40581 +       /* Ensure that integrated led function is enabled for all leds */
40582 +       err = phy_write(phydev, XWAY_MDIO_LED,
40583 +                       XWAY_MDIO_LED_LED0_EN |
40584 +                       XWAY_MDIO_LED_LED1_EN |
40585 +                       XWAY_MDIO_LED_LED2_EN |
40586 +                       XWAY_MDIO_LED_LED3_EN);
40587 +       if (err)
40588 +               return err;
40590         phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LEDCH,
40591                       XWAY_MMD_LEDCH_NACS_NONE |
40592                       XWAY_MMD_LEDCH_SBF_F02HZ |
40593 diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
40594 index 8018ddf7f316..f86c9ddc609e 100644
40595 --- a/drivers/net/phy/marvell.c
40596 +++ b/drivers/net/phy/marvell.c
40597 @@ -967,22 +967,28 @@ static int m88e1111_get_downshift(struct phy_device *phydev, u8 *data)
40599  static int m88e1111_set_downshift(struct phy_device *phydev, u8 cnt)
40601 -       int val;
40602 +       int val, err;
40604         if (cnt > MII_M1111_PHY_EXT_CR_DOWNSHIFT_MAX)
40605                 return -E2BIG;
40607 -       if (!cnt)
40608 -               return phy_clear_bits(phydev, MII_M1111_PHY_EXT_CR,
40609 -                                     MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN);
40610 +       if (!cnt) {
40611 +               err = phy_clear_bits(phydev, MII_M1111_PHY_EXT_CR,
40612 +                                    MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN);
40613 +       } else {
40614 +               val = MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN;
40615 +               val |= FIELD_PREP(MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, cnt - 1);
40617 -       val = MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN;
40618 -       val |= FIELD_PREP(MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, cnt - 1);
40619 +               err = phy_modify(phydev, MII_M1111_PHY_EXT_CR,
40620 +                                MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN |
40621 +                                MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK,
40622 +                                val);
40623 +       }
40625 -       return phy_modify(phydev, MII_M1111_PHY_EXT_CR,
40626 -                         MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN |
40627 -                         MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK,
40628 -                         val);
40629 +       if (err < 0)
40630 +               return err;
40632 +       return genphy_soft_reset(phydev);
40635  static int m88e1111_get_tunable(struct phy_device *phydev,
40636 @@ -1025,22 +1031,28 @@ static int m88e1011_get_downshift(struct phy_device *phydev, u8 *data)
40638  static int m88e1011_set_downshift(struct phy_device *phydev, u8 cnt)
40640 -       int val;
40641 +       int val, err;
40643         if (cnt > MII_M1011_PHY_SCR_DOWNSHIFT_MAX)
40644                 return -E2BIG;
40646 -       if (!cnt)
40647 -               return phy_clear_bits(phydev, MII_M1011_PHY_SCR,
40648 -                                     MII_M1011_PHY_SCR_DOWNSHIFT_EN);
40649 +       if (!cnt) {
40650 +               err = phy_clear_bits(phydev, MII_M1011_PHY_SCR,
40651 +                                    MII_M1011_PHY_SCR_DOWNSHIFT_EN);
40652 +       } else {
40653 +               val = MII_M1011_PHY_SCR_DOWNSHIFT_EN;
40654 +               val |= FIELD_PREP(MII_M1011_PHY_SCR_DOWNSHIFT_MASK, cnt - 1);
40656 -       val = MII_M1011_PHY_SCR_DOWNSHIFT_EN;
40657 -       val |= FIELD_PREP(MII_M1011_PHY_SCR_DOWNSHIFT_MASK, cnt - 1);
40658 +               err = phy_modify(phydev, MII_M1011_PHY_SCR,
40659 +                                MII_M1011_PHY_SCR_DOWNSHIFT_EN |
40660 +                                MII_M1011_PHY_SCR_DOWNSHIFT_MASK,
40661 +                                val);
40662 +       }
40664 -       return phy_modify(phydev, MII_M1011_PHY_SCR,
40665 -                         MII_M1011_PHY_SCR_DOWNSHIFT_EN |
40666 -                         MII_M1011_PHY_SCR_DOWNSHIFT_MASK,
40667 -                         val);
40668 +       if (err < 0)
40669 +               return err;
40671 +       return genphy_soft_reset(phydev);
40674  static int m88e1011_get_tunable(struct phy_device *phydev,
40675 diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
40676 index cc38e326405a..af2e1759b523 100644
40677 --- a/drivers/net/phy/phy_device.c
40678 +++ b/drivers/net/phy/phy_device.c
40679 @@ -273,6 +273,9 @@ static __maybe_unused int mdio_bus_phy_suspend(struct device *dev)
40681         struct phy_device *phydev = to_phy_device(dev);
40683 +       if (phydev->mac_managed_pm)
40684 +               return 0;
40686         /* We must stop the state machine manually, otherwise it stops out of
40687          * control, possibly with the phydev->lock held. Upon resume, netdev
40688          * may call phy routines that try to grab the same lock, and that may
40689 @@ -294,6 +297,9 @@ static __maybe_unused int mdio_bus_phy_resume(struct device *dev)
40690         struct phy_device *phydev = to_phy_device(dev);
40691         int ret;
40693 +       if (phydev->mac_managed_pm)
40694 +               return 0;
40696         if (!phydev->suspended_by_mdio_bus)
40697                 goto no_resume;
40699 diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
40700 index ddb78fb4d6dc..d8cac02a79b9 100644
40701 --- a/drivers/net/phy/smsc.c
40702 +++ b/drivers/net/phy/smsc.c
40703 @@ -185,10 +185,13 @@ static int lan87xx_config_aneg(struct phy_device *phydev)
40704         return genphy_config_aneg(phydev);
40707 -static int lan87xx_config_aneg_ext(struct phy_device *phydev)
40708 +static int lan95xx_config_aneg_ext(struct phy_device *phydev)
40710         int rc;
40712 +       if (phydev->phy_id != 0x0007c0f0) /* not (LAN9500A or LAN9505A) */
40713 +               return lan87xx_config_aneg(phydev);
40715         /* Extend Manual AutoMDIX timer */
40716         rc = phy_read(phydev, PHY_EDPD_CONFIG);
40717         if (rc < 0)
40718 @@ -441,7 +444,7 @@ static struct phy_driver smsc_phy_driver[] = {
40719         .read_status    = lan87xx_read_status,
40720         .config_init    = smsc_phy_config_init,
40721         .soft_reset     = smsc_phy_reset,
40722 -       .config_aneg    = lan87xx_config_aneg_ext,
40723 +       .config_aneg    = lan95xx_config_aneg_ext,
40725         /* IRQ related */
40726         .config_intr    = smsc_phy_config_intr,
40727 diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
40728 index d650b39b6e5d..c1316718304d 100644
40729 --- a/drivers/net/usb/ax88179_178a.c
40730 +++ b/drivers/net/usb/ax88179_178a.c
40731 @@ -296,12 +296,12 @@ static int ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
40732         int ret;
40734         if (2 == size) {
40735 -               u16 buf;
40736 +               u16 buf = 0;
40737                 ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0);
40738                 le16_to_cpus(&buf);
40739                 *((u16 *)data) = buf;
40740         } else if (4 == size) {
40741 -               u32 buf;
40742 +               u32 buf = 0;
40743                 ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 0);
40744                 le32_to_cpus(&buf);
40745                 *((u32 *)data) = buf;
40746 @@ -1296,6 +1296,8 @@ static void ax88179_get_mac_addr(struct usbnet *dev)
40748         u8 mac[ETH_ALEN];
40750 +       memset(mac, 0, sizeof(mac));
40752         /* Maybe the boot loader passed the MAC address via device tree */
40753         if (!eth_platform_get_mac_address(&dev->udev->dev, mac)) {
40754                 netif_dbg(dev, ifup, dev->net,
40755 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
40756 index 9bc58e64b5b7..3ef4b2841402 100644
40757 --- a/drivers/net/usb/hso.c
40758 +++ b/drivers/net/usb/hso.c
40759 @@ -3104,7 +3104,7 @@ static void hso_free_interface(struct usb_interface *interface)
40760                         cancel_work_sync(&serial_table[i]->async_put_intf);
40761                         cancel_work_sync(&serial_table[i]->async_get_intf);
40762                         hso_serial_tty_unregister(serial);
40763 -                       kref_put(&serial_table[i]->ref, hso_serial_ref_free);
40764 +                       kref_put(&serial->parent->ref, hso_serial_ref_free);
40765                 }
40766         }
40768 diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
40769 index e81c5699c952..d2b360cfb402 100644
40770 --- a/drivers/net/usb/lan78xx.c
40771 +++ b/drivers/net/usb/lan78xx.c
40772 @@ -2655,7 +2655,7 @@ static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
40773         while (!skb_queue_empty(&dev->rxq) &&
40774                !skb_queue_empty(&dev->txq) &&
40775                !skb_queue_empty(&dev->done)) {
40776 -               schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
40777 +               schedule_msec_hrtimeout((UNLINK_TIMEOUT_MS));
40778                 set_current_state(TASK_UNINTERRUPTIBLE);
40779                 netif_dbg(dev, ifdown, dev->net,
40780                           "waited for %d urb completions\n", temp);
40781 diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
40782 index f4f37ecfed58..36647378e016 100644
40783 --- a/drivers/net/usb/usbnet.c
40784 +++ b/drivers/net/usb/usbnet.c
40785 @@ -764,7 +764,7 @@ static void wait_skb_queue_empty(struct sk_buff_head *q)
40786         spin_lock_irqsave(&q->lock, flags);
40787         while (!skb_queue_empty(q)) {
40788                 spin_unlock_irqrestore(&q->lock, flags);
40789 -               schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
40790 +               schedule_msec_hrtimeout((UNLINK_TIMEOUT_MS));
40791                 set_current_state(TASK_UNINTERRUPTIBLE);
40792                 spin_lock_irqsave(&q->lock, flags);
40793         }
40794 diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
40795 index 4d9dc7d15908..0720f5f92caa 100644
40796 --- a/drivers/net/wan/hdlc_fr.c
40797 +++ b/drivers/net/wan/hdlc_fr.c
40798 @@ -415,7 +415,7 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
40800                 if (pad > 0) { /* Pad the frame with zeros */
40801                         if (__skb_pad(skb, pad, false))
40802 -                               goto out;
40803 +                               goto drop;
40804                         skb_put(skb, pad);
40805                 }
40806         }
40807 @@ -448,9 +448,8 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
40808         return NETDEV_TX_OK;
40810  drop:
40811 -       kfree_skb(skb);
40812 -out:
40813         dev->stats.tx_dropped++;
40814 +       kfree_skb(skb);
40815         return NETDEV_TX_OK;
40818 diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
40819 index c3372498f4f1..8fda0446ff71 100644
40820 --- a/drivers/net/wan/lapbether.c
40821 +++ b/drivers/net/wan/lapbether.c
40822 @@ -51,6 +51,8 @@ struct lapbethdev {
40823         struct list_head        node;
40824         struct net_device       *ethdev;        /* link to ethernet device */
40825         struct net_device       *axdev;         /* lapbeth device (lapb#) */
40826 +       bool                    up;
40827 +       spinlock_t              up_lock;        /* Protects "up" */
40828  };
40830  static LIST_HEAD(lapbeth_devices);
40831 @@ -101,8 +103,9 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
40832         rcu_read_lock();
40833         lapbeth = lapbeth_get_x25_dev(dev);
40834         if (!lapbeth)
40835 -               goto drop_unlock;
40836 -       if (!netif_running(lapbeth->axdev))
40837 +               goto drop_unlock_rcu;
40838 +       spin_lock_bh(&lapbeth->up_lock);
40839 +       if (!lapbeth->up)
40840                 goto drop_unlock;
40842         len = skb->data[0] + skb->data[1] * 256;
40843 @@ -117,11 +120,14 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
40844                 goto drop_unlock;
40845         }
40846  out:
40847 +       spin_unlock_bh(&lapbeth->up_lock);
40848         rcu_read_unlock();
40849         return 0;
40850  drop_unlock:
40851         kfree_skb(skb);
40852         goto out;
40853 +drop_unlock_rcu:
40854 +       rcu_read_unlock();
40855  drop:
40856         kfree_skb(skb);
40857         return 0;
40858 @@ -151,13 +157,11 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
40859  static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
40860                                       struct net_device *dev)
40862 +       struct lapbethdev *lapbeth = netdev_priv(dev);
40863         int err;
40865 -       /*
40866 -        * Just to be *really* sure not to send anything if the interface
40867 -        * is down, the ethernet device may have gone.
40868 -        */
40869 -       if (!netif_running(dev))
40870 +       spin_lock_bh(&lapbeth->up_lock);
40871 +       if (!lapbeth->up)
40872                 goto drop;
40874         /* There should be a pseudo header of 1 byte added by upper layers.
40875 @@ -194,6 +198,7 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
40876                 goto drop;
40877         }
40878  out:
40879 +       spin_unlock_bh(&lapbeth->up_lock);
40880         return NETDEV_TX_OK;
40881  drop:
40882         kfree_skb(skb);
40883 @@ -285,6 +290,7 @@ static const struct lapb_register_struct lapbeth_callbacks = {
40884   */
40885  static int lapbeth_open(struct net_device *dev)
40887 +       struct lapbethdev *lapbeth = netdev_priv(dev);
40888         int err;
40890         if ((err = lapb_register(dev, &lapbeth_callbacks)) != LAPB_OK) {
40891 @@ -292,13 +298,22 @@ static int lapbeth_open(struct net_device *dev)
40892                 return -ENODEV;
40893         }
40895 +       spin_lock_bh(&lapbeth->up_lock);
40896 +       lapbeth->up = true;
40897 +       spin_unlock_bh(&lapbeth->up_lock);
40899         return 0;
40902  static int lapbeth_close(struct net_device *dev)
40904 +       struct lapbethdev *lapbeth = netdev_priv(dev);
40905         int err;
40907 +       spin_lock_bh(&lapbeth->up_lock);
40908 +       lapbeth->up = false;
40909 +       spin_unlock_bh(&lapbeth->up_lock);
40911         if ((err = lapb_unregister(dev)) != LAPB_OK)
40912                 pr_err("lapb_unregister error: %d\n", err);
40914 @@ -356,6 +371,9 @@ static int lapbeth_new_device(struct net_device *dev)
40915         dev_hold(dev);
40916         lapbeth->ethdev = dev;
40918 +       lapbeth->up = false;
40919 +       spin_lock_init(&lapbeth->up_lock);
40921         rc = -EIO;
40922         if (register_netdevice(ndev))
40923                 goto fail;
40924 diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
40925 index 0a37be6a7d33..fab398046a3f 100644
40926 --- a/drivers/net/wireless/ath/ath10k/htc.c
40927 +++ b/drivers/net/wireless/ath/ath10k/htc.c
40928 @@ -669,7 +669,7 @@ static int ath10k_htc_send_bundle(struct ath10k_htc_ep *ep,
40930         ath10k_dbg(ar, ATH10K_DBG_HTC,
40931                    "bundle tx status %d eid %d req count %d count %d len %d\n",
40932 -                  ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, bundle_skb->len);
40933 +                  ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, skb_len);
40934         return ret;
40937 diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
40938 index d97b33f789e4..7efbe03fbca8 100644
40939 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
40940 +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
40941 @@ -592,6 +592,9 @@ static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb)
40942                                         GFP_ATOMIC
40943                                         );
40944                 break;
40945 +       default:
40946 +               kfree(tb);
40947 +               return;
40948         }
40950  exit:
40951 diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
40952 index cccfd3bd4d27..ca5cda890d58 100644
40953 --- a/drivers/net/wireless/ath/ath11k/wmi.c
40954 +++ b/drivers/net/wireless/ath/ath11k/wmi.c
40955 @@ -5417,31 +5417,6 @@ int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
40956         return 0;
40959 -static int
40960 -ath11k_pull_pdev_temp_ev(struct ath11k_base *ab, u8 *evt_buf,
40961 -                        u32 len, const struct wmi_pdev_temperature_event *ev)
40963 -       const void **tb;
40964 -       int ret;
40966 -       tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
40967 -       if (IS_ERR(tb)) {
40968 -               ret = PTR_ERR(tb);
40969 -               ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
40970 -               return ret;
40971 -       }
40973 -       ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
40974 -       if (!ev) {
40975 -               ath11k_warn(ab, "failed to fetch pdev temp ev");
40976 -               kfree(tb);
40977 -               return -EPROTO;
40978 -       }
40980 -       kfree(tb);
40981 -       return 0;
40984  size_t ath11k_wmi_fw_stats_num_vdevs(struct list_head *head)
40986         struct ath11k_fw_stats_vdev *i;
40987 @@ -6849,23 +6824,37 @@ ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
40988                                   struct sk_buff *skb)
40990         struct ath11k *ar;
40991 -       struct wmi_pdev_temperature_event ev = {0};
40992 +       const void **tb;
40993 +       const struct wmi_pdev_temperature_event *ev;
40994 +       int ret;
40996 +       tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
40997 +       if (IS_ERR(tb)) {
40998 +               ret = PTR_ERR(tb);
40999 +               ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
41000 +               return;
41001 +       }
41003 -       if (ath11k_pull_pdev_temp_ev(ab, skb->data, skb->len, &ev) != 0) {
41004 -               ath11k_warn(ab, "failed to extract pdev temperature event");
41005 +       ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
41006 +       if (!ev) {
41007 +               ath11k_warn(ab, "failed to fetch pdev temp ev");
41008 +               kfree(tb);
41009                 return;
41010         }
41012         ath11k_dbg(ab, ATH11K_DBG_WMI,
41013 -                  "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id);
41014 +                  "pdev temperature ev temp %d pdev_id %d\n", ev->temp, ev->pdev_id);
41016 -       ar = ath11k_mac_get_ar_by_pdev_id(ab, ev.pdev_id);
41017 +       ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
41018         if (!ar) {
41019 -               ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id);
41020 +               ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev->pdev_id);
41021 +               kfree(tb);
41022                 return;
41023         }
41025 -       ath11k_thermal_event_temperature(ar, ev.temp);
41026 +       ath11k_thermal_event_temperature(ar, ev->temp);
41028 +       kfree(tb);
41031  static void ath11k_fils_discovery_event(struct ath11k_base *ab,
41032 diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
41033 index db0c6fa9c9dc..ff61ae34ecdf 100644
41034 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
41035 +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
41036 @@ -246,7 +246,7 @@ static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset)
41037         if (unlikely(r)) {
41038                 ath_dbg(common, WMI, "REGISTER READ FAILED: (0x%04x, %d)\n",
41039                         reg_offset, r);
41040 -               return -EIO;
41041 +               return -1;
41042         }
41044         return be32_to_cpu(val);
41045 diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
41046 index 5abc2a5526ec..2ca3b86714a9 100644
41047 --- a/drivers/net/wireless/ath/ath9k/hw.c
41048 +++ b/drivers/net/wireless/ath/ath9k/hw.c
41049 @@ -286,7 +286,7 @@ static bool ath9k_hw_read_revisions(struct ath_hw *ah)
41051         srev = REG_READ(ah, AR_SREV);
41053 -       if (srev == -EIO) {
41054 +       if (srev == -1) {
41055                 ath_err(ath9k_hw_common(ah),
41056                         "Failed to read SREV register");
41057                 return false;
41058 diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
41059 index 60db38c38960..fd37d4d2983b 100644
41060 --- a/drivers/net/wireless/cisco/airo.c
41061 +++ b/drivers/net/wireless/cisco/airo.c
41062 @@ -3817,6 +3817,68 @@ static inline void set_auth_type(struct airo_info *local, int auth_type)
41063                 local->last_auth = auth_type;
41066 +static int noinline_for_stack airo_readconfig(struct airo_info *ai, u8 *mac, int lock)
41068 +       int i, status;
41069 +       /* large variables, so don't inline this function,
41070 +        * maybe change to kmalloc
41071 +        */
41072 +       tdsRssiRid rssi_rid;
41073 +       CapabilityRid cap_rid;
41075 +       kfree(ai->SSID);
41076 +       ai->SSID = NULL;
41077 +       // general configuration (read/modify/write)
41078 +       status = readConfigRid(ai, lock);
41079 +       if (status != SUCCESS) return ERROR;
41081 +       status = readCapabilityRid(ai, &cap_rid, lock);
41082 +       if (status != SUCCESS) return ERROR;
41084 +       status = PC4500_readrid(ai, RID_RSSI, &rssi_rid, sizeof(rssi_rid), lock);
41085 +       if (status == SUCCESS) {
41086 +               if (ai->rssi || (ai->rssi = kmalloc(512, GFP_KERNEL)) != NULL)
41087 +                       memcpy(ai->rssi, (u8*)&rssi_rid + 2, 512); /* Skip RID length member */
41088 +       }
41089 +       else {
41090 +               kfree(ai->rssi);
41091 +               ai->rssi = NULL;
41092 +               if (cap_rid.softCap & cpu_to_le16(8))
41093 +                       ai->config.rmode |= RXMODE_NORMALIZED_RSSI;
41094 +               else
41095 +                       airo_print_warn(ai->dev->name, "unknown received signal "
41096 +                                       "level scale");
41097 +       }
41098 +       ai->config.opmode = adhoc ? MODE_STA_IBSS : MODE_STA_ESS;
41099 +       set_auth_type(ai, AUTH_OPEN);
41100 +       ai->config.modulation = MOD_CCK;
41102 +       if (le16_to_cpu(cap_rid.len) >= sizeof(cap_rid) &&
41103 +           (cap_rid.extSoftCap & cpu_to_le16(1)) &&
41104 +           micsetup(ai) == SUCCESS) {
41105 +               ai->config.opmode |= MODE_MIC;
41106 +               set_bit(FLAG_MIC_CAPABLE, &ai->flags);
41107 +       }
41109 +       /* Save off the MAC */
41110 +       for (i = 0; i < ETH_ALEN; i++) {
41111 +               mac[i] = ai->config.macAddr[i];
41112 +       }
41114 +       /* Check to see if there are any insmod configured
41115 +          rates to add */
41116 +       if (rates[0]) {
41117 +               memset(ai->config.rates, 0, sizeof(ai->config.rates));
41118 +               for (i = 0; i < 8 && rates[i]; i++) {
41119 +                       ai->config.rates[i] = rates[i];
41120 +               }
41121 +       }
41122 +       set_bit (FLAG_COMMIT, &ai->flags);
41124 +       return SUCCESS;
41128  static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
41130         Cmd cmd;
41131 @@ -3863,58 +3925,9 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
41132         if (lock)
41133                 up(&ai->sem);
41134         if (ai->config.len == 0) {
41135 -               int i;
41136 -               tdsRssiRid rssi_rid;
41137 -               CapabilityRid cap_rid;
41139 -               kfree(ai->SSID);
41140 -               ai->SSID = NULL;
41141 -               // general configuration (read/modify/write)
41142 -               status = readConfigRid(ai, lock);
41143 -               if (status != SUCCESS) return ERROR;
41145 -               status = readCapabilityRid(ai, &cap_rid, lock);
41146 -               if (status != SUCCESS) return ERROR;
41148 -               status = PC4500_readrid(ai, RID_RSSI,&rssi_rid, sizeof(rssi_rid), lock);
41149 -               if (status == SUCCESS) {
41150 -                       if (ai->rssi || (ai->rssi = kmalloc(512, GFP_KERNEL)) != NULL)
41151 -                               memcpy(ai->rssi, (u8*)&rssi_rid + 2, 512); /* Skip RID length member */
41152 -               }
41153 -               else {
41154 -                       kfree(ai->rssi);
41155 -                       ai->rssi = NULL;
41156 -                       if (cap_rid.softCap & cpu_to_le16(8))
41157 -                               ai->config.rmode |= RXMODE_NORMALIZED_RSSI;
41158 -                       else
41159 -                               airo_print_warn(ai->dev->name, "unknown received signal "
41160 -                                               "level scale");
41161 -               }
41162 -               ai->config.opmode = adhoc ? MODE_STA_IBSS : MODE_STA_ESS;
41163 -               set_auth_type(ai, AUTH_OPEN);
41164 -               ai->config.modulation = MOD_CCK;
41166 -               if (le16_to_cpu(cap_rid.len) >= sizeof(cap_rid) &&
41167 -                   (cap_rid.extSoftCap & cpu_to_le16(1)) &&
41168 -                   micsetup(ai) == SUCCESS) {
41169 -                       ai->config.opmode |= MODE_MIC;
41170 -                       set_bit(FLAG_MIC_CAPABLE, &ai->flags);
41171 -               }
41173 -               /* Save off the MAC */
41174 -               for (i = 0; i < ETH_ALEN; i++) {
41175 -                       mac[i] = ai->config.macAddr[i];
41176 -               }
41178 -               /* Check to see if there are any insmod configured
41179 -                  rates to add */
41180 -               if (rates[0]) {
41181 -                       memset(ai->config.rates, 0, sizeof(ai->config.rates));
41182 -                       for (i = 0; i < 8 && rates[i]; i++) {
41183 -                               ai->config.rates[i] = rates[i];
41184 -                       }
41185 -               }
41186 -               set_bit (FLAG_COMMIT, &ai->flags);
41187 +               status = airo_readconfig(ai, mac, lock);
41188 +               if (status != SUCCESS)
41189 +                       return ERROR;
41190         }
41192         /* Setup the SSIDs if present */
41193 diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
41194 index 23fbddd0c1f8..534ab3b894e2 100644
41195 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
41196 +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
41197 @@ -815,7 +815,7 @@ static int ipw2100_hw_send_command(struct ipw2100_priv *priv,
41198          * doesn't seem to have as many firmware restart cycles...
41199          *
41200          * As a test, we're sticking in a 1/100s delay here */
41201 -       schedule_timeout_uninterruptible(msecs_to_jiffies(10));
41202 +       schedule_msec_hrtimeout_uninterruptible((10));
41204         return 0;
41206 @@ -1266,7 +1266,7 @@ static int ipw2100_start_adapter(struct ipw2100_priv *priv)
41207         IPW_DEBUG_FW("Waiting for f/w initialization to complete...\n");
41208         i = 5000;
41209         do {
41210 -               schedule_timeout_uninterruptible(msecs_to_jiffies(40));
41211 +               schedule_msec_hrtimeout_uninterruptible((40));
41212                 /* Todo... wait for sync command ... */
41214                 read_register(priv->net_dev, IPW_REG_INTA, &inta);
41215 diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
41216 index a0cf78c418ac..903de34028ef 100644
41217 --- a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
41218 +++ b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
41219 @@ -633,8 +633,10 @@ int libipw_wx_set_encodeext(struct libipw_device *ieee,
41220         }
41222         if (ext->alg != IW_ENCODE_ALG_NONE) {
41223 -               memcpy(sec.keys[idx], ext->key, ext->key_len);
41224 -               sec.key_sizes[idx] = ext->key_len;
41225 +               int key_len = clamp_val(ext->key_len, 0, SCM_KEY_LEN);
41227 +               memcpy(sec.keys[idx], ext->key, key_len);
41228 +               sec.key_sizes[idx] = key_len;
41229                 sec.flags |= (1 << idx);
41230                 if (ext->alg == IW_ENCODE_ALG_WEP) {
41231                         sec.encode_alg[idx] = SEC_ALG_WEP;
41232 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
41233 index 579bc81cc0ae..4cd8c39cc3e9 100644
41234 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
41235 +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
41236 @@ -1,6 +1,6 @@
41237  // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
41238  /*
41239 - * Copyright (C) 2018-2020 Intel Corporation
41240 + * Copyright (C) 2018-2021 Intel Corporation
41241   */
41242  #include <linux/firmware.h>
41243  #include "iwl-drv.h"
41244 @@ -426,7 +426,8 @@ void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
41245         const struct firmware *fw;
41246         int res;
41248 -       if (!iwlwifi_mod_params.enable_ini)
41249 +       if (!iwlwifi_mod_params.enable_ini ||
41250 +           trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_9000)
41251                 return;
41253         res = firmware_request_nowarn(&fw, "iwl-debug-yoyo.bin", dev);
41254 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
41255 index 60e0db4a5e20..9236f9106826 100644
41256 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
41257 +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
41258 @@ -2,7 +2,7 @@
41259  /*
41260   * Copyright (C) 2015 Intel Mobile Communications GmbH
41261   * Copyright (C) 2016-2017 Intel Deutschland GmbH
41262 - * Copyright (C) 2019-2020 Intel Corporation
41263 + * Copyright (C) 2019-2021 Intel Corporation
41264   */
41265  #include <linux/kernel.h>
41266  #include <linux/bsearch.h>
41267 @@ -21,7 +21,6 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
41268                                   const struct iwl_cfg_trans_params *cfg_trans)
41270         struct iwl_trans *trans;
41271 -       int txcmd_size, txcmd_align;
41272  #ifdef CONFIG_LOCKDEP
41273         static struct lock_class_key __key;
41274  #endif
41275 @@ -31,10 +30,40 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
41276                 return NULL;
41278         trans->trans_cfg = cfg_trans;
41279 -       if (!cfg_trans->gen2) {
41281 +#ifdef CONFIG_LOCKDEP
41282 +       lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
41283 +                        &__key, 0);
41284 +#endif
41286 +       trans->dev = dev;
41287 +       trans->ops = ops;
41288 +       trans->num_rx_queues = 1;
41290 +       WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty);
41292 +       if (trans->trans_cfg->use_tfh) {
41293 +               trans->txqs.tfd.addr_size = 64;
41294 +               trans->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
41295 +               trans->txqs.tfd.size = sizeof(struct iwl_tfh_tfd);
41296 +       } else {
41297 +               trans->txqs.tfd.addr_size = 36;
41298 +               trans->txqs.tfd.max_tbs = IWL_NUM_OF_TBS;
41299 +               trans->txqs.tfd.size = sizeof(struct iwl_tfd);
41300 +       }
41301 +       trans->max_skb_frags = IWL_TRANS_MAX_FRAGS(trans);
41303 +       return trans;
41306 +int iwl_trans_init(struct iwl_trans *trans)
41308 +       int txcmd_size, txcmd_align;
41310 +       if (!trans->trans_cfg->gen2) {
41311                 txcmd_size = sizeof(struct iwl_tx_cmd);
41312                 txcmd_align = sizeof(void *);
41313 -       } else if (cfg_trans->device_family < IWL_DEVICE_FAMILY_AX210) {
41314 +       } else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
41315                 txcmd_size = sizeof(struct iwl_tx_cmd_gen2);
41316                 txcmd_align = 64;
41317         } else {
41318 @@ -46,17 +75,8 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
41319         txcmd_size += 36; /* biggest possible 802.11 header */
41321         /* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
41322 -       if (WARN_ON(cfg_trans->gen2 && txcmd_size >= txcmd_align))
41323 -               return ERR_PTR(-EINVAL);
41325 -#ifdef CONFIG_LOCKDEP
41326 -       lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
41327 -                        &__key, 0);
41328 -#endif
41330 -       trans->dev = dev;
41331 -       trans->ops = ops;
41332 -       trans->num_rx_queues = 1;
41333 +       if (WARN_ON(trans->trans_cfg->gen2 && txcmd_size >= txcmd_align))
41334 +               return -EINVAL;
41336         if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
41337                 trans->txqs.bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl);
41338 @@ -68,23 +88,16 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
41339          * allocate here.
41340          */
41341         if (trans->trans_cfg->gen2) {
41342 -               trans->txqs.bc_pool = dmam_pool_create("iwlwifi:bc", dev,
41343 +               trans->txqs.bc_pool = dmam_pool_create("iwlwifi:bc", trans->dev,
41344                                                        trans->txqs.bc_tbl_size,
41345                                                        256, 0);
41346                 if (!trans->txqs.bc_pool)
41347 -                       return NULL;
41348 +                       return -ENOMEM;
41349         }
41351 -       if (trans->trans_cfg->use_tfh) {
41352 -               trans->txqs.tfd.addr_size = 64;
41353 -               trans->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
41354 -               trans->txqs.tfd.size = sizeof(struct iwl_tfh_tfd);
41355 -       } else {
41356 -               trans->txqs.tfd.addr_size = 36;
41357 -               trans->txqs.tfd.max_tbs = IWL_NUM_OF_TBS;
41358 -               trans->txqs.tfd.size = sizeof(struct iwl_tfd);
41359 -       }
41360 -       trans->max_skb_frags = IWL_TRANS_MAX_FRAGS(trans);
41361 +       /* Some things must not change even if the config does */
41362 +       WARN_ON(trans->txqs.tfd.addr_size !=
41363 +               (trans->trans_cfg->use_tfh ? 64 : 36));
41365         snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
41366                  "iwl_cmd_pool:%s", dev_name(trans->dev));
41367 @@ -93,35 +106,35 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
41368                                   txcmd_size, txcmd_align,
41369                                   SLAB_HWCACHE_ALIGN, NULL);
41370         if (!trans->dev_cmd_pool)
41371 -               return NULL;
41373 -       WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty);
41374 +               return -ENOMEM;
41376         trans->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
41377         if (!trans->txqs.tso_hdr_page) {
41378                 kmem_cache_destroy(trans->dev_cmd_pool);
41379 -               return NULL;
41380 +               return -ENOMEM;
41381         }
41383         /* Initialize the wait queue for commands */
41384         init_waitqueue_head(&trans->wait_command_queue);
41386 -       return trans;
41387 +       return 0;
41390  void iwl_trans_free(struct iwl_trans *trans)
41392         int i;
41394 -       for_each_possible_cpu(i) {
41395 -               struct iwl_tso_hdr_page *p =
41396 -                       per_cpu_ptr(trans->txqs.tso_hdr_page, i);
41397 +       if (trans->txqs.tso_hdr_page) {
41398 +               for_each_possible_cpu(i) {
41399 +                       struct iwl_tso_hdr_page *p =
41400 +                               per_cpu_ptr(trans->txqs.tso_hdr_page, i);
41402 -               if (p->page)
41403 -                       __free_page(p->page);
41404 -       }
41405 +                       if (p && p->page)
41406 +                               __free_page(p->page);
41407 +               }
41409 -       free_percpu(trans->txqs.tso_hdr_page);
41410 +               free_percpu(trans->txqs.tso_hdr_page);
41411 +       }
41413         kmem_cache_destroy(trans->dev_cmd_pool);
41415 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
41416 index 4a5822c1be13..3e0df6fbb642 100644
41417 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
41418 +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
41419 @@ -1438,6 +1438,7 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
41420                           struct device *dev,
41421                           const struct iwl_trans_ops *ops,
41422                           const struct iwl_cfg_trans_params *cfg_trans);
41423 +int iwl_trans_init(struct iwl_trans *trans);
41424  void iwl_trans_free(struct iwl_trans *trans);
41426  /*****************************************************
41427 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
41428 index 8772b65c9dab..2d58cb969918 100644
41429 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
41430 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
41431 @@ -1,7 +1,7 @@
41432  // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
41433  /*
41434   * Copyright (C) 2017 Intel Deutschland GmbH
41435 - * Copyright (C) 2018-2020 Intel Corporation
41436 + * Copyright (C) 2018-2021 Intel Corporation
41437   */
41438  #include "rs.h"
41439  #include "fw-api.h"
41440 @@ -72,19 +72,15 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm,
41441         bool vht_ena = vht_cap->vht_supported;
41442         u16 flags = 0;
41444 +       /* get STBC flags */
41445         if (mvm->cfg->ht_params->stbc &&
41446             (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1)) {
41447 -               if (he_cap->has_he) {
41448 -                       if (he_cap->he_cap_elem.phy_cap_info[2] &
41449 -                           IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
41450 -                               flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
41452 -                       if (he_cap->he_cap_elem.phy_cap_info[7] &
41453 -                           IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ)
41454 -                               flags |= IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK;
41455 -               } else if ((ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) ||
41456 -                          (vht_ena &&
41457 -                           (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK)))
41458 +               if (he_cap->has_he && he_cap->he_cap_elem.phy_cap_info[2] &
41459 +                                     IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
41460 +                       flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
41461 +               else if (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
41462 +                       flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
41463 +               else if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)
41464                         flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
41465         }
41467 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
41468 index 558a0b2ef0fc..66faf7914bd8 100644
41469 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
41470 +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
41471 @@ -17,10 +17,20 @@
41472  #include "iwl-prph.h"
41473  #include "internal.h"
41475 +#define TRANS_CFG_MARKER BIT(0)
41476 +#define _IS_A(cfg, _struct) __builtin_types_compatible_p(typeof(cfg),  \
41477 +                                                        struct _struct)
41478 +extern int _invalid_type;
41479 +#define _TRANS_CFG_MARKER(cfg)                                         \
41480 +       (__builtin_choose_expr(_IS_A(cfg, iwl_cfg_trans_params),        \
41481 +                              TRANS_CFG_MARKER,                        \
41482 +        __builtin_choose_expr(_IS_A(cfg, iwl_cfg), 0, _invalid_type)))
41483 +#define _ASSIGN_CFG(cfg) (_TRANS_CFG_MARKER(cfg) + (kernel_ulong_t)&(cfg))
41485  #define IWL_PCI_DEVICE(dev, subdev, cfg) \
41486         .vendor = PCI_VENDOR_ID_INTEL,  .device = (dev), \
41487         .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
41488 -       .driver_data = (kernel_ulong_t)&(cfg)
41489 +       .driver_data = _ASSIGN_CFG(cfg)
41491  /* Hardware specific file defines the PCI IDs table for that hardware module */
41492  static const struct pci_device_id iwl_hw_card_ids[] = {
41493 @@ -1075,19 +1085,22 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
41495  static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
41497 -       const struct iwl_cfg_trans_params *trans =
41498 -               (struct iwl_cfg_trans_params *)(ent->driver_data);
41499 +       const struct iwl_cfg_trans_params *trans;
41500         const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
41501         struct iwl_trans *iwl_trans;
41502         struct iwl_trans_pcie *trans_pcie;
41503         int i, ret;
41504 +       const struct iwl_cfg *cfg;
41506 +       trans = (void *)(ent->driver_data & ~TRANS_CFG_MARKER);
41508         /*
41509          * This is needed for backwards compatibility with the old
41510          * tables, so we don't need to change all the config structs
41511          * at the same time.  The cfg is used to compare with the old
41512          * full cfg structs.
41513          */
41514 -       const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
41515 +       cfg = (void *)(ent->driver_data & ~TRANS_CFG_MARKER);
41517         /* make sure trans is the first element in iwl_cfg */
41518         BUILD_BUG_ON(offsetof(struct iwl_cfg, trans));
41519 @@ -1202,11 +1215,19 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
41521  #endif
41522         /*
41523 -        * If we didn't set the cfg yet, assume the trans is actually
41524 -        * a full cfg from the old tables.
41525 +        * If we didn't set the cfg yet, the PCI ID table entry should have
41526 +        * been a full config - if yes, use it, otherwise fail.
41527          */
41528 -       if (!iwl_trans->cfg)
41529 +       if (!iwl_trans->cfg) {
41530 +               if (ent->driver_data & TRANS_CFG_MARKER) {
41531 +                       pr_err("No config found for PCI dev %04x/%04x, rev=0x%x, rfid=0x%x\n",
41532 +                              pdev->device, pdev->subsystem_device,
41533 +                              iwl_trans->hw_rev, iwl_trans->hw_rf_id);
41534 +                       ret = -EINVAL;
41535 +                       goto out_free_trans;
41536 +               }
41537                 iwl_trans->cfg = cfg;
41538 +       }
41540         /* if we don't have a name yet, copy name from the old cfg */
41541         if (!iwl_trans->name)
41542 @@ -1222,6 +1243,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
41543                 trans_pcie->num_rx_bufs = RX_QUEUE_SIZE;
41544         }
41546 +       ret = iwl_trans_init(iwl_trans);
41547 +       if (ret)
41548 +               goto out_free_trans;
41550         pci_set_drvdata(pdev, iwl_trans);
41551         iwl_trans->drv = iwl_drv_start(iwl_trans);
41553 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
41554 index 94ffc1ae484d..af9412bd697e 100644
41555 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
41556 +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
41557 @@ -1,7 +1,7 @@
41558  // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
41559  /*
41560   * Copyright (C) 2017 Intel Deutschland GmbH
41561 - * Copyright (C) 2018-2020 Intel Corporation
41562 + * Copyright (C) 2018-2021 Intel Corporation
41563   */
41564  #include "iwl-trans.h"
41565  #include "iwl-prph.h"
41566 @@ -143,7 +143,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
41567         if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
41568                 IWL_DEBUG_INFO(trans,
41569                                "DEVICE_ENABLED bit was set and is now cleared\n");
41570 -               iwl_txq_gen2_tx_stop(trans);
41571 +               iwl_txq_gen2_tx_free(trans);
41572                 iwl_pcie_rx_stop(trans);
41573         }
41575 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
41576 index 4456abb9a074..34bde8c87324 100644
41577 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
41578 +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
41579 @@ -40,6 +40,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
41580         const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
41581         u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
41582         struct iwl_tfh_tfd *tfd;
41583 +       unsigned long flags;
41585         copy_size = sizeof(struct iwl_cmd_header_wide);
41586         cmd_size = sizeof(struct iwl_cmd_header_wide);
41587 @@ -108,14 +109,14 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
41588                 goto free_dup_buf;
41589         }
41591 -       spin_lock_bh(&txq->lock);
41592 +       spin_lock_irqsave(&txq->lock, flags);
41594         idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
41595         tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
41596         memset(tfd, 0, sizeof(*tfd));
41598         if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
41599 -               spin_unlock_bh(&txq->lock);
41600 +               spin_unlock_irqrestore(&txq->lock, flags);
41602                 IWL_ERR(trans, "No space in command queue\n");
41603                 iwl_op_mode_cmd_queue_full(trans->op_mode);
41604 @@ -250,7 +251,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
41605         spin_unlock(&trans_pcie->reg_lock);
41607  out:
41608 -       spin_unlock_bh(&txq->lock);
41609 +       spin_unlock_irqrestore(&txq->lock, flags);
41610  free_dup_buf:
41611         if (idx < 0)
41612                 kfree(dup_buf);
41613 diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
41614 index 833f43d1ca7a..810dcb3df242 100644
41615 --- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c
41616 +++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
41617 @@ -13,30 +13,6 @@
41618  #include "iwl-scd.h"
41619  #include <linux/dmapool.h>
41622 - * iwl_txq_gen2_tx_stop - Stop all Tx DMA channels
41623 - */
41624 -void iwl_txq_gen2_tx_stop(struct iwl_trans *trans)
41626 -       int txq_id;
41628 -       /*
41629 -        * This function can be called before the op_mode disabled the
41630 -        * queues. This happens when we have an rfkill interrupt.
41631 -        * Since we stop Tx altogether - mark the queues as stopped.
41632 -        */
41633 -       memset(trans->txqs.queue_stopped, 0,
41634 -              sizeof(trans->txqs.queue_stopped));
41635 -       memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
41637 -       /* Unmap DMA from host system and free skb's */
41638 -       for (txq_id = 0; txq_id < ARRAY_SIZE(trans->txqs.txq); txq_id++) {
41639 -               if (!trans->txqs.txq[txq_id])
41640 -                       continue;
41641 -               iwl_txq_gen2_unmap(trans, txq_id);
41642 -       }
41645  /*
41646   * iwl_txq_update_byte_tbl - Set up entry in Tx byte-count array
41647   */
41648 @@ -1189,6 +1165,12 @@ static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
41649                 goto error_free_resp;
41650         }
41652 +       if (WARN_ONCE(trans->txqs.txq[qid],
41653 +                     "queue %d already allocated\n", qid)) {
41654 +               ret = -EIO;
41655 +               goto error_free_resp;
41656 +       }
41658         txq->id = qid;
41659         trans->txqs.txq[qid] = txq;
41660         wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
41661 diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.h b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
41662 index af1dbdf5617a..20efc62acf13 100644
41663 --- a/drivers/net/wireless/intel/iwlwifi/queue/tx.h
41664 +++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
41665 @@ -1,6 +1,6 @@
41666  /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
41667  /*
41668 - * Copyright (C) 2020 Intel Corporation
41669 + * Copyright (C) 2020-2021 Intel Corporation
41670   */
41671  #ifndef __iwl_trans_queue_tx_h__
41672  #define __iwl_trans_queue_tx_h__
41673 @@ -123,7 +123,6 @@ int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
41674  void iwl_txq_dyn_free(struct iwl_trans *trans, int queue);
41675  void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
41676  void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
41677 -void iwl_txq_gen2_tx_stop(struct iwl_trans *trans);
41678  void iwl_txq_gen2_tx_free(struct iwl_trans *trans);
41679  int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
41680                  bool cmd_queue);
41681 diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
41682 index c9f8c056aa51..84b32a5f01ee 100644
41683 --- a/drivers/net/wireless/marvell/mwl8k.c
41684 +++ b/drivers/net/wireless/marvell/mwl8k.c
41685 @@ -1473,6 +1473,7 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
41686         if (txq->skb == NULL) {
41687                 dma_free_coherent(&priv->pdev->dev, size, txq->txd,
41688                                   txq->txd_dma);
41689 +               txq->txd = NULL;
41690                 return -ENOMEM;
41691         }
41693 diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
41694 index 2f27c43ad76d..7196fa9047e6 100644
41695 --- a/drivers/net/wireless/mediatek/mt76/dma.c
41696 +++ b/drivers/net/wireless/mediatek/mt76/dma.c
41697 @@ -309,7 +309,7 @@ static int
41698  mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
41699                           struct sk_buff *skb, u32 tx_info)
41701 -       struct mt76_queue_buf buf;
41702 +       struct mt76_queue_buf buf = {};
41703         dma_addr_t addr;
41705         if (q->queued + 1 >= q->ndesc - 1)
41706 diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
41707 index 8bf45497cfca..36a430f09f64 100644
41708 --- a/drivers/net/wireless/mediatek/mt76/mt76.h
41709 +++ b/drivers/net/wireless/mediatek/mt76/mt76.h
41710 @@ -222,6 +222,7 @@ struct mt76_wcid {
41712         u16 idx;
41713         u8 hw_key_idx;
41714 +       u8 hw_key_idx2;
41716         u8 sta:1;
41717         u8 ext_phy:1;
41718 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
41719 index 2eab23898c77..6dbaaf95ee38 100644
41720 --- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
41721 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
41722 @@ -86,6 +86,7 @@ static int mt7615_check_eeprom(struct mt76_dev *dev)
41723         switch (val) {
41724         case 0x7615:
41725         case 0x7622:
41726 +       case 0x7663:
41727                 return 0;
41728         default:
41729                 return -EINVAL;
41730 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
41731 index 59fdd0fc2ad4..8dccb589b756 100644
41732 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
41733 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
41734 @@ -690,7 +690,7 @@ mt7615_txp_skb_unmap_fw(struct mt76_dev *dev, struct mt7615_fw_txp *txp)
41736         int i;
41738 -       for (i = 1; i < txp->nbuf; i++)
41739 +       for (i = 0; i < txp->nbuf; i++)
41740                 dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
41741                                  le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
41743 @@ -966,6 +966,7 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
41744         struct mt7615_dev *dev = phy->dev;
41745         struct mt7615_rate_desc rd;
41746         u32 w5, w27, addr;
41747 +       u16 idx = sta->vif->mt76.omac_idx;
41749         if (!mt76_is_mmio(&dev->mt76)) {
41750                 mt7615_mac_queue_rate_update(phy, sta, probe_rate, rates);
41751 @@ -1017,7 +1018,10 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
41753         mt76_wr(dev, addr + 27 * 4, w27);
41755 -       mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
41756 +       idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
41757 +       addr = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx);
41759 +       mt76_set(dev, addr, MT_LPON_TCR_MODE); /* TSF read */
41760         sta->rate_set_tsf = mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0);
41761         sta->rate_set_tsf |= rd.rateset;
41763 @@ -1033,7 +1037,7 @@ EXPORT_SYMBOL_GPL(mt7615_mac_set_rates);
41764  static int
41765  mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
41766                            struct ieee80211_key_conf *key,
41767 -                          enum mt7615_cipher_type cipher,
41768 +                          enum mt7615_cipher_type cipher, u16 cipher_mask,
41769                            enum set_key_cmd cmd)
41771         u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4;
41772 @@ -1050,22 +1054,22 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
41773                         memcpy(data + 16, key->key + 24, 8);
41774                         memcpy(data + 24, key->key + 16, 8);
41775                 } else {
41776 -                       if (cipher != MT_CIPHER_BIP_CMAC_128 && wcid->cipher)
41777 -                               memmove(data + 16, data, 16);
41778 -                       if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher)
41779 +                       if (cipher_mask == BIT(cipher))
41780                                 memcpy(data, key->key, key->keylen);
41781 -                       else if (cipher == MT_CIPHER_BIP_CMAC_128)
41782 +                       else if (cipher != MT_CIPHER_BIP_CMAC_128)
41783 +                               memcpy(data, key->key, 16);
41784 +                       if (cipher == MT_CIPHER_BIP_CMAC_128)
41785                                 memcpy(data + 16, key->key, 16);
41786                 }
41787         } else {
41788 -               if (wcid->cipher & ~BIT(cipher)) {
41789 -                       if (cipher != MT_CIPHER_BIP_CMAC_128)
41790 -                               memmove(data, data + 16, 16);
41791 +               if (cipher == MT_CIPHER_BIP_CMAC_128)
41792                         memset(data + 16, 0, 16);
41793 -               } else {
41794 +               else if (cipher_mask)
41795 +                       memset(data, 0, 16);
41796 +               if (!cipher_mask)
41797                         memset(data, 0, sizeof(data));
41798 -               }
41799         }
41801         mt76_wr_copy(dev, addr, data, sizeof(data));
41803         return 0;
41804 @@ -1073,7 +1077,7 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
41806  static int
41807  mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
41808 -                         enum mt7615_cipher_type cipher,
41809 +                         enum mt7615_cipher_type cipher, u16 cipher_mask,
41810                           int keyidx, enum set_key_cmd cmd)
41812         u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1;
41813 @@ -1083,20 +1087,23 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
41815         w0 = mt76_rr(dev, addr);
41816         w1 = mt76_rr(dev, addr + 4);
41817 -       if (cmd == SET_KEY) {
41818 -               w0 |= MT_WTBL_W0_RX_KEY_VALID |
41819 -                     FIELD_PREP(MT_WTBL_W0_RX_IK_VALID,
41820 -                                cipher == MT_CIPHER_BIP_CMAC_128);
41821 -               if (cipher != MT_CIPHER_BIP_CMAC_128 ||
41822 -                   !wcid->cipher)
41823 -                       w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx);
41824 -       }  else {
41825 -               if (!(wcid->cipher & ~BIT(cipher)))
41826 -                       w0 &= ~(MT_WTBL_W0_RX_KEY_VALID |
41827 -                               MT_WTBL_W0_KEY_IDX);
41828 -               if (cipher == MT_CIPHER_BIP_CMAC_128)
41829 -                       w0 &= ~MT_WTBL_W0_RX_IK_VALID;
41831 +       if (cipher_mask)
41832 +               w0 |= MT_WTBL_W0_RX_KEY_VALID;
41833 +       else
41834 +               w0 &= ~(MT_WTBL_W0_RX_KEY_VALID | MT_WTBL_W0_KEY_IDX);
41835 +       if (cipher_mask & BIT(MT_CIPHER_BIP_CMAC_128))
41836 +               w0 |= MT_WTBL_W0_RX_IK_VALID;
41837 +       else
41838 +               w0 &= ~MT_WTBL_W0_RX_IK_VALID;
41840 +       if (cmd == SET_KEY &&
41841 +           (cipher != MT_CIPHER_BIP_CMAC_128 ||
41842 +            cipher_mask == BIT(cipher))) {
41843 +               w0 &= ~MT_WTBL_W0_KEY_IDX;
41844 +               w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx);
41845         }
41847         mt76_wr(dev, MT_WTBL_RICR0, w0);
41848         mt76_wr(dev, MT_WTBL_RICR1, w1);
41850 @@ -1109,24 +1116,25 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
41852  static void
41853  mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
41854 -                             enum mt7615_cipher_type cipher,
41855 +                             enum mt7615_cipher_type cipher, u16 cipher_mask,
41856                               enum set_key_cmd cmd)
41858         u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx);
41860 -       if (cmd == SET_KEY) {
41861 -               if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher)
41862 -                       mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
41863 -                                FIELD_PREP(MT_WTBL_W2_KEY_TYPE, cipher));
41864 -       } else {
41865 -               if (cipher != MT_CIPHER_BIP_CMAC_128 &&
41866 -                   wcid->cipher & BIT(MT_CIPHER_BIP_CMAC_128))
41867 -                       mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
41868 -                                FIELD_PREP(MT_WTBL_W2_KEY_TYPE,
41869 -                                           MT_CIPHER_BIP_CMAC_128));
41870 -               else if (!(wcid->cipher & ~BIT(cipher)))
41871 -                       mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE);
41872 +       if (!cipher_mask) {
41873 +               mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE);
41874 +               return;
41875         }
41877 +       if (cmd != SET_KEY)
41878 +               return;
41880 +       if (cipher == MT_CIPHER_BIP_CMAC_128 &&
41881 +           cipher_mask & ~BIT(MT_CIPHER_BIP_CMAC_128))
41882 +               return;
41884 +       mt76_rmw(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE,
41885 +                FIELD_PREP(MT_WTBL_W2_KEY_TYPE, cipher));
41888  int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
41889 @@ -1135,25 +1143,30 @@ int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
41890                               enum set_key_cmd cmd)
41892         enum mt7615_cipher_type cipher;
41893 +       u16 cipher_mask = wcid->cipher;
41894         int err;
41896         cipher = mt7615_mac_get_cipher(key->cipher);
41897         if (cipher == MT_CIPHER_NONE)
41898                 return -EOPNOTSUPP;
41900 -       mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cmd);
41901 -       err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cmd);
41902 +       if (cmd == SET_KEY)
41903 +               cipher_mask |= BIT(cipher);
41904 +       else
41905 +               cipher_mask &= ~BIT(cipher);
41907 +       mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cipher_mask, cmd);
41908 +       err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cipher_mask,
41909 +                                        cmd);
41910         if (err < 0)
41911                 return err;
41913 -       err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx, cmd);
41914 +       err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, cipher_mask,
41915 +                                       key->keyidx, cmd);
41916         if (err < 0)
41917                 return err;
41919 -       if (cmd == SET_KEY)
41920 -               wcid->cipher |= BIT(cipher);
41921 -       else
41922 -               wcid->cipher &= ~BIT(cipher);
41923 +       wcid->cipher = cipher_mask;
41925         return 0;
41927 @@ -1821,10 +1834,8 @@ mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
41928         int i, aggr;
41929         u32 val, val2;
41931 -       memset(mib, 0, sizeof(*mib));
41933 -       mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
41934 -                                         MT_MIB_SDR3_FCS_ERR_MASK);
41935 +       mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
41936 +                                          MT_MIB_SDR3_FCS_ERR_MASK);
41938         val = mt76_get_field(dev, MT_MIB_SDR14(ext_phy),
41939                              MT_MIB_AMPDU_MPDU_COUNT);
41940 @@ -1837,24 +1848,16 @@ mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
41941         aggr = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
41942         for (i = 0; i < 4; i++) {
41943                 val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
41945 -               val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
41946 -               if (val2 > mib->ack_fail_cnt)
41947 -                       mib->ack_fail_cnt = val2;
41949 -               val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
41950 -               if (val2 > mib->ba_miss_cnt)
41951 -                       mib->ba_miss_cnt = val2;
41952 +               mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
41953 +               mib->ack_fail_cnt += FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK,
41954 +                                              val);
41956                 val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
41957 -               val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
41958 -               if (val2 > mib->rts_retries_cnt) {
41959 -                       mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
41960 -                       mib->rts_retries_cnt = val2;
41961 -               }
41962 +               mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
41963 +               mib->rts_retries_cnt += FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK,
41964 +                                                 val);
41966                 val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
41968                 dev->mt76.aggr_stats[aggr++] += val & 0xffff;
41969                 dev->mt76.aggr_stats[aggr++] += val >> 16;
41970         }
41971 @@ -1976,15 +1979,17 @@ void mt7615_dma_reset(struct mt7615_dev *dev)
41972         mt76_clear(dev, MT_WPDMA_GLO_CFG,
41973                    MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
41974                    MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
41976         usleep_range(1000, 2000);
41978 -       mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true);
41979         for (i = 0; i < __MT_TXQ_MAX; i++)
41980                 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
41982 -       mt76_for_each_q_rx(&dev->mt76, i) {
41983 +       for (i = 0; i < __MT_MCUQ_MAX; i++)
41984 +               mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);
41986 +       mt76_for_each_q_rx(&dev->mt76, i)
41987                 mt76_queue_rx_reset(dev, i);
41988 -       }
41990         mt76_set(dev, MT_WPDMA_GLO_CFG,
41991                  MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
41992 @@ -2000,8 +2005,12 @@ void mt7615_tx_token_put(struct mt7615_dev *dev)
41993         spin_lock_bh(&dev->token_lock);
41994         idr_for_each_entry(&dev->token, txwi, id) {
41995                 mt7615_txp_skb_unmap(&dev->mt76, txwi);
41996 -               if (txwi->skb)
41997 -                       dev_kfree_skb_any(txwi->skb);
41998 +               if (txwi->skb) {
41999 +                       struct ieee80211_hw *hw;
42001 +                       hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb);
42002 +                       ieee80211_free_txskb(hw, txwi->skb);
42003 +               }
42004                 mt76_put_txwi(&dev->mt76, txwi);
42005         }
42006         spin_unlock_bh(&dev->token_lock);
42007 @@ -2304,8 +2313,10 @@ void mt7615_coredump_work(struct work_struct *work)
42008                         break;
42010                 skb_pull(skb, sizeof(struct mt7615_mcu_rxd));
42011 -               if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ)
42012 -                       break;
42013 +               if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
42014 +                       dev_kfree_skb(skb);
42015 +                       continue;
42016 +               }
42018                 memcpy(data, skb->data, skb->len);
42019                 data += skb->len;
42020 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
42021 index 25faf486d279..d334491667a4 100644
42022 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
42023 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
42024 @@ -217,8 +217,6 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
42025         ret = mt7615_mcu_add_dev_info(phy, vif, true);
42026         if (ret)
42027                 goto out;
42029 -       mt7615_mac_set_beacon_filter(phy, vif, true);
42030  out:
42031         mt7615_mutex_release(dev);
42033 @@ -244,7 +242,6 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
42035         mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid);
42037 -       mt7615_mac_set_beacon_filter(phy, vif, false);
42038         mt7615_mcu_add_dev_info(phy, vif, false);
42040         rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
42041 @@ -337,7 +334,8 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
42042         struct mt7615_sta *msta = sta ? (struct mt7615_sta *)sta->drv_priv :
42043                                   &mvif->sta;
42044         struct mt76_wcid *wcid = &msta->wcid;
42045 -       int idx = key->keyidx, err;
42046 +       int idx = key->keyidx, err = 0;
42047 +       u8 *wcid_keyidx = &wcid->hw_key_idx;
42049         /* The hardware does not support per-STA RX GTK, fallback
42050          * to software mode for these.
42051 @@ -352,6 +350,7 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
42052         /* fall back to sw encryption for unsupported ciphers */
42053         switch (key->cipher) {
42054         case WLAN_CIPHER_SUITE_AES_CMAC:
42055 +               wcid_keyidx = &wcid->hw_key_idx2;
42056                 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
42057                 break;
42058         case WLAN_CIPHER_SUITE_TKIP:
42059 @@ -369,12 +368,13 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
42061         mt7615_mutex_acquire(dev);
42063 -       if (cmd == SET_KEY) {
42064 -               key->hw_key_idx = wcid->idx;
42065 -               wcid->hw_key_idx = idx;
42066 -       } else if (idx == wcid->hw_key_idx) {
42067 -               wcid->hw_key_idx = -1;
42068 -       }
42069 +       if (cmd == SET_KEY)
42070 +               *wcid_keyidx = idx;
42071 +       else if (idx == *wcid_keyidx)
42072 +               *wcid_keyidx = -1;
42073 +       else
42074 +               goto out;
42076         mt76_wcid_key_setup(&dev->mt76, wcid,
42077                             cmd == SET_KEY ? key : NULL);
42079 @@ -383,6 +383,7 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
42080         else
42081                 err = __mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
42083 +out:
42084         mt7615_mutex_release(dev);
42086         return err;
42087 @@ -544,6 +545,9 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
42088         if (changed & BSS_CHANGED_ARP_FILTER)
42089                 mt7615_mcu_update_arp_filter(hw, vif, info);
42091 +       if (changed & BSS_CHANGED_ASSOC)
42092 +               mt7615_mac_set_beacon_filter(phy, vif, info->assoc);
42094         mt7615_mutex_release(dev);
42097 @@ -803,26 +807,38 @@ mt7615_get_stats(struct ieee80211_hw *hw,
42098         struct mt7615_phy *phy = mt7615_hw_phy(hw);
42099         struct mib_stats *mib = &phy->mib;
42101 +       mt7615_mutex_acquire(phy->dev);
42103         stats->dot11RTSSuccessCount = mib->rts_cnt;
42104         stats->dot11RTSFailureCount = mib->rts_retries_cnt;
42105         stats->dot11FCSErrorCount = mib->fcs_err_cnt;
42106         stats->dot11ACKFailureCount = mib->ack_fail_cnt;
42108 +       memset(mib, 0, sizeof(*mib));
42110 +       mt7615_mutex_release(phy->dev);
42112         return 0;
42115  static u64
42116  mt7615_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
42118 +       struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
42119         struct mt7615_dev *dev = mt7615_hw_dev(hw);
42120         union {
42121                 u64 t64;
42122                 u32 t32[2];
42123         } tsf;
42124 +       u16 idx = mvif->mt76.omac_idx;
42125 +       u32 reg;
42127 +       idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
42128 +       reg = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx);
42130         mt7615_mutex_acquire(dev);
42132 -       mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
42133 +       mt76_set(dev, reg, MT_LPON_TCR_MODE); /* TSF read */
42134         tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0);
42135         tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1);
42137 @@ -835,18 +851,24 @@ static void
42138  mt7615_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
42139                u64 timestamp)
42141 +       struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
42142         struct mt7615_dev *dev = mt7615_hw_dev(hw);
42143         union {
42144                 u64 t64;
42145                 u32 t32[2];
42146         } tsf = { .t64 = timestamp, };
42147 +       u16 idx = mvif->mt76.omac_idx;
42148 +       u32 reg;
42150 +       idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
42151 +       reg = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx);
42153         mt7615_mutex_acquire(dev);
42155         mt76_wr(dev, MT_LPON_UTTR0, tsf.t32[0]);
42156         mt76_wr(dev, MT_LPON_UTTR1, tsf.t32[1]);
42157         /* TSF software overwrite */
42158 -       mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_WRITE);
42159 +       mt76_set(dev, reg, MT_LPON_TCR_WRITE);
42161         mt7615_mutex_release(dev);
42163 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
42164 index 631596fc2f36..198e9025b681 100644
42165 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
42166 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
42167 @@ -291,12 +291,20 @@ static int mt7615_mcu_drv_pmctrl(struct mt7615_dev *dev)
42168         u32 addr;
42169         int err;
42171 -       addr = is_mt7663(mdev) ? MT_PCIE_DOORBELL_PUSH : MT_CFG_LPCR_HOST;
42172 +       if (is_mt7663(mdev)) {
42173 +               /* Clear firmware own via N9 eint */
42174 +               mt76_wr(dev, MT_PCIE_DOORBELL_PUSH, MT_CFG_LPCR_HOST_DRV_OWN);
42175 +               mt76_poll(dev, MT_CONN_ON_MISC, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000);
42177 +               addr = MT_CONN_HIF_ON_LPCTL;
42178 +       } else {
42179 +               addr = MT_CFG_LPCR_HOST;
42180 +       }
42182         mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN);
42184         mt7622_trigger_hif_int(dev, true);
42186 -       addr = is_mt7663(mdev) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
42187         err = !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000);
42189         mt7622_trigger_hif_int(dev, false);
42190 @@ -1040,6 +1048,9 @@ mt7615_mcu_sta_ba(struct mt7615_dev *dev,
42192         wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
42193                                                   WTBL_SET, sta_wtbl, &skb);
42194 +       if (IS_ERR(wtbl_hdr))
42195 +               return PTR_ERR(wtbl_hdr);
42197         mt76_connac_mcu_wtbl_ba_tlv(&dev->mt76, skb, params, enable, tx,
42198                                     sta_wtbl, wtbl_hdr);
42200 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
42201 index 491841bc6291..4bc0c379c579 100644
42202 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
42203 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
42204 @@ -133,11 +133,11 @@ struct mt7615_vif {
42205  };
42207  struct mib_stats {
42208 -       u16 ack_fail_cnt;
42209 -       u16 fcs_err_cnt;
42210 -       u16 rts_cnt;
42211 -       u16 rts_retries_cnt;
42212 -       u16 ba_miss_cnt;
42213 +       u32 ack_fail_cnt;
42214 +       u32 fcs_err_cnt;
42215 +       u32 rts_cnt;
42216 +       u32 rts_retries_cnt;
42217 +       u32 ba_miss_cnt;
42218         unsigned long aggr_per;
42219  };
42221 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
42222 index 72395925ddee..15b417d6d889 100644
42223 --- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
42224 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
42225 @@ -163,10 +163,9 @@ void mt7615_unregister_device(struct mt7615_dev *dev)
42226         mt76_unregister_device(&dev->mt76);
42227         if (mcu_running)
42228                 mt7615_mcu_exit(dev);
42229 -       mt7615_dma_cleanup(dev);
42231         mt7615_tx_token_put(dev);
42233 +       mt7615_dma_cleanup(dev);
42234         tasklet_disable(&dev->irq_tasklet);
42236         mt76_free_device(&dev->mt76);
42237 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
42238 index 6e5db015b32c..6e4710d3ddd3 100644
42239 --- a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
42240 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
42241 @@ -447,9 +447,10 @@ enum mt7615_reg_base {
42243  #define MT_LPON(_n)                    ((dev)->reg_map[MT_LPON_BASE] + (_n))
42245 -#define MT_LPON_T0CR                   MT_LPON(0x010)
42246 -#define MT_LPON_T0CR_MODE              GENMASK(1, 0)
42247 -#define MT_LPON_T0CR_WRITE             BIT(0)
42248 +#define MT_LPON_TCR0(_n)               MT_LPON(0x010 + ((_n) * 4))
42249 +#define MT_LPON_TCR2(_n)               MT_LPON(0x0f8 + ((_n) - 2) * 4)
42250 +#define MT_LPON_TCR_MODE               GENMASK(1, 0)
42251 +#define MT_LPON_TCR_WRITE              BIT(0)
42253  #define MT_LPON_UTTR0                  MT_LPON(0x018)
42254  #define MT_LPON_UTTR1                  MT_LPON(0x01c)
42255 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
42256 index 9fb506f2ace6..4393dd21ebbb 100644
42257 --- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
42258 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
42259 @@ -218,12 +218,15 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
42260         int qid, err, nframes = 0, len = 0, pse_sz = 0, ple_sz = 0;
42261         bool mcu = q == dev->q_mcu[MT_MCUQ_WM];
42262         struct mt76_sdio *sdio = &dev->sdio;
42263 +       u8 pad;
42265         qid = mcu ? ARRAY_SIZE(sdio->xmit_buf) - 1 : q->qid;
42266         while (q->first != q->head) {
42267                 struct mt76_queue_entry *e = &q->entry[q->first];
42268                 struct sk_buff *iter;
42270 +               smp_rmb();
42272                 if (!test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) {
42273                         __skb_put_zero(e->skb, 4);
42274                         err = __mt7663s_xmit_queue(dev, e->skb->data,
42275 @@ -234,7 +237,8 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
42276                         goto next;
42277                 }
42279 -               if (len + e->skb->len + 4 > MT76S_XMIT_BUF_SZ)
42280 +               pad = roundup(e->skb->len, 4) - e->skb->len;
42281 +               if (len + e->skb->len + pad + 4 > MT76S_XMIT_BUF_SZ)
42282                         break;
42284                 if (mt7663s_tx_pick_quota(sdio, mcu, e->buf_sz, &pse_sz,
42285 @@ -252,6 +256,11 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
42286                         len += iter->len;
42287                         nframes++;
42288                 }
42290 +               if (unlikely(pad)) {
42291 +                       memset(sdio->xmit_buf[qid] + len, 0, pad);
42292 +                       len += pad;
42293 +               }
42294  next:
42295                 q->first = (q->first + 1) % q->ndesc;
42296                 e->done = true;
42297 diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
42298 index 203256862dfd..f8d3673c2cae 100644
42299 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
42300 +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
42301 @@ -67,6 +67,7 @@ static int mt7663_usb_sdio_set_rates(struct mt7615_dev *dev,
42302         struct mt7615_rate_desc *rate = &wrd->rate;
42303         struct mt7615_sta *sta = wrd->sta;
42304         u32 w5, w27, addr, val;
42305 +       u16 idx;
42307         lockdep_assert_held(&dev->mt76.mutex);
42309 @@ -118,7 +119,11 @@ static int mt7663_usb_sdio_set_rates(struct mt7615_dev *dev,
42311         sta->rate_probe = sta->rateset[rate->rateset].probe_rate.idx != -1;
42313 -       mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
42314 +       idx = sta->vif->mt76.omac_idx;
42315 +       idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
42316 +       addr = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx);
42318 +       mt76_set(dev, addr, MT_LPON_TCR_MODE); /* TSF read */
42319         val = mt76_rr(dev, MT_LPON_UTTR0);
42320         sta->rate_set_tsf = (val & ~BIT(0)) | rate->rateset;
42322 diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
42323 index 6cbccfb05f8b..cefd33b74a87 100644
42324 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
42325 +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
42326 @@ -833,6 +833,9 @@ int mt76_connac_mcu_add_sta_cmd(struct mt76_phy *phy,
42327         wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, wcid,
42328                                                   WTBL_RESET_AND_SET,
42329                                                   sta_wtbl, &skb);
42330 +       if (IS_ERR(wtbl_hdr))
42331 +               return PTR_ERR(wtbl_hdr);
42333         if (enable) {
42334                 mt76_connac_mcu_wtbl_generic_tlv(dev, skb, vif, sta, sta_wtbl,
42335                                                  wtbl_hdr);
42336 @@ -946,6 +949,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
42338         switch (vif->type) {
42339         case NL80211_IFTYPE_MESH_POINT:
42340 +       case NL80211_IFTYPE_MONITOR:
42341         case NL80211_IFTYPE_AP:
42342                 basic_req.basic.conn_type = cpu_to_le32(CONNECTION_INFRA_AP);
42343                 break;
42344 @@ -1195,6 +1199,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
42345                         .center_chan = ieee80211_frequency_to_channel(freq1),
42346                         .center_chan2 = ieee80211_frequency_to_channel(freq2),
42347                         .tx_streams = hweight8(phy->antenna_mask),
42348 +                       .ht_op_info = 4, /* set HT 40M allowed */
42349                         .rx_streams = phy->chainmask,
42350                         .short_st = true,
42351                 },
42352 @@ -1287,6 +1292,7 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
42353         case NL80211_CHAN_WIDTH_20:
42354         default:
42355                 rlm_req.rlm.bw = CMD_CBW_20MHZ;
42356 +               rlm_req.rlm.ht_op_info = 0;
42357                 break;
42358         }
42360 @@ -1306,7 +1312,7 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
42362         struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
42363         struct cfg80211_scan_request *sreq = &scan_req->req;
42364 -       int n_ssids = 0, err, i, duration = MT76_CONNAC_SCAN_CHANNEL_TIME;
42365 +       int n_ssids = 0, err, i, duration;
42366         int ext_channels_num = max_t(int, sreq->n_channels - 32, 0);
42367         struct ieee80211_channel **scan_list = sreq->channels;
42368         struct mt76_dev *mdev = phy->dev;
42369 @@ -1343,6 +1349,7 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
42370         req->ssid_type_ext = n_ssids ? BIT(0) : 0;
42371         req->ssids_num = n_ssids;
42373 +       duration = is_mt7921(phy->dev) ? 0 : MT76_CONNAC_SCAN_CHANNEL_TIME;
42374         /* increase channel time for passive scan */
42375         if (!sreq->n_ssids)
42376                 duration *= 2;
42377 diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
42378 index ab671e21f882..02db5d66735d 100644
42379 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
42380 +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
42381 @@ -447,6 +447,10 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
42382             !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
42383                 return -EOPNOTSUPP;
42385 +       /* MT76x0 GTK offloading does not work with more than one VIF */
42386 +       if (is_mt76x0(dev) && !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
42387 +               return -EOPNOTSUPP;
42389         msta = sta ? (struct mt76x02_sta *)sta->drv_priv : NULL;
42390         wcid = msta ? &msta->wcid : &mvif->group_wcid;
42392 diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
42393 index 77dcd71e49a5..2f706620686e 100644
42394 --- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
42395 +++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
42396 @@ -124,7 +124,7 @@ mt7915_ampdu_stat_read_phy(struct mt7915_phy *phy,
42397                 range[i] = mt76_rr(dev, MT_MIB_ARNG(ext_phy, i));
42399         for (i = 0; i < ARRAY_SIZE(bound); i++)
42400 -               bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i) + 1;
42401 +               bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i % 4) + 1;
42403         seq_printf(file, "\nPhy %d\n", ext_phy);
42405 diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
42406 index 660398ac53c2..738ecf8f4fa2 100644
42407 --- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
42408 +++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
42409 @@ -124,7 +124,7 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
42410                                    struct ieee80211_channel *chan,
42411                                    u8 chain_idx)
42413 -       int index;
42414 +       int index, target_power;
42415         bool tssi_on;
42417         if (chain_idx > 3)
42418 @@ -133,15 +133,22 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
42419         tssi_on = mt7915_tssi_enabled(dev, chan->band);
42421         if (chan->band == NL80211_BAND_2GHZ) {
42422 -               index = MT_EE_TX0_POWER_2G + chain_idx * 3 + !tssi_on;
42423 +               index = MT_EE_TX0_POWER_2G + chain_idx * 3;
42424 +               target_power = mt7915_eeprom_read(dev, index);
42426 +               if (!tssi_on)
42427 +                       target_power += mt7915_eeprom_read(dev, index + 1);
42428         } else {
42429 -               int group = tssi_on ?
42430 -                           mt7915_get_channel_group(chan->hw_value) : 8;
42431 +               int group = mt7915_get_channel_group(chan->hw_value);
42433 +               index = MT_EE_TX0_POWER_5G + chain_idx * 12;
42434 +               target_power = mt7915_eeprom_read(dev, index + group);
42436 -               index = MT_EE_TX0_POWER_5G + chain_idx * 12 + group;
42437 +               if (!tssi_on)
42438 +                       target_power += mt7915_eeprom_read(dev, index + 8);
42439         }
42441 -       return mt7915_eeprom_read(dev, index);
42442 +       return target_power;
42445  static const u8 sku_cck_delta_map[] = {
42446 diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
42447 index ad4e5b95158b..c7d4268d860a 100644
42448 --- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
42449 +++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
42450 @@ -4,6 +4,7 @@
42451  #include <linux/etherdevice.h>
42452  #include "mt7915.h"
42453  #include "mac.h"
42454 +#include "mcu.h"
42455  #include "eeprom.h"
42457  #define CCK_RATE(_idx, _rate) {                                                \
42458 @@ -283,9 +284,50 @@ static void mt7915_init_work(struct work_struct *work)
42459         mt7915_register_ext_phy(dev);
42462 +static void mt7915_wfsys_reset(struct mt7915_dev *dev)
42464 +       u32 val = MT_TOP_PWR_KEY | MT_TOP_PWR_SW_PWR_ON | MT_TOP_PWR_PWR_ON;
42465 +       u32 reg = mt7915_reg_map_l1(dev, MT_TOP_MISC);
42467 +#define MT_MCU_DUMMY_RANDOM    GENMASK(15, 0)
42468 +#define MT_MCU_DUMMY_DEFAULT   GENMASK(31, 16)
42470 +       mt76_wr(dev, MT_MCU_WFDMA0_DUMMY_CR, MT_MCU_DUMMY_RANDOM);
42472 +       /* change to software control */
42473 +       val |= MT_TOP_PWR_SW_RST;
42474 +       mt76_wr(dev, MT_TOP_PWR_CTRL, val);
42476 +       /* reset wfsys */
42477 +       val &= ~MT_TOP_PWR_SW_RST;
42478 +       mt76_wr(dev, MT_TOP_PWR_CTRL, val);
42480 +       /* release wfsys then mcu re-excutes romcode */
42481 +       val |= MT_TOP_PWR_SW_RST;
42482 +       mt76_wr(dev, MT_TOP_PWR_CTRL, val);
42484 +       /* switch to hw control */
42485 +       val &= ~MT_TOP_PWR_SW_RST;
42486 +       val |= MT_TOP_PWR_HW_CTRL;
42487 +       mt76_wr(dev, MT_TOP_PWR_CTRL, val);
42489 +       /* check whether mcu resets to default */
42490 +       if (!mt76_poll_msec(dev, MT_MCU_WFDMA0_DUMMY_CR, MT_MCU_DUMMY_DEFAULT,
42491 +                           MT_MCU_DUMMY_DEFAULT, 1000)) {
42492 +               dev_err(dev->mt76.dev, "wifi subsystem reset failure\n");
42493 +               return;
42494 +       }
42496 +       /* wfsys reset won't clear host registers */
42497 +       mt76_clear(dev, reg, MT_TOP_MISC_FW_STATE);
42499 +       msleep(100);
42502  static int mt7915_init_hardware(struct mt7915_dev *dev)
42504         int ret, idx;
42505 +       u32 val;
42507         mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
42509 @@ -295,6 +337,12 @@ static int mt7915_init_hardware(struct mt7915_dev *dev)
42511         dev->dbdc_support = !!(mt7915_l1_rr(dev, MT_HW_BOUND) & BIT(5));
42513 +       val = mt76_rr(dev, mt7915_reg_map_l1(dev, MT_TOP_MISC));
42515 +       /* If MCU was already running, it is likely in a bad state */
42516 +       if (FIELD_GET(MT_TOP_MISC_FW_STATE, val) > FW_STATE_FW_DOWNLOAD)
42517 +               mt7915_wfsys_reset(dev);
42519         ret = mt7915_dma_init(dev);
42520         if (ret)
42521                 return ret;
42522 @@ -308,8 +356,14 @@ static int mt7915_init_hardware(struct mt7915_dev *dev)
42523         mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE);
42525         ret = mt7915_mcu_init(dev);
42526 -       if (ret)
42527 -               return ret;
42528 +       if (ret) {
42529 +               /* Reset and try again */
42530 +               mt7915_wfsys_reset(dev);
42532 +               ret = mt7915_mcu_init(dev);
42533 +               if (ret)
42534 +                       return ret;
42535 +       }
42537         ret = mt7915_eeprom_init(dev);
42538         if (ret < 0)
42539 @@ -675,9 +729,8 @@ void mt7915_unregister_device(struct mt7915_dev *dev)
42540         mt7915_unregister_ext_phy(dev);
42541         mt76_unregister_device(&dev->mt76);
42542         mt7915_mcu_exit(dev);
42543 -       mt7915_dma_cleanup(dev);
42545         mt7915_tx_token_put(dev);
42546 +       mt7915_dma_cleanup(dev);
42548         mt76_free_device(&dev->mt76);
42550 diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
42551 index e5a258958ac9..819670767521 100644
42552 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
42553 +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
42554 @@ -1091,7 +1091,7 @@ void mt7915_txp_skb_unmap(struct mt76_dev *dev,
42555         int i;
42557         txp = mt7915_txwi_to_txp(dev, t);
42558 -       for (i = 1; i < txp->nbuf; i++)
42559 +       for (i = 0; i < txp->nbuf; i++)
42560                 dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
42561                                  le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
42563 @@ -1470,9 +1470,8 @@ mt7915_update_beacons(struct mt7915_dev *dev)
42566  static void
42567 -mt7915_dma_reset(struct mt7915_phy *phy)
42568 +mt7915_dma_reset(struct mt7915_dev *dev)
42570 -       struct mt7915_dev *dev = phy->dev;
42571         struct mt76_phy *mphy_ext = dev->mt76.phy2;
42572         u32 hif1_ofs = MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE;
42573         int i;
42574 @@ -1489,18 +1488,20 @@ mt7915_dma_reset(struct mt7915_phy *phy)
42575                            (MT_WFDMA1_GLO_CFG_TX_DMA_EN |
42576                             MT_WFDMA1_GLO_CFG_RX_DMA_EN));
42577         }
42579         usleep_range(1000, 2000);
42581 -       mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], true);
42582         for (i = 0; i < __MT_TXQ_MAX; i++) {
42583 -               mt76_queue_tx_cleanup(dev, phy->mt76->q_tx[i], true);
42584 +               mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
42585                 if (mphy_ext)
42586                         mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[i], true);
42587         }
42589 -       mt76_for_each_q_rx(&dev->mt76, i) {
42590 +       for (i = 0; i < __MT_MCUQ_MAX; i++)
42591 +               mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);
42593 +       mt76_for_each_q_rx(&dev->mt76, i)
42594                 mt76_queue_rx_reset(dev, i);
42595 -       }
42597         /* re-init prefetch settings after reset */
42598         mt7915_dma_prefetch(dev);
42599 @@ -1584,7 +1585,7 @@ void mt7915_mac_reset_work(struct work_struct *work)
42600         idr_init(&dev->token);
42602         if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
42603 -               mt7915_dma_reset(&dev->phy);
42604 +               mt7915_dma_reset(dev);
42606                 mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
42607                 mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
42608 @@ -1633,39 +1634,30 @@ mt7915_mac_update_mib_stats(struct mt7915_phy *phy)
42609         bool ext_phy = phy != &dev->phy;
42610         int i, aggr0, aggr1;
42612 -       memset(mib, 0, sizeof(*mib));
42614 -       mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
42615 -                                         MT_MIB_SDR3_FCS_ERR_MASK);
42616 +       mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
42617 +                                          MT_MIB_SDR3_FCS_ERR_MASK);
42619         aggr0 = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
42620         for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
42621 -               u32 val, val2;
42622 +               u32 val;
42624                 val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
42626 -               val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
42627 -               if (val2 > mib->ack_fail_cnt)
42628 -                       mib->ack_fail_cnt = val2;
42630 -               val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
42631 -               if (val2 > mib->ba_miss_cnt)
42632 -                       mib->ba_miss_cnt = val2;
42633 +               mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
42634 +               mib->ack_fail_cnt +=
42635 +                       FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
42637                 val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
42638 -               val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
42639 -               if (val2 > mib->rts_retries_cnt) {
42640 -                       mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
42641 -                       mib->rts_retries_cnt = val2;
42642 -               }
42643 +               mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
42644 +               mib->rts_retries_cnt +=
42645 +                       FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
42647                 val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
42648 -               val2 = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
42650                 dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
42651                 dev->mt76.aggr_stats[aggr0++] += val >> 16;
42652 -               dev->mt76.aggr_stats[aggr1++] += val2 & 0xffff;
42653 -               dev->mt76.aggr_stats[aggr1++] += val2 >> 16;
42655 +               val = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
42656 +               dev->mt76.aggr_stats[aggr1++] += val & 0xffff;
42657 +               dev->mt76.aggr_stats[aggr1++] += val >> 16;
42658         }
42661 diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
42662 index d4969b2e1ffb..bf032d943f74 100644
42663 --- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
42664 +++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
42665 @@ -317,7 +317,9 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
42666         struct mt7915_sta *msta = sta ? (struct mt7915_sta *)sta->drv_priv :
42667                                   &mvif->sta;
42668         struct mt76_wcid *wcid = &msta->wcid;
42669 +       u8 *wcid_keyidx = &wcid->hw_key_idx;
42670         int idx = key->keyidx;
42671 +       int err = 0;
42673         /* The hardware does not support per-STA RX GTK, fallback
42674          * to software mode for these.
42675 @@ -332,6 +334,7 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
42676         /* fall back to sw encryption for unsupported ciphers */
42677         switch (key->cipher) {
42678         case WLAN_CIPHER_SUITE_AES_CMAC:
42679 +               wcid_keyidx = &wcid->hw_key_idx2;
42680                 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
42681                 break;
42682         case WLAN_CIPHER_SUITE_TKIP:
42683 @@ -347,16 +350,24 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
42684                 return -EOPNOTSUPP;
42685         }
42687 -       if (cmd == SET_KEY) {
42688 -               key->hw_key_idx = wcid->idx;
42689 -               wcid->hw_key_idx = idx;
42690 -       } else if (idx == wcid->hw_key_idx) {
42691 -               wcid->hw_key_idx = -1;
42692 -       }
42693 +       mutex_lock(&dev->mt76.mutex);
42695 +       if (cmd == SET_KEY)
42696 +               *wcid_keyidx = idx;
42697 +       else if (idx == *wcid_keyidx)
42698 +               *wcid_keyidx = -1;
42699 +       else
42700 +               goto out;
42702         mt76_wcid_key_setup(&dev->mt76, wcid,
42703                             cmd == SET_KEY ? key : NULL);
42705 -       return mt7915_mcu_add_key(dev, vif, msta, key, cmd);
42706 +       err = mt7915_mcu_add_key(dev, vif, msta, key, cmd);
42708 +out:
42709 +       mutex_unlock(&dev->mt76.mutex);
42711 +       return err;
42714  static int mt7915_config(struct ieee80211_hw *hw, u32 changed)
42715 @@ -717,13 +728,19 @@ mt7915_get_stats(struct ieee80211_hw *hw,
42716                  struct ieee80211_low_level_stats *stats)
42718         struct mt7915_phy *phy = mt7915_hw_phy(hw);
42719 +       struct mt7915_dev *dev = mt7915_hw_dev(hw);
42720         struct mib_stats *mib = &phy->mib;
42722 +       mutex_lock(&dev->mt76.mutex);
42723         stats->dot11RTSSuccessCount = mib->rts_cnt;
42724         stats->dot11RTSFailureCount = mib->rts_retries_cnt;
42725         stats->dot11FCSErrorCount = mib->fcs_err_cnt;
42726         stats->dot11ACKFailureCount = mib->ack_fail_cnt;
42728 +       memset(mib, 0, sizeof(*mib));
42730 +       mutex_unlock(&dev->mt76.mutex);
42732         return 0;
42735 @@ -833,9 +850,12 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw,
42736         struct mt7915_phy *phy = mt7915_hw_phy(hw);
42737         struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
42738         struct mt7915_sta_stats *stats = &msta->stats;
42739 +       struct rate_info rxrate = {};
42741 -       if (mt7915_mcu_get_rx_rate(phy, vif, sta, &sinfo->rxrate) == 0)
42742 +       if (!mt7915_mcu_get_rx_rate(phy, vif, sta, &rxrate)) {
42743 +               sinfo->rxrate = rxrate;
42744                 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
42745 +       }
42747         if (!stats->tx_rate.legacy && !stats->tx_rate.flags)
42748                 return;
42749 diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
42750 index 195929242b72..f069a5a03e14 100644
42751 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
42752 +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
42753 @@ -351,54 +351,62 @@ mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb)
42754         dev->hw_pattern++;
42757 -static void
42758 +static int
42759  mt7915_mcu_tx_rate_parse(struct mt76_phy *mphy, struct mt7915_mcu_ra_info *ra,
42760                          struct rate_info *rate, u16 r)
42762         struct ieee80211_supported_band *sband;
42763         u16 ru_idx = le16_to_cpu(ra->ru_idx);
42764 -       u16 flags = 0;
42765 +       bool cck = false;
42767         rate->mcs = FIELD_GET(MT_RA_RATE_MCS, r);
42768         rate->nss = FIELD_GET(MT_RA_RATE_NSS, r) + 1;
42770         switch (FIELD_GET(MT_RA_RATE_TX_MODE, r)) {
42771         case MT_PHY_TYPE_CCK:
42772 +               cck = true;
42773 +               fallthrough;
42774         case MT_PHY_TYPE_OFDM:
42775                 if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
42776                         sband = &mphy->sband_5g.sband;
42777                 else
42778                         sband = &mphy->sband_2g.sband;
42780 +               rate->mcs = mt76_get_rate(mphy->dev, sband, rate->mcs, cck);
42781                 rate->legacy = sband->bitrates[rate->mcs].bitrate;
42782                 break;
42783         case MT_PHY_TYPE_HT:
42784         case MT_PHY_TYPE_HT_GF:
42785                 rate->mcs += (rate->nss - 1) * 8;
42786 -               flags |= RATE_INFO_FLAGS_MCS;
42787 +               if (rate->mcs > 31)
42788 +                       return -EINVAL;
42790 +               rate->flags = RATE_INFO_FLAGS_MCS;
42791                 if (ra->gi)
42792 -                       flags |= RATE_INFO_FLAGS_SHORT_GI;
42793 +                       rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
42794                 break;
42795         case MT_PHY_TYPE_VHT:
42796 -               flags |= RATE_INFO_FLAGS_VHT_MCS;
42797 +               if (rate->mcs > 9)
42798 +                       return -EINVAL;
42800 +               rate->flags = RATE_INFO_FLAGS_VHT_MCS;
42801                 if (ra->gi)
42802 -                       flags |= RATE_INFO_FLAGS_SHORT_GI;
42803 +                       rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
42804                 break;
42805         case MT_PHY_TYPE_HE_SU:
42806         case MT_PHY_TYPE_HE_EXT_SU:
42807         case MT_PHY_TYPE_HE_TB:
42808         case MT_PHY_TYPE_HE_MU:
42809 +               if (ra->gi > NL80211_RATE_INFO_HE_GI_3_2 || rate->mcs > 11)
42810 +                       return -EINVAL;
42812                 rate->he_gi = ra->gi;
42813                 rate->he_dcm = FIELD_GET(MT_RA_RATE_DCM_EN, r);
42815 -               flags |= RATE_INFO_FLAGS_HE_MCS;
42816 +               rate->flags = RATE_INFO_FLAGS_HE_MCS;
42817                 break;
42818         default:
42819 -               break;
42820 +               return -EINVAL;
42821         }
42822 -       rate->flags = flags;
42824         if (ru_idx) {
42825                 switch (ru_idx) {
42826 @@ -435,6 +443,8 @@ mt7915_mcu_tx_rate_parse(struct mt76_phy *mphy, struct mt7915_mcu_ra_info *ra,
42827                         break;
42828                 }
42829         }
42831 +       return 0;
42834  static void
42835 @@ -465,12 +475,12 @@ mt7915_mcu_tx_rate_report(struct mt7915_dev *dev, struct sk_buff *skb)
42836                 mphy = dev->mt76.phy2;
42838         /* current rate */
42839 -       mt7915_mcu_tx_rate_parse(mphy, ra, &rate, curr);
42840 -       stats->tx_rate = rate;
42841 +       if (!mt7915_mcu_tx_rate_parse(mphy, ra, &rate, curr))
42842 +               stats->tx_rate = rate;
42844         /* probing rate */
42845 -       mt7915_mcu_tx_rate_parse(mphy, ra, &prob_rate, probe);
42846 -       stats->prob_rate = prob_rate;
42847 +       if (!mt7915_mcu_tx_rate_parse(mphy, ra, &prob_rate, probe))
42848 +               stats->prob_rate = prob_rate;
42850         if (attempts) {
42851                 u16 success = le16_to_cpu(ra->success);
42852 @@ -1188,6 +1198,9 @@ mt7915_mcu_sta_ba(struct mt7915_dev *dev,
42854         wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
42855                                              &skb);
42856 +       if (IS_ERR(wtbl_hdr))
42857 +               return PTR_ERR(wtbl_hdr);
42859         mt7915_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr);
42861         ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
42862 @@ -1704,6 +1717,9 @@ int mt7915_mcu_sta_update_hdr_trans(struct mt7915_dev *dev,
42863                 return -ENOMEM;
42865         wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, NULL, &skb);
42866 +       if (IS_ERR(wtbl_hdr))
42867 +               return PTR_ERR(wtbl_hdr);
42869         mt7915_mcu_wtbl_hdr_trans_tlv(skb, vif, sta, NULL, wtbl_hdr);
42871         return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD(WTBL_UPDATE),
42872 @@ -1728,6 +1744,9 @@ int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif,
42874         wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
42875                                              &skb);
42876 +       if (IS_ERR(wtbl_hdr))
42877 +               return PTR_ERR(wtbl_hdr);
42879         mt7915_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_hdr);
42881         return mt76_mcu_skb_send_msg(&dev->mt76, skb,
42882 @@ -2253,6 +2272,9 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
42884         wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_RESET_AND_SET,
42885                                              sta_wtbl, &skb);
42886 +       if (IS_ERR(wtbl_hdr))
42887 +               return PTR_ERR(wtbl_hdr);
42889         if (enable) {
42890                 mt7915_mcu_wtbl_generic_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr);
42891                 mt7915_mcu_wtbl_hdr_trans_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr);
42892 @@ -2742,21 +2764,8 @@ static int mt7915_load_ram(struct mt7915_dev *dev)
42894  static int mt7915_load_firmware(struct mt7915_dev *dev)
42896 +       u32 reg = mt7915_reg_map_l1(dev, MT_TOP_MISC);
42897         int ret;
42898 -       u32 val, reg = mt7915_reg_map_l1(dev, MT_TOP_MISC);
42900 -       val = FIELD_PREP(MT_TOP_MISC_FW_STATE, FW_STATE_FW_DOWNLOAD);
42902 -       if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE, val, 1000)) {
42903 -               /* restart firmware once */
42904 -               __mt76_mcu_restart(&dev->mt76);
42905 -               if (!mt76_poll_msec(dev, reg, MT_TOP_MISC_FW_STATE,
42906 -                                   val, 1000)) {
42907 -                       dev_err(dev->mt76.dev,
42908 -                               "Firmware is not ready for download\n");
42909 -                       return -EIO;
42910 -               }
42911 -       }
42913         ret = mt7915_load_patch(dev);
42914         if (ret)
42915 @@ -3501,9 +3510,8 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
42916         struct ieee80211_supported_band *sband;
42917         struct mt7915_mcu_phy_rx_info *res;
42918         struct sk_buff *skb;
42919 -       u16 flags = 0;
42920         int ret;
42921 -       int i;
42922 +       bool cck = false;
42924         ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(PHY_STAT_INFO),
42925                                         &req, sizeof(req), true, &skb);
42926 @@ -3517,48 +3525,53 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
42928         switch (res->mode) {
42929         case MT_PHY_TYPE_CCK:
42930 +               cck = true;
42931 +               fallthrough;
42932         case MT_PHY_TYPE_OFDM:
42933                 if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
42934                         sband = &mphy->sband_5g.sband;
42935                 else
42936                         sband = &mphy->sband_2g.sband;
42938 -               for (i = 0; i < sband->n_bitrates; i++) {
42939 -                       if (rate->mcs != (sband->bitrates[i].hw_value & 0xf))
42940 -                               continue;
42942 -                       rate->legacy = sband->bitrates[i].bitrate;
42943 -                       break;
42944 -               }
42945 +               rate->mcs = mt76_get_rate(&dev->mt76, sband, rate->mcs, cck);
42946 +               rate->legacy = sband->bitrates[rate->mcs].bitrate;
42947                 break;
42948         case MT_PHY_TYPE_HT:
42949         case MT_PHY_TYPE_HT_GF:
42950 -               if (rate->mcs > 31)
42951 -                       return -EINVAL;
42953 -               flags |= RATE_INFO_FLAGS_MCS;
42954 +               if (rate->mcs > 31) {
42955 +                       ret = -EINVAL;
42956 +                       goto out;
42957 +               }
42959 +               rate->flags = RATE_INFO_FLAGS_MCS;
42960                 if (res->gi)
42961 -                       flags |= RATE_INFO_FLAGS_SHORT_GI;
42962 +                       rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
42963                 break;
42964         case MT_PHY_TYPE_VHT:
42965 -               flags |= RATE_INFO_FLAGS_VHT_MCS;
42966 +               if (rate->mcs > 9) {
42967 +                       ret = -EINVAL;
42968 +                       goto out;
42969 +               }
42971 +               rate->flags = RATE_INFO_FLAGS_VHT_MCS;
42972                 if (res->gi)
42973 -                       flags |= RATE_INFO_FLAGS_SHORT_GI;
42974 +                       rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
42975                 break;
42976         case MT_PHY_TYPE_HE_SU:
42977         case MT_PHY_TYPE_HE_EXT_SU:
42978         case MT_PHY_TYPE_HE_TB:
42979         case MT_PHY_TYPE_HE_MU:
42980 +               if (res->gi > NL80211_RATE_INFO_HE_GI_3_2 || rate->mcs > 11) {
42981 +                       ret = -EINVAL;
42982 +                       goto out;
42983 +               }
42984                 rate->he_gi = res->gi;
42986 -               flags |= RATE_INFO_FLAGS_HE_MCS;
42987 +               rate->flags = RATE_INFO_FLAGS_HE_MCS;
42988                 break;
42989         default:
42990 -               break;
42991 +               ret = -EINVAL;
42992 +               goto out;
42993         }
42994 -       rate->flags = flags;
42996         switch (res->bw) {
42997         case IEEE80211_STA_RX_BW_160:
42998 @@ -3575,7 +3588,8 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
42999                 break;
43000         }
43002 +out:
43003         dev_kfree_skb(skb);
43005 -       return 0;
43006 +       return ret;
43008 diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
43009 index 5c7eefdf2013..1160d1bf8a7c 100644
43010 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
43011 +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
43012 @@ -108,11 +108,11 @@ struct mt7915_vif {
43013  };
43015  struct mib_stats {
43016 -       u16 ack_fail_cnt;
43017 -       u16 fcs_err_cnt;
43018 -       u16 rts_cnt;
43019 -       u16 rts_retries_cnt;
43020 -       u16 ba_miss_cnt;
43021 +       u32 ack_fail_cnt;
43022 +       u32 fcs_err_cnt;
43023 +       u32 rts_cnt;
43024 +       u32 rts_retries_cnt;
43025 +       u32 ba_miss_cnt;
43026  };
43028  struct mt7915_hif {
43029 diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
43030 index ed0c9a24bb53..dfb8880657bf 100644
43031 --- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
43032 +++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
43033 @@ -4,6 +4,11 @@
43034  #ifndef __MT7915_REGS_H
43035  #define __MT7915_REGS_H
43037 +/* MCU WFDMA0 */
43038 +#define MT_MCU_WFDMA0_BASE             0x2000
43039 +#define MT_MCU_WFDMA0(ofs)             (MT_MCU_WFDMA0_BASE + (ofs))
43040 +#define MT_MCU_WFDMA0_DUMMY_CR         MT_MCU_WFDMA0(0x120)
43042  /* MCU WFDMA1 */
43043  #define MT_MCU_WFDMA1_BASE             0x3000
43044  #define MT_MCU_WFDMA1(ofs)             (MT_MCU_WFDMA1_BASE + (ofs))
43045 @@ -396,6 +401,14 @@
43046  #define MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1      BIT(1)
43047  #define MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO       BIT(2)
43049 +#define MT_TOP_RGU_BASE                                0xf0000
43050 +#define MT_TOP_PWR_CTRL                                (MT_TOP_RGU_BASE + (0x0))
43051 +#define MT_TOP_PWR_KEY                         (0x5746 << 16)
43052 +#define MT_TOP_PWR_SW_RST                      BIT(0)
43053 +#define MT_TOP_PWR_SW_PWR_ON                   GENMASK(3, 2)
43054 +#define MT_TOP_PWR_HW_CTRL                     BIT(4)
43055 +#define MT_TOP_PWR_PWR_ON                      BIT(7)
43057  #define MT_INFRA_CFG_BASE              0xf1000
43058  #define MT_INFRA(ofs)                  (MT_INFRA_CFG_BASE + (ofs))
43060 diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
43061 index 0dc8e25e18e4..87a7ea12f3b3 100644
43062 --- a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
43063 +++ b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
43064 @@ -9,10 +9,13 @@ mt7921_fw_debug_set(void *data, u64 val)
43066         struct mt7921_dev *dev = data;
43068 -       dev->fw_debug = (u8)val;
43069 +       mt7921_mutex_acquire(dev);
43071 +       dev->fw_debug = (u8)val;
43072         mt7921_mcu_fw_log_2_host(dev, dev->fw_debug);
43074 +       mt7921_mutex_release(dev);
43076         return 0;
43079 @@ -44,14 +47,13 @@ mt7921_ampdu_stat_read_phy(struct mt7921_phy *phy,
43080                 range[i] = mt76_rr(dev, MT_MIB_ARNG(0, i));
43082         for (i = 0; i < ARRAY_SIZE(bound); i++)
43083 -               bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i) + 1;
43084 +               bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i % 4) + 1;
43086         seq_printf(file, "\nPhy0\n");
43088         seq_printf(file, "Length: %8d | ", bound[0]);
43089         for (i = 0; i < ARRAY_SIZE(bound) - 1; i++)
43090 -               seq_printf(file, "%3d -%3d | ",
43091 -                          bound[i] + 1, bound[i + 1]);
43092 +               seq_printf(file, "%3d  %3d | ", bound[i] + 1, bound[i + 1]);
43094         seq_puts(file, "\nCount:  ");
43095         for (i = 0; i < ARRAY_SIZE(bound); i++)
43096 diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
43097 index 3f9097481a5e..a6d2a25b3495 100644
43098 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
43099 +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
43100 @@ -400,7 +400,9 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
43102         /* RXD Group 3 - P-RXV */
43103         if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
43104 -               u32 v0, v1, v2;
43105 +               u8 stbc, gi;
43106 +               u32 v0, v1;
43107 +               bool cck;
43109                 rxv = rxd;
43110                 rxd += 2;
43111 @@ -409,7 +411,6 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
43113                 v0 = le32_to_cpu(rxv[0]);
43114                 v1 = le32_to_cpu(rxv[1]);
43115 -               v2 = le32_to_cpu(rxv[2]);
43117                 if (v0 & MT_PRXV_HT_AD_CODE)
43118                         status->enc_flags |= RX_ENC_FLAG_LDPC;
43119 @@ -429,87 +430,87 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
43120                                              status->chain_signal[i]);
43121                 }
43123 -               /* RXD Group 5 - C-RXV */
43124 -               if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
43125 -                       u8 stbc = FIELD_GET(MT_CRXV_HT_STBC, v2);
43126 -                       u8 gi = FIELD_GET(MT_CRXV_HT_SHORT_GI, v2);
43127 -                       bool cck = false;
43128 +               stbc = FIELD_GET(MT_PRXV_STBC, v0);
43129 +               gi = FIELD_GET(MT_PRXV_SGI, v0);
43130 +               cck = false;
43132 -                       rxd += 18;
43133 -                       if ((u8 *)rxd - skb->data >= skb->len)
43134 -                               return -EINVAL;
43135 +               idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
43136 +               mode = FIELD_GET(MT_PRXV_TX_MODE, v0);
43138 -                       idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
43139 -                       mode = FIELD_GET(MT_CRXV_TX_MODE, v2);
43141 -                       switch (mode) {
43142 -                       case MT_PHY_TYPE_CCK:
43143 -                               cck = true;
43144 -                               fallthrough;
43145 -                       case MT_PHY_TYPE_OFDM:
43146 -                               i = mt76_get_rate(&dev->mt76, sband, i, cck);
43147 -                               break;
43148 -                       case MT_PHY_TYPE_HT_GF:
43149 -                       case MT_PHY_TYPE_HT:
43150 -                               status->encoding = RX_ENC_HT;
43151 -                               if (i > 31)
43152 -                                       return -EINVAL;
43153 -                               break;
43154 -                       case MT_PHY_TYPE_VHT:
43155 -                               status->nss =
43156 -                                       FIELD_GET(MT_PRXV_NSTS, v0) + 1;
43157 -                               status->encoding = RX_ENC_VHT;
43158 -                               if (i > 9)
43159 -                                       return -EINVAL;
43160 -                               break;
43161 -                       case MT_PHY_TYPE_HE_MU:
43162 -                               status->flag |= RX_FLAG_RADIOTAP_HE_MU;
43163 -                               fallthrough;
43164 -                       case MT_PHY_TYPE_HE_SU:
43165 -                       case MT_PHY_TYPE_HE_EXT_SU:
43166 -                       case MT_PHY_TYPE_HE_TB:
43167 -                               status->nss =
43168 -                                       FIELD_GET(MT_PRXV_NSTS, v0) + 1;
43169 -                               status->encoding = RX_ENC_HE;
43170 -                               status->flag |= RX_FLAG_RADIOTAP_HE;
43171 -                               i &= GENMASK(3, 0);
43173 -                               if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
43174 -                                       status->he_gi = gi;
43176 -                               status->he_dcm = !!(idx & MT_PRXV_TX_DCM);
43177 -                               break;
43178 -                       default:
43179 +               switch (mode) {
43180 +               case MT_PHY_TYPE_CCK:
43181 +                       cck = true;
43182 +                       fallthrough;
43183 +               case MT_PHY_TYPE_OFDM:
43184 +                       i = mt76_get_rate(&dev->mt76, sband, i, cck);
43185 +                       break;
43186 +               case MT_PHY_TYPE_HT_GF:
43187 +               case MT_PHY_TYPE_HT:
43188 +                       status->encoding = RX_ENC_HT;
43189 +                       if (i > 31)
43190                                 return -EINVAL;
43191 -                       }
43192 -                       status->rate_idx = i;
43194 -                       switch (FIELD_GET(MT_CRXV_FRAME_MODE, v2)) {
43195 -                       case IEEE80211_STA_RX_BW_20:
43196 -                               break;
43197 -                       case IEEE80211_STA_RX_BW_40:
43198 -                               if (mode & MT_PHY_TYPE_HE_EXT_SU &&
43199 -                                   (idx & MT_PRXV_TX_ER_SU_106T)) {
43200 -                                       status->bw = RATE_INFO_BW_HE_RU;
43201 -                                       status->he_ru =
43202 -                                               NL80211_RATE_INFO_HE_RU_ALLOC_106;
43203 -                               } else {
43204 -                                       status->bw = RATE_INFO_BW_40;
43205 -                               }
43206 -                               break;
43207 -                       case IEEE80211_STA_RX_BW_80:
43208 -                               status->bw = RATE_INFO_BW_80;
43209 -                               break;
43210 -                       case IEEE80211_STA_RX_BW_160:
43211 -                               status->bw = RATE_INFO_BW_160;
43212 -                               break;
43213 -                       default:
43214 +                       break;
43215 +               case MT_PHY_TYPE_VHT:
43216 +                       status->nss =
43217 +                               FIELD_GET(MT_PRXV_NSTS, v0) + 1;
43218 +                       status->encoding = RX_ENC_VHT;
43219 +                       if (i > 9)
43220                                 return -EINVAL;
43221 +                       break;
43222 +               case MT_PHY_TYPE_HE_MU:
43223 +                       status->flag |= RX_FLAG_RADIOTAP_HE_MU;
43224 +                       fallthrough;
43225 +               case MT_PHY_TYPE_HE_SU:
43226 +               case MT_PHY_TYPE_HE_EXT_SU:
43227 +               case MT_PHY_TYPE_HE_TB:
43228 +                       status->nss =
43229 +                               FIELD_GET(MT_PRXV_NSTS, v0) + 1;
43230 +                       status->encoding = RX_ENC_HE;
43231 +                       status->flag |= RX_FLAG_RADIOTAP_HE;
43232 +                       i &= GENMASK(3, 0);
43234 +                       if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
43235 +                               status->he_gi = gi;
43237 +                       status->he_dcm = !!(idx & MT_PRXV_TX_DCM);
43238 +                       break;
43239 +               default:
43240 +                       return -EINVAL;
43241 +               }
43243 +               status->rate_idx = i;
43245 +               switch (FIELD_GET(MT_PRXV_FRAME_MODE, v0)) {
43246 +               case IEEE80211_STA_RX_BW_20:
43247 +                       break;
43248 +               case IEEE80211_STA_RX_BW_40:
43249 +                       if (mode & MT_PHY_TYPE_HE_EXT_SU &&
43250 +                           (idx & MT_PRXV_TX_ER_SU_106T)) {
43251 +                               status->bw = RATE_INFO_BW_HE_RU;
43252 +                               status->he_ru =
43253 +                                       NL80211_RATE_INFO_HE_RU_ALLOC_106;
43254 +                       } else {
43255 +                               status->bw = RATE_INFO_BW_40;
43256                         }
43257 +                       break;
43258 +               case IEEE80211_STA_RX_BW_80:
43259 +                       status->bw = RATE_INFO_BW_80;
43260 +                       break;
43261 +               case IEEE80211_STA_RX_BW_160:
43262 +                       status->bw = RATE_INFO_BW_160;
43263 +                       break;
43264 +               default:
43265 +                       return -EINVAL;
43266 +               }
43268 -                       status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
43269 -                       if (mode < MT_PHY_TYPE_HE_SU && gi)
43270 -                               status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
43271 +               status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
43272 +               if (mode < MT_PHY_TYPE_HE_SU && gi)
43273 +                       status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
43275 +               if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
43276 +                       rxd += 18;
43277 +                       if ((u8 *)rxd - skb->data >= skb->len)
43278 +                               return -EINVAL;
43279                 }
43280         }
43282 @@ -1317,31 +1318,20 @@ mt7921_mac_update_mib_stats(struct mt7921_phy *phy)
43283         struct mib_stats *mib = &phy->mib;
43284         int i, aggr0 = 0, aggr1;
43286 -       memset(mib, 0, sizeof(*mib));
43288 -       mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(0),
43289 -                                         MT_MIB_SDR3_FCS_ERR_MASK);
43290 +       mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(0),
43291 +                                          MT_MIB_SDR3_FCS_ERR_MASK);
43292 +       mib->ack_fail_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR3(0),
43293 +                                           MT_MIB_ACK_FAIL_COUNT_MASK);
43294 +       mib->ba_miss_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR2(0),
43295 +                                          MT_MIB_BA_FAIL_COUNT_MASK);
43296 +       mib->rts_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR0(0),
43297 +                                      MT_MIB_RTS_COUNT_MASK);
43298 +       mib->rts_retries_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR1(0),
43299 +                                              MT_MIB_RTS_FAIL_COUNT_MASK);
43301         for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
43302                 u32 val, val2;
43304 -               val = mt76_rr(dev, MT_MIB_MB_SDR1(0, i));
43306 -               val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
43307 -               if (val2 > mib->ack_fail_cnt)
43308 -                       mib->ack_fail_cnt = val2;
43310 -               val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
43311 -               if (val2 > mib->ba_miss_cnt)
43312 -                       mib->ba_miss_cnt = val2;
43314 -               val = mt76_rr(dev, MT_MIB_MB_SDR0(0, i));
43315 -               val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
43316 -               if (val2 > mib->rts_retries_cnt) {
43317 -                       mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
43318 -                       mib->rts_retries_cnt = val2;
43319 -               }
43321                 val = mt76_rr(dev, MT_TX_AGG_CNT(0, i));
43322                 val2 = mt76_rr(dev, MT_TX_AGG_CNT2(0, i));
43324 @@ -1503,8 +1493,10 @@ void mt7921_coredump_work(struct work_struct *work)
43325                         break;
43327                 skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
43328 -               if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ)
43329 -                       break;
43330 +               if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
43331 +                       dev_kfree_skb(skb);
43332 +                       continue;
43333 +               }
43335                 memcpy(data, skb->data, skb->len);
43336                 data += skb->len;
43337 diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.h b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
43338 index a0c1fa0f20e4..109c8849d106 100644
43339 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
43340 +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
43341 @@ -97,18 +97,24 @@ enum rx_pkt_type {
43342  #define MT_RXD3_NORMAL_PF_MODE         BIT(29)
43343  #define MT_RXD3_NORMAL_PF_STS          GENMASK(31, 30)
43345 -/* P-RXV */
43346 +/* P-RXV DW0 */
43347  #define MT_PRXV_TX_RATE                        GENMASK(6, 0)
43348  #define MT_PRXV_TX_DCM                 BIT(4)
43349  #define MT_PRXV_TX_ER_SU_106T          BIT(5)
43350  #define MT_PRXV_NSTS                   GENMASK(9, 7)
43351  #define MT_PRXV_HT_AD_CODE             BIT(11)
43352 +#define MT_PRXV_FRAME_MODE             GENMASK(14, 12)
43353 +#define MT_PRXV_SGI                    GENMASK(16, 15)
43354 +#define MT_PRXV_STBC                   GENMASK(23, 22)
43355 +#define MT_PRXV_TX_MODE                        GENMASK(27, 24)
43356  #define MT_PRXV_HE_RU_ALLOC_L          GENMASK(31, 28)
43357 -#define MT_PRXV_HE_RU_ALLOC_H          GENMASK(3, 0)
43359 +/* P-RXV DW1 */
43360  #define MT_PRXV_RCPI3                  GENMASK(31, 24)
43361  #define MT_PRXV_RCPI2                  GENMASK(23, 16)
43362  #define MT_PRXV_RCPI1                  GENMASK(15, 8)
43363  #define MT_PRXV_RCPI0                  GENMASK(7, 0)
43364 +#define MT_PRXV_HE_RU_ALLOC_H          GENMASK(3, 0)
43366  /* C-RXV */
43367  #define MT_CRXV_HT_STBC                        GENMASK(1, 0)
43368 diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
43369 index 729f6c42cdde..ada943c7a950 100644
43370 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
43371 +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
43372 @@ -348,6 +348,7 @@ static void mt7921_remove_interface(struct ieee80211_hw *hw,
43373         if (vif == phy->monitor_vif)
43374                 phy->monitor_vif = NULL;
43376 +       mt7921_mutex_acquire(dev);
43377         mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid);
43379         if (dev->pm.enable) {
43380 @@ -360,7 +361,6 @@ static void mt7921_remove_interface(struct ieee80211_hw *hw,
43382         rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
43384 -       mt7921_mutex_acquire(dev);
43385         dev->mt76.vif_mask &= ~BIT(mvif->mt76.idx);
43386         phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx);
43387         mt7921_mutex_release(dev);
43388 @@ -413,7 +413,8 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
43389         struct mt7921_sta *msta = sta ? (struct mt7921_sta *)sta->drv_priv :
43390                                   &mvif->sta;
43391         struct mt76_wcid *wcid = &msta->wcid;
43392 -       int idx = key->keyidx;
43393 +       u8 *wcid_keyidx = &wcid->hw_key_idx;
43394 +       int idx = key->keyidx, err = 0;
43396         /* The hardware does not support per-STA RX GTK, fallback
43397          * to software mode for these.
43398 @@ -429,6 +430,7 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
43399         switch (key->cipher) {
43400         case WLAN_CIPHER_SUITE_AES_CMAC:
43401                 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
43402 +               wcid_keyidx = &wcid->hw_key_idx2;
43403                 break;
43404         case WLAN_CIPHER_SUITE_TKIP:
43405         case WLAN_CIPHER_SUITE_CCMP:
43406 @@ -443,16 +445,23 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
43407                 return -EOPNOTSUPP;
43408         }
43410 -       if (cmd == SET_KEY) {
43411 -               key->hw_key_idx = wcid->idx;
43412 -               wcid->hw_key_idx = idx;
43413 -       } else if (idx == wcid->hw_key_idx) {
43414 -               wcid->hw_key_idx = -1;
43415 -       }
43416 +       mt7921_mutex_acquire(dev);
43418 +       if (cmd == SET_KEY)
43419 +               *wcid_keyidx = idx;
43420 +       else if (idx == *wcid_keyidx)
43421 +               *wcid_keyidx = -1;
43422 +       else
43423 +               goto out;
43425         mt76_wcid_key_setup(&dev->mt76, wcid,
43426                             cmd == SET_KEY ? key : NULL);
43428 -       return mt7921_mcu_add_key(dev, vif, msta, key, cmd);
43429 +       err = mt7921_mcu_add_key(dev, vif, msta, key, cmd);
43430 +out:
43431 +       mt7921_mutex_release(dev);
43433 +       return err;
43436  static int mt7921_config(struct ieee80211_hw *hw, u32 changed)
43437 @@ -587,6 +596,9 @@ static void mt7921_bss_info_changed(struct ieee80211_hw *hw,
43438         if (changed & BSS_CHANGED_PS)
43439                 mt7921_mcu_uni_bss_ps(dev, vif);
43441 +       if (changed & BSS_CHANGED_ARP_FILTER)
43442 +               mt7921_mcu_update_arp_filter(hw, vif, info);
43444         mt7921_mutex_release(dev);
43447 @@ -814,11 +826,17 @@ mt7921_get_stats(struct ieee80211_hw *hw,
43448         struct mt7921_phy *phy = mt7921_hw_phy(hw);
43449         struct mib_stats *mib = &phy->mib;
43451 +       mt7921_mutex_acquire(phy->dev);
43453         stats->dot11RTSSuccessCount = mib->rts_cnt;
43454         stats->dot11RTSFailureCount = mib->rts_retries_cnt;
43455         stats->dot11FCSErrorCount = mib->fcs_err_cnt;
43456         stats->dot11ACKFailureCount = mib->ack_fail_cnt;
43458 +       memset(mib, 0, sizeof(*mib));
43460 +       mt7921_mutex_release(phy->dev);
43462         return 0;
43465 diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
43466 index b5cc72e7e81c..62afbad77596 100644
43467 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
43468 +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
43469 @@ -1304,3 +1304,47 @@ mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
43470                 mt76_clear(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
43471         }
43474 +int mt7921_mcu_update_arp_filter(struct ieee80211_hw *hw,
43475 +                                struct ieee80211_vif *vif,
43476 +                                struct ieee80211_bss_conf *info)
43478 +       struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
43479 +       struct mt7921_dev *dev = mt7921_hw_dev(hw);
43480 +       struct sk_buff *skb;
43481 +       int i, len = min_t(int, info->arp_addr_cnt,
43482 +                          IEEE80211_BSS_ARP_ADDR_LIST_LEN);
43483 +       struct {
43484 +               struct {
43485 +                       u8 bss_idx;
43486 +                       u8 pad[3];
43487 +               } __packed hdr;
43488 +               struct mt76_connac_arpns_tlv arp;
43489 +       } req_hdr = {
43490 +               .hdr = {
43491 +                       .bss_idx = mvif->mt76.idx,
43492 +               },
43493 +               .arp = {
43494 +                       .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP),
43495 +                       .len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)),
43496 +                       .ips_num = len,
43497 +                       .mode = 2,  /* update */
43498 +                       .option = 1,
43499 +               },
43500 +       };
43502 +       skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
43503 +                                sizeof(req_hdr) + len * sizeof(__be32));
43504 +       if (!skb)
43505 +               return -ENOMEM;
43507 +       skb_put_data(skb, &req_hdr, sizeof(req_hdr));
43508 +       for (i = 0; i < len; i++) {
43509 +               u8 *addr = (u8 *)skb_put(skb, sizeof(__be32));
43511 +               memcpy(addr, &info->arp_addr_list[i], sizeof(__be32));
43512 +       }
43514 +       return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_UNI_CMD_OFFLOAD,
43515 +                                    true);
43517 diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
43518 index 46e6aeec35ae..25a1a6acb6ba 100644
43519 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
43520 +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
43521 @@ -102,11 +102,11 @@ struct mt7921_vif {
43522  };
43524  struct mib_stats {
43525 -       u16 ack_fail_cnt;
43526 -       u16 fcs_err_cnt;
43527 -       u16 rts_cnt;
43528 -       u16 rts_retries_cnt;
43529 -       u16 ba_miss_cnt;
43530 +       u32 ack_fail_cnt;
43531 +       u32 fcs_err_cnt;
43532 +       u32 rts_cnt;
43533 +       u32 rts_retries_cnt;
43534 +       u32 ba_miss_cnt;
43535  };
43537  struct mt7921_phy {
43538 @@ -339,4 +339,7 @@ int mt7921_mac_set_beacon_filter(struct mt7921_phy *phy,
43539                                  bool enable);
43540  void mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif);
43541  void mt7921_coredump_work(struct work_struct *work);
43542 +int mt7921_mcu_update_arp_filter(struct ieee80211_hw *hw,
43543 +                                struct ieee80211_vif *vif,
43544 +                                struct ieee80211_bss_conf *info);
43545  #endif
43546 diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
43547 index 5570b4a50531..80f6f29892a4 100644
43548 --- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
43549 +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
43550 @@ -137,7 +137,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
43552         mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
43554 -       mt7921_l1_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
43555 +       mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
43557         ret = devm_request_irq(mdev->dev, pdev->irq, mt7921_irq_handler,
43558                                IRQF_SHARED, KBUILD_MODNAME, dev);
43559 @@ -146,10 +146,12 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
43561         ret = mt7921_register_device(dev);
43562         if (ret)
43563 -               goto err_free_dev;
43564 +               goto err_free_irq;
43566         return 0;
43568 +err_free_irq:
43569 +       devm_free_irq(&pdev->dev, pdev->irq, dev);
43570  err_free_dev:
43571         mt76_free_device(&dev->mt76);
43572  err_free_pci_vec:
43573 @@ -193,7 +195,6 @@ static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state)
43574         mt76_for_each_q_rx(mdev, i) {
43575                 napi_disable(&mdev->napi[i]);
43576         }
43577 -       tasklet_kill(&dev->irq_tasklet);
43579         pci_enable_wake(pdev, pci_choose_state(pdev, state), true);
43581 @@ -208,13 +209,16 @@ static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state)
43583         /* disable interrupt */
43584         mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
43585 +       mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
43586 +       synchronize_irq(pdev->irq);
43587 +       tasklet_kill(&dev->irq_tasklet);
43589 -       pci_save_state(pdev);
43590 -       err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
43591 +       err = mt7921_mcu_fw_pmctrl(dev);
43592         if (err)
43593                 goto restore;
43595 -       err = mt7921_mcu_drv_pmctrl(dev);
43596 +       pci_save_state(pdev);
43597 +       err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
43598         if (err)
43599                 goto restore;
43601 @@ -237,18 +241,18 @@ static int mt7921_pci_resume(struct pci_dev *pdev)
43602         struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
43603         int i, err;
43605 -       err = mt7921_mcu_fw_pmctrl(dev);
43606 -       if (err < 0)
43607 -               return err;
43609         err = pci_set_power_state(pdev, PCI_D0);
43610         if (err)
43611                 return err;
43613         pci_restore_state(pdev);
43615 +       err = mt7921_mcu_drv_pmctrl(dev);
43616 +       if (err < 0)
43617 +               return err;
43619         /* enable interrupt */
43620 -       mt7921_l1_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
43621 +       mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
43622         mt7921_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
43623                           MT_INT_MCU_CMD);
43625 diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
43626 index 6dad7f6ab09d..73878d3e2495 100644
43627 --- a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
43628 +++ b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
43629 @@ -96,8 +96,8 @@
43630  #define MT_WF_MIB_BASE(_band)          ((_band) ? 0xa4800 : 0x24800)
43631  #define MT_WF_MIB(_band, ofs)          (MT_WF_MIB_BASE(_band) + (ofs))
43633 -#define MT_MIB_SDR3(_band)             MT_WF_MIB(_band, 0x014)
43634 -#define MT_MIB_SDR3_FCS_ERR_MASK       GENMASK(15, 0)
43635 +#define MT_MIB_SDR3(_band)             MT_WF_MIB(_band, 0x698)
43636 +#define MT_MIB_SDR3_FCS_ERR_MASK       GENMASK(31, 16)
43638  #define MT_MIB_SDR9(_band)             MT_WF_MIB(_band, 0x02c)
43639  #define MT_MIB_SDR9_BUSY_MASK          GENMASK(23, 0)
43640 @@ -121,16 +121,21 @@
43641  #define MT_MIB_RTS_RETRIES_COUNT_MASK  GENMASK(31, 16)
43642  #define MT_MIB_RTS_COUNT_MASK          GENMASK(15, 0)
43644 -#define MT_MIB_MB_SDR1(_band, n)       MT_WF_MIB(_band, 0x104 + ((n) << 4))
43645 -#define MT_MIB_BA_MISS_COUNT_MASK      GENMASK(15, 0)
43646 -#define MT_MIB_ACK_FAIL_COUNT_MASK     GENMASK(31, 16)
43647 +#define MT_MIB_MB_BSDR0(_band)         MT_WF_MIB(_band, 0x688)
43648 +#define MT_MIB_RTS_COUNT_MASK          GENMASK(15, 0)
43649 +#define MT_MIB_MB_BSDR1(_band)         MT_WF_MIB(_band, 0x690)
43650 +#define MT_MIB_RTS_FAIL_COUNT_MASK     GENMASK(15, 0)
43651 +#define MT_MIB_MB_BSDR2(_band)         MT_WF_MIB(_band, 0x518)
43652 +#define MT_MIB_BA_FAIL_COUNT_MASK      GENMASK(15, 0)
43653 +#define MT_MIB_MB_BSDR3(_band)         MT_WF_MIB(_band, 0x520)
43654 +#define MT_MIB_ACK_FAIL_COUNT_MASK     GENMASK(15, 0)
43656  #define MT_MIB_MB_SDR2(_band, n)       MT_WF_MIB(_band, 0x108 + ((n) << 4))
43657  #define MT_MIB_FRAME_RETRIES_COUNT_MASK        GENMASK(15, 0)
43659 -#define MT_TX_AGG_CNT(_band, n)                MT_WF_MIB(_band, 0x0a8 + ((n) << 2))
43660 -#define MT_TX_AGG_CNT2(_band, n)       MT_WF_MIB(_band, 0x164 + ((n) << 2))
43661 -#define MT_MIB_ARNG(_band, n)          MT_WF_MIB(_band, 0x4b8 + ((n) << 2))
43662 +#define MT_TX_AGG_CNT(_band, n)                MT_WF_MIB(_band, 0x7dc + ((n) << 2))
43663 +#define MT_TX_AGG_CNT2(_band, n)       MT_WF_MIB(_band, 0x7ec + ((n) << 2))
43664 +#define MT_MIB_ARNG(_band, n)          MT_WF_MIB(_band, 0x0b0 + ((n) << 2))
43665  #define MT_MIB_ARNCR_RANGE(val, n)     (((val) >> ((n) << 3)) & GENMASK(7, 0))
43667  #define MT_WTBLON_TOP_BASE             0x34000
43668 @@ -357,11 +362,11 @@
43669  #define MT_INFRA_CFG_BASE              0xfe000
43670  #define MT_INFRA(ofs)                  (MT_INFRA_CFG_BASE + (ofs))
43672 -#define MT_HIF_REMAP_L1                        MT_INFRA(0x260)
43673 +#define MT_HIF_REMAP_L1                        MT_INFRA(0x24c)
43674  #define MT_HIF_REMAP_L1_MASK           GENMASK(15, 0)
43675  #define MT_HIF_REMAP_L1_OFFSET         GENMASK(15, 0)
43676  #define MT_HIF_REMAP_L1_BASE           GENMASK(31, 16)
43677 -#define MT_HIF_REMAP_BASE_L1           0xe0000
43678 +#define MT_HIF_REMAP_BASE_L1           0x40000
43680  #define MT_SWDEF_BASE                  0x41f200
43681  #define MT_SWDEF(ofs)                  (MT_SWDEF_BASE + (ofs))
43682 @@ -384,7 +389,7 @@
43683  #define MT_HW_CHIPID                   0x70010200
43684  #define MT_HW_REV                      0x70010204
43686 -#define MT_PCIE_MAC_BASE               0x74030000
43687 +#define MT_PCIE_MAC_BASE               0x10000
43688  #define MT_PCIE_MAC(ofs)               (MT_PCIE_MAC_BASE + (ofs))
43689  #define MT_PCIE_MAC_INT_ENABLE         MT_PCIE_MAC(0x188)
43691 diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
43692 index 0b6facb17ff7..a18d2896ee1f 100644
43693 --- a/drivers/net/wireless/mediatek/mt76/sdio.c
43694 +++ b/drivers/net/wireless/mediatek/mt76/sdio.c
43695 @@ -256,6 +256,9 @@ mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
43697         q->entry[q->head].skb = tx_info.skb;
43698         q->entry[q->head].buf_sz = len;
43700 +       smp_wmb();
43702         q->head = (q->head + 1) % q->ndesc;
43703         q->queued++;
43705 diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
43706 index b8fe8adc43a3..451ed60c6296 100644
43707 --- a/drivers/net/wireless/mediatek/mt76/tx.c
43708 +++ b/drivers/net/wireless/mediatek/mt76/tx.c
43709 @@ -461,11 +461,11 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
43710         int ret = 0;
43712         while (1) {
43713 +               int n_frames = 0;
43715                 if (test_bit(MT76_STATE_PM, &phy->state) ||
43716 -                   test_bit(MT76_RESET, &phy->state)) {
43717 -                       ret = -EBUSY;
43718 -                       break;
43719 -               }
43720 +                   test_bit(MT76_RESET, &phy->state))
43721 +                       return -EBUSY;
43723                 if (dev->queue_ops->tx_cleanup &&
43724                     q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
43725 @@ -497,11 +497,16 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
43726                 }
43728                 if (!mt76_txq_stopped(q))
43729 -                       ret += mt76_txq_send_burst(phy, q, mtxq);
43730 +                       n_frames = mt76_txq_send_burst(phy, q, mtxq);
43732                 spin_unlock_bh(&q->lock);
43734                 ieee80211_return_txq(phy->hw, txq, false);
43736 +               if (unlikely(n_frames < 0))
43737 +                       return n_frames;
43739 +               ret += n_frames;
43740         }
43742         return ret;
43743 diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.c b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
43744 index c868582c5d22..aa3b64902cf9 100644
43745 --- a/drivers/net/wireless/mediatek/mt7601u/eeprom.c
43746 +++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
43747 @@ -99,7 +99,7 @@ mt7601u_has_tssi(struct mt7601u_dev *dev, u8 *eeprom)
43749         u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1);
43751 -       return ~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN);
43752 +       return (u16)~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN);
43755  static void
43756 diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
43757 index 1b205e7d97a8..37f40039e4ca 100644
43758 --- a/drivers/net/wireless/microchip/wilc1000/netdev.c
43759 +++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
43760 @@ -575,7 +575,6 @@ static int wilc_mac_open(struct net_device *ndev)
43762         struct wilc_vif *vif = netdev_priv(ndev);
43763         struct wilc *wl = vif->wilc;
43764 -       unsigned char mac_add[ETH_ALEN] = {0};
43765         int ret = 0;
43766         struct mgmt_frame_regs mgmt_regs = {};
43768 @@ -598,9 +597,12 @@ static int wilc_mac_open(struct net_device *ndev)
43770         wilc_set_operation_mode(vif, wilc_get_vif_idx(vif), vif->iftype,
43771                                 vif->idx);
43772 -       wilc_get_mac_address(vif, mac_add);
43773 -       netdev_dbg(ndev, "Mac address: %pM\n", mac_add);
43774 -       ether_addr_copy(ndev->dev_addr, mac_add);
43776 +       if (is_valid_ether_addr(ndev->dev_addr))
43777 +               wilc_set_mac_address(vif, ndev->dev_addr);
43778 +       else
43779 +               wilc_get_mac_address(vif, ndev->dev_addr);
43780 +       netdev_dbg(ndev, "Mac address: %pM\n", ndev->dev_addr);
43782         if (!is_valid_ether_addr(ndev->dev_addr)) {
43783                 netdev_err(ndev, "Wrong MAC address\n");
43784 @@ -639,7 +641,14 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
43785         int srcu_idx;
43787         if (!is_valid_ether_addr(addr->sa_data))
43788 -               return -EINVAL;
43789 +               return -EADDRNOTAVAIL;
43791 +       if (!vif->mac_opened) {
43792 +               eth_commit_mac_addr_change(dev, p);
43793 +               return 0;
43794 +       }
43796 +       /* Verify MAC Address is not already in use: */
43798         srcu_idx = srcu_read_lock(&wilc->srcu);
43799         list_for_each_entry_rcu(tmp_vif, &wilc->vif_list, list) {
43800 @@ -647,7 +656,7 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
43801                 if (ether_addr_equal(addr->sa_data, mac_addr)) {
43802                         if (vif != tmp_vif) {
43803                                 srcu_read_unlock(&wilc->srcu, srcu_idx);
43804 -                               return -EINVAL;
43805 +                               return -EADDRNOTAVAIL;
43806                         }
43807                         srcu_read_unlock(&wilc->srcu, srcu_idx);
43808                         return 0;
43809 @@ -659,9 +668,7 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
43810         if (result)
43811                 return result;
43813 -       ether_addr_copy(vif->bssid, addr->sa_data);
43814 -       ether_addr_copy(vif->ndev->dev_addr, addr->sa_data);
43816 +       eth_commit_mac_addr_change(dev, p);
43817         return result;
43820 diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
43821 index 351ff909ab1c..e14b9fc2c67a 100644
43822 --- a/drivers/net/wireless/microchip/wilc1000/sdio.c
43823 +++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
43824 @@ -947,7 +947,7 @@ static int wilc_sdio_sync_ext(struct wilc *wilc, int nint)
43825                         for (i = 0; (i < 3) && (nint > 0); i++, nint--)
43826                                 reg |= BIT(i);
43828 -                       ret = wilc_sdio_read_reg(wilc, WILC_INTR2_ENABLE, &reg);
43829 +                       ret = wilc_sdio_write_reg(wilc, WILC_INTR2_ENABLE, reg);
43830                         if (ret) {
43831                                 dev_err(&func->dev,
43832                                         "Failed write reg (%08x)...\n",
43833 diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
43834 index c775c177933b..8dc80574d08d 100644
43835 --- a/drivers/net/wireless/quantenna/qtnfmac/event.c
43836 +++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
43837 @@ -570,8 +570,10 @@ qtnf_event_handle_external_auth(struct qtnf_vif *vif,
43838                 return 0;
43840         if (ev->ssid_len) {
43841 -               memcpy(auth.ssid.ssid, ev->ssid, ev->ssid_len);
43842 -               auth.ssid.ssid_len = ev->ssid_len;
43843 +               int len = clamp_val(ev->ssid_len, 0, IEEE80211_MAX_SSID_LEN);
43845 +               memcpy(auth.ssid.ssid, ev->ssid, len);
43846 +               auth.ssid.ssid_len = len;
43847         }
43849         auth.key_mgmt_suite = le32_to_cpu(ev->akm_suite);
43850 diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
43851 index 27c8a5d96520..fcaaf664cbec 100644
43852 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
43853 +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
43854 @@ -249,7 +249,7 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = {
43855         0x824, 0x00030FE0,
43856         0x828, 0x00000000,
43857         0x82C, 0x002081DD,
43858 -       0x830, 0x2AAA8E24,
43859 +       0x830, 0x2AAAEEC8,
43860         0x834, 0x0037A706,
43861         0x838, 0x06489B44,
43862         0x83C, 0x0000095B,
43863 @@ -324,10 +324,10 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = {
43864         0x9D8, 0x00000000,
43865         0x9DC, 0x00000000,
43866         0x9E0, 0x00005D00,
43867 -       0x9E4, 0x00000002,
43868 +       0x9E4, 0x00000003,
43869         0x9E8, 0x00000001,
43870         0xA00, 0x00D047C8,
43871 -       0xA04, 0x01FF000C,
43872 +       0xA04, 0x01FF800C,
43873         0xA08, 0x8C8A8300,
43874         0xA0C, 0x2E68000F,
43875         0xA10, 0x9500BB78,
43876 @@ -1320,7 +1320,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
43877                 0x083, 0x00021800,
43878                 0x084, 0x00028000,
43879                 0x085, 0x00048000,
43880 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
43881 +               0x086, 0x0009483A,
43882 +       0xA0000000,     0x00000000,
43883                 0x086, 0x00094838,
43884 +       0xB0000000,     0x00000000,
43885                 0x087, 0x00044980,
43886                 0x088, 0x00048000,
43887                 0x089, 0x0000D480,
43888 @@ -1409,36 +1413,32 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
43889                 0x03C, 0x000CA000,
43890                 0x0EF, 0x00000000,
43891                 0x0EF, 0x00001100,
43892 -       0xFF0F0104, 0xABCD,
43893 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
43894                 0x034, 0x0004ADF3,
43895                 0x034, 0x00049DF0,
43896 -       0xFF0F0204, 0xCDEF,
43897 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
43898                 0x034, 0x0004ADF3,
43899                 0x034, 0x00049DF0,
43900 -       0xFF0F0404, 0xCDEF,
43901 -               0x034, 0x0004ADF3,
43902 -               0x034, 0x00049DF0,
43903 -       0xFF0F0200, 0xCDEF,
43904 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
43905                 0x034, 0x0004ADF5,
43906                 0x034, 0x00049DF2,
43907 -       0xFF0F02C0, 0xCDEF,
43908 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
43909 +               0x034, 0x0004A0F3,
43910 +               0x034, 0x000490B1,
43911 +               0x9000040c,     0x00000000,     0x40000000,     0x00000000,
43912                 0x034, 0x0004A0F3,
43913                 0x034, 0x000490B1,
43914 -       0xCDCDCDCD, 0xCDCD,
43915 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
43916 +               0x034, 0x0004ADF5,
43917 +               0x034, 0x00049DF2,
43918 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
43919 +               0x034, 0x0004ADF3,
43920 +               0x034, 0x00049DF0,
43921 +       0xA0000000,     0x00000000,
43922                 0x034, 0x0004ADF7,
43923                 0x034, 0x00049DF3,
43924 -       0xFF0F0104, 0xDEAD,
43925 -       0xFF0F0104, 0xABCD,
43926 -               0x034, 0x00048DED,
43927 -               0x034, 0x00047DEA,
43928 -               0x034, 0x00046DE7,
43929 -               0x034, 0x00045CE9,
43930 -               0x034, 0x00044CE6,
43931 -               0x034, 0x000438C6,
43932 -               0x034, 0x00042886,
43933 -               0x034, 0x00041486,
43934 -               0x034, 0x00040447,
43935 -       0xFF0F0204, 0xCDEF,
43936 +       0xB0000000,     0x00000000,
43937 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
43938                 0x034, 0x00048DED,
43939                 0x034, 0x00047DEA,
43940                 0x034, 0x00046DE7,
43941 @@ -1448,7 +1448,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
43942                 0x034, 0x00042886,
43943                 0x034, 0x00041486,
43944                 0x034, 0x00040447,
43945 -       0xFF0F0404, 0xCDEF,
43946 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
43947                 0x034, 0x00048DED,
43948                 0x034, 0x00047DEA,
43949                 0x034, 0x00046DE7,
43950 @@ -1458,7 +1458,17 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
43951                 0x034, 0x00042886,
43952                 0x034, 0x00041486,
43953                 0x034, 0x00040447,
43954 -       0xFF0F02C0, 0xCDEF,
43955 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
43956 +               0x034, 0x000480AE,
43957 +               0x034, 0x000470AB,
43958 +               0x034, 0x0004608B,
43959 +               0x034, 0x00045069,
43960 +               0x034, 0x00044048,
43961 +               0x034, 0x00043045,
43962 +               0x034, 0x00042026,
43963 +               0x034, 0x00041023,
43964 +               0x034, 0x00040002,
43965 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
43966                 0x034, 0x000480AE,
43967                 0x034, 0x000470AB,
43968                 0x034, 0x0004608B,
43969 @@ -1468,7 +1478,17 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
43970                 0x034, 0x00042026,
43971                 0x034, 0x00041023,
43972                 0x034, 0x00040002,
43973 -       0xCDCDCDCD, 0xCDCD,
43974 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
43975 +               0x034, 0x00048DED,
43976 +               0x034, 0x00047DEA,
43977 +               0x034, 0x00046DE7,
43978 +               0x034, 0x00045CE9,
43979 +               0x034, 0x00044CE6,
43980 +               0x034, 0x000438C6,
43981 +               0x034, 0x00042886,
43982 +               0x034, 0x00041486,
43983 +               0x034, 0x00040447,
43984 +       0xA0000000,     0x00000000,
43985                 0x034, 0x00048DEF,
43986                 0x034, 0x00047DEC,
43987                 0x034, 0x00046DE9,
43988 @@ -1478,38 +1498,36 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
43989                 0x034, 0x0004248A,
43990                 0x034, 0x0004108D,
43991                 0x034, 0x0004008A,
43992 -       0xFF0F0104, 0xDEAD,
43993 -       0xFF0F0200, 0xABCD,
43994 +       0xB0000000,     0x00000000,
43995 +       0x80000210,     0x00000000,     0x40000000,     0x00000000,
43996                 0x034, 0x0002ADF4,
43997 -       0xFF0F02C0, 0xCDEF,
43998 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
43999 +               0x034, 0x0002A0F3,
44000 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
44001                 0x034, 0x0002A0F3,
44002 -       0xCDCDCDCD, 0xCDCD,
44003 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
44004 +               0x034, 0x0002ADF4,
44005 +       0xA0000000,     0x00000000,
44006                 0x034, 0x0002ADF7,
44007 -       0xFF0F0200, 0xDEAD,
44008 -       0xFF0F0104, 0xABCD,
44009 -               0x034, 0x00029DF4,
44010 -       0xFF0F0204, 0xCDEF,
44011 +       0xB0000000,     0x00000000,
44012 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
44013                 0x034, 0x00029DF4,
44014 -       0xFF0F0404, 0xCDEF,
44015 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
44016                 0x034, 0x00029DF4,
44017 -       0xFF0F0200, 0xCDEF,
44018 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
44019                 0x034, 0x00029DF1,
44020 -       0xFF0F02C0, 0xCDEF,
44021 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
44022 +               0x034, 0x000290F0,
44023 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
44024                 0x034, 0x000290F0,
44025 -       0xCDCDCDCD, 0xCDCD,
44026 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
44027 +               0x034, 0x00029DF1,
44028 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
44029 +               0x034, 0x00029DF4,
44030 +       0xA0000000,     0x00000000,
44031                 0x034, 0x00029DF2,
44032 -       0xFF0F0104, 0xDEAD,
44033 -       0xFF0F0104, 0xABCD,
44034 -               0x034, 0x00028DF1,
44035 -               0x034, 0x00027DEE,
44036 -               0x034, 0x00026DEB,
44037 -               0x034, 0x00025CEC,
44038 -               0x034, 0x00024CE9,
44039 -               0x034, 0x000238CA,
44040 -               0x034, 0x00022889,
44041 -               0x034, 0x00021489,
44042 -               0x034, 0x0002044A,
44043 -       0xFF0F0204, 0xCDEF,
44044 +       0xB0000000,     0x00000000,
44045 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
44046                 0x034, 0x00028DF1,
44047                 0x034, 0x00027DEE,
44048                 0x034, 0x00026DEB,
44049 @@ -1519,7 +1537,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
44050                 0x034, 0x00022889,
44051                 0x034, 0x00021489,
44052                 0x034, 0x0002044A,
44053 -       0xFF0F0404, 0xCDEF,
44054 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
44055                 0x034, 0x00028DF1,
44056                 0x034, 0x00027DEE,
44057                 0x034, 0x00026DEB,
44058 @@ -1529,7 +1547,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
44059                 0x034, 0x00022889,
44060                 0x034, 0x00021489,
44061                 0x034, 0x0002044A,
44062 -       0xFF0F02C0, 0xCDEF,
44063 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
44064                 0x034, 0x000280AF,
44065                 0x034, 0x000270AC,
44066                 0x034, 0x0002608B,
44067 @@ -1539,7 +1557,27 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
44068                 0x034, 0x00022026,
44069                 0x034, 0x00021023,
44070                 0x034, 0x00020002,
44071 -       0xCDCDCDCD, 0xCDCD,
44072 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
44073 +               0x034, 0x000280AF,
44074 +               0x034, 0x000270AC,
44075 +               0x034, 0x0002608B,
44076 +               0x034, 0x00025069,
44077 +               0x034, 0x00024048,
44078 +               0x034, 0x00023045,
44079 +               0x034, 0x00022026,
44080 +               0x034, 0x00021023,
44081 +               0x034, 0x00020002,
44082 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
44083 +               0x034, 0x00028DF1,
44084 +               0x034, 0x00027DEE,
44085 +               0x034, 0x00026DEB,
44086 +               0x034, 0x00025CEC,
44087 +               0x034, 0x00024CE9,
44088 +               0x034, 0x000238CA,
44089 +               0x034, 0x00022889,
44090 +               0x034, 0x00021489,
44091 +               0x034, 0x0002044A,
44092 +       0xA0000000,     0x00000000,
44093                 0x034, 0x00028DEE,
44094                 0x034, 0x00027DEB,
44095                 0x034, 0x00026CCD,
44096 @@ -1549,27 +1587,24 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
44097                 0x034, 0x00022849,
44098                 0x034, 0x00021449,
44099                 0x034, 0x0002004D,
44100 -       0xFF0F0104, 0xDEAD,
44101 -       0xFF0F02C0, 0xABCD,
44102 +       0xB0000000,     0x00000000,
44103 +       0x8000020c,     0x00000000,     0x40000000,     0x00000000,
44104 +               0x034, 0x0000A0D7,
44105 +               0x034, 0x000090D3,
44106 +               0x034, 0x000080B1,
44107 +               0x034, 0x000070AE,
44108 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
44109                 0x034, 0x0000A0D7,
44110                 0x034, 0x000090D3,
44111                 0x034, 0x000080B1,
44112                 0x034, 0x000070AE,
44113 -       0xCDCDCDCD, 0xCDCD,
44114 +       0xA0000000,     0x00000000,
44115                 0x034, 0x0000ADF7,
44116                 0x034, 0x00009DF4,
44117                 0x034, 0x00008DF1,
44118                 0x034, 0x00007DEE,
44119 -       0xFF0F02C0, 0xDEAD,
44120 -       0xFF0F0104, 0xABCD,
44121 -               0x034, 0x00006DEB,
44122 -               0x034, 0x00005CEC,
44123 -               0x034, 0x00004CE9,
44124 -               0x034, 0x000038CA,
44125 -               0x034, 0x00002889,
44126 -               0x034, 0x00001489,
44127 -               0x034, 0x0000044A,
44128 -       0xFF0F0204, 0xCDEF,
44129 +       0xB0000000,     0x00000000,
44130 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
44131                 0x034, 0x00006DEB,
44132                 0x034, 0x00005CEC,
44133                 0x034, 0x00004CE9,
44134 @@ -1577,7 +1612,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
44135                 0x034, 0x00002889,
44136                 0x034, 0x00001489,
44137                 0x034, 0x0000044A,
44138 -       0xFF0F0404, 0xCDEF,
44139 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
44140                 0x034, 0x00006DEB,
44141                 0x034, 0x00005CEC,
44142                 0x034, 0x00004CE9,
44143 @@ -1585,7 +1620,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
44144                 0x034, 0x00002889,
44145                 0x034, 0x00001489,
44146                 0x034, 0x0000044A,
44147 -       0xFF0F02C0, 0xCDEF,
44148 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
44149                 0x034, 0x0000608D,
44150                 0x034, 0x0000506B,
44151                 0x034, 0x0000404A,
44152 @@ -1593,7 +1628,23 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
44153                 0x034, 0x00002044,
44154                 0x034, 0x00001025,
44155                 0x034, 0x00000004,
44156 -       0xCDCDCDCD, 0xCDCD,
44157 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
44158 +               0x034, 0x0000608D,
44159 +               0x034, 0x0000506B,
44160 +               0x034, 0x0000404A,
44161 +               0x034, 0x00003047,
44162 +               0x034, 0x00002044,
44163 +               0x034, 0x00001025,
44164 +               0x034, 0x00000004,
44165 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
44166 +               0x034, 0x00006DEB,
44167 +               0x034, 0x00005CEC,
44168 +               0x034, 0x00004CE9,
44169 +               0x034, 0x000038CA,
44170 +               0x034, 0x00002889,
44171 +               0x034, 0x00001489,
44172 +               0x034, 0x0000044A,
44173 +       0xA0000000,     0x00000000,
44174                 0x034, 0x00006DCD,
44175                 0x034, 0x00005CCD,
44176                 0x034, 0x00004CCA,
44177 @@ -1601,11 +1652,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
44178                 0x034, 0x00002888,
44179                 0x034, 0x00001488,
44180                 0x034, 0x00000486,
44181 -       0xFF0F0104, 0xDEAD,
44182 +       0xB0000000,     0x00000000,
44183                 0x0EF, 0x00000000,
44184                 0x018, 0x0001712A,
44185                 0x0EF, 0x00000040,
44186 -       0xFF0F0104, 0xABCD,
44187 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
44188                 0x035, 0x00000187,
44189                 0x035, 0x00008187,
44190                 0x035, 0x00010187,
44191 @@ -1615,7 +1666,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
44192                 0x035, 0x00040188,
44193                 0x035, 0x00048188,
44194                 0x035, 0x00050188,
44195 -       0xFF0F0204, 0xCDEF,
44196 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
44197                 0x035, 0x00000187,
44198                 0x035, 0x00008187,
44199                 0x035, 0x00010187,
44200 @@ -1625,7 +1676,37 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
44201                 0x035, 0x00040188,
44202                 0x035, 0x00048188,
44203                 0x035, 0x00050188,
44204 -       0xFF0F0404, 0xCDEF,
44205 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
44206 +               0x035, 0x00000128,
44207 +               0x035, 0x00008128,
44208 +               0x035, 0x00010128,
44209 +               0x035, 0x000201C8,
44210 +               0x035, 0x000281C8,
44211 +               0x035, 0x000301C8,
44212 +               0x035, 0x000401C8,
44213 +               0x035, 0x000481C8,
44214 +               0x035, 0x000501C8,
44215 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
44216 +               0x035, 0x00000145,
44217 +               0x035, 0x00008145,
44218 +               0x035, 0x00010145,
44219 +               0x035, 0x00020196,
44220 +               0x035, 0x00028196,
44221 +               0x035, 0x00030196,
44222 +               0x035, 0x000401C7,
44223 +               0x035, 0x000481C7,
44224 +               0x035, 0x000501C7,
44225 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
44226 +               0x035, 0x00000128,
44227 +               0x035, 0x00008128,
44228 +               0x035, 0x00010128,
44229 +               0x035, 0x000201C8,
44230 +               0x035, 0x000281C8,
44231 +               0x035, 0x000301C8,
44232 +               0x035, 0x000401C8,
44233 +               0x035, 0x000481C8,
44234 +               0x035, 0x000501C8,
44235 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
44236                 0x035, 0x00000187,
44237                 0x035, 0x00008187,
44238                 0x035, 0x00010187,
44239 @@ -1635,7 +1716,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
44240                 0x035, 0x00040188,
44241                 0x035, 0x00048188,
44242                 0x035, 0x00050188,
44243 -       0xCDCDCDCD, 0xCDCD,
44244 +       0xA0000000,     0x00000000,
44245                 0x035, 0x00000145,
44246                 0x035, 0x00008145,
44247                 0x035, 0x00010145,
44248 @@ -1645,11 +1726,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
44249                 0x035, 0x000401C7,
44250                 0x035, 0x000481C7,
44251                 0x035, 0x000501C7,
44252 -       0xFF0F0104, 0xDEAD,
44253 +       0xB0000000,     0x00000000,
44254                 0x0EF, 0x00000000,
44255                 0x018, 0x0001712A,
44256                 0x0EF, 0x00000010,
44257 -       0xFF0F0104, 0xABCD,
44258 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
44259                 0x036, 0x00085733,
44260                 0x036, 0x0008D733,
44261                 0x036, 0x00095733,
44262 @@ -1662,7 +1743,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
44263                 0x036, 0x000CE4B4,
44264                 0x036, 0x000D64B4,
44265                 0x036, 0x000DE4B4,
44266 -       0xFF0F0204, 0xCDEF,
44267 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
44268                 0x036, 0x00085733,
44269                 0x036, 0x0008D733,
44270                 0x036, 0x00095733,
44271 @@ -1675,7 +1756,46 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
44272                 0x036, 0x000CE4B4,
44273                 0x036, 0x000D64B4,
44274                 0x036, 0x000DE4B4,
44275 -       0xFF0F0404, 0xCDEF,
44276 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
44277 +               0x036, 0x000063B5,
44278 +               0x036, 0x0000E3B5,
44279 +               0x036, 0x000163B5,
44280 +               0x036, 0x0001E3B5,
44281 +               0x036, 0x000263B5,
44282 +               0x036, 0x0002E3B5,
44283 +               0x036, 0x000363B5,
44284 +               0x036, 0x0003E3B5,
44285 +               0x036, 0x000463B5,
44286 +               0x036, 0x0004E3B5,
44287 +               0x036, 0x000563B5,
44288 +               0x036, 0x0005E3B5,
44289 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
44290 +               0x036, 0x000056B3,
44291 +               0x036, 0x0000D6B3,
44292 +               0x036, 0x000156B3,
44293 +               0x036, 0x0001D6B3,
44294 +               0x036, 0x00026634,
44295 +               0x036, 0x0002E634,
44296 +               0x036, 0x00036634,
44297 +               0x036, 0x0003E634,
44298 +               0x036, 0x000467B4,
44299 +               0x036, 0x0004E7B4,
44300 +               0x036, 0x000567B4,
44301 +               0x036, 0x0005E7B4,
44302 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
44303 +               0x036, 0x000063B5,
44304 +               0x036, 0x0000E3B5,
44305 +               0x036, 0x000163B5,
44306 +               0x036, 0x0001E3B5,
44307 +               0x036, 0x000263B5,
44308 +               0x036, 0x0002E3B5,
44309 +               0x036, 0x000363B5,
44310 +               0x036, 0x0003E3B5,
44311 +               0x036, 0x000463B5,
44312 +               0x036, 0x0004E3B5,
44313 +               0x036, 0x000563B5,
44314 +               0x036, 0x0005E3B5,
44315 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
44316                 0x036, 0x00085733,
44317                 0x036, 0x0008D733,
44318                 0x036, 0x00095733,
44319 @@ -1688,7 +1808,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
44320                 0x036, 0x000CE4B4,
44321                 0x036, 0x000D64B4,
44322                 0x036, 0x000DE4B4,
44323 -       0xCDCDCDCD, 0xCDCD,
44324 +       0xA0000000,     0x00000000,
44325                 0x036, 0x000056B3,
44326                 0x036, 0x0000D6B3,
44327                 0x036, 0x000156B3,
44328 @@ -1701,103 +1821,162 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
44329                 0x036, 0x0004E7B4,
44330                 0x036, 0x000567B4,
44331                 0x036, 0x0005E7B4,
44332 -       0xFF0F0104, 0xDEAD,
44333 +       0xB0000000,     0x00000000,
44334                 0x0EF, 0x00000000,
44335                 0x0EF, 0x00000008,
44336 -       0xFF0F0104, 0xABCD,
44337 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
44338                 0x03C, 0x000001C8,
44339                 0x03C, 0x00000492,
44340 -       0xFF0F0204, 0xCDEF,
44341 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
44342                 0x03C, 0x000001C8,
44343                 0x03C, 0x00000492,
44344 -       0xFF0F0404, 0xCDEF,
44345 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
44346 +               0x03C, 0x000001B6,
44347 +               0x03C, 0x00000492,
44348 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
44349 +               0x03C, 0x0000022A,
44350 +               0x03C, 0x00000594,
44351 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
44352 +               0x03C, 0x000001B6,
44353 +               0x03C, 0x00000492,
44354 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
44355                 0x03C, 0x000001C8,
44356                 0x03C, 0x00000492,
44357 -       0xCDCDCDCD, 0xCDCD,
44358 +       0xA0000000,     0x00000000,
44359                 0x03C, 0x0000022A,
44360                 0x03C, 0x00000594,
44361 -       0xFF0F0104, 0xDEAD,
44362 -       0xFF0F0104, 0xABCD,
44363 +       0xB0000000,     0x00000000,
44364 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
44365                 0x03C, 0x00000800,
44366 -       0xFF0F0204, 0xCDEF,
44367 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
44368                 0x03C, 0x00000800,
44369 -       0xFF0F0404, 0xCDEF,
44370 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
44371                 0x03C, 0x00000800,
44372 -       0xFF0F02C0, 0xCDEF,
44373 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
44374                 0x03C, 0x00000820,
44375 -       0xCDCDCDCD, 0xCDCD,
44376 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
44377 +               0x03C, 0x00000820,
44378 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
44379 +               0x03C, 0x00000800,
44380 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
44381 +               0x03C, 0x00000800,
44382 +       0xA0000000,     0x00000000,
44383                 0x03C, 0x00000900,
44384 -       0xFF0F0104, 0xDEAD,
44385 +       0xB0000000,     0x00000000,
44386                 0x0EF, 0x00000000,
44387                 0x018, 0x0001712A,
44388                 0x0EF, 0x00000002,
44389 -       0xFF0F0104, 0xABCD,
44390 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
44391                 0x008, 0x0004E400,
44392 -       0xFF0F0204, 0xCDEF,
44393 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
44394                 0x008, 0x0004E400,
44395 -       0xFF0F0404, 0xCDEF,
44396 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
44397 +               0x008, 0x00002000,
44398 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
44399 +               0x008, 0x00002000,
44400 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
44401 +               0x008, 0x00002000,
44402 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
44403 +               0x008, 0x00002000,
44404 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
44405                 0x008, 0x0004E400,
44406 -       0xCDCDCDCD, 0xCDCD,
44407 +       0xA0000000,     0x00000000,
44408                 0x008, 0x00002000,
44409 -       0xFF0F0104, 0xDEAD,
44410 +       0xB0000000,     0x00000000,
44411                 0x0EF, 0x00000000,
44412                 0x0DF, 0x000000C0,
44413 -               0x01F, 0x00040064,
44414 -       0xFF0F0104, 0xABCD,
44415 +               0x01F, 0x00000064,
44416 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
44417                 0x058, 0x000A7284,
44418                 0x059, 0x000600EC,
44419 -       0xFF0F0204, 0xCDEF,
44420 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
44421                 0x058, 0x000A7284,
44422                 0x059, 0x000600EC,
44423 -       0xFF0F0404, 0xCDEF,
44424 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
44425 +               0x058, 0x00081184,
44426 +               0x059, 0x0006016C,
44427 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
44428 +               0x058, 0x00081184,
44429 +               0x059, 0x0006016C,
44430 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
44431 +               0x058, 0x00081184,
44432 +               0x059, 0x0006016C,
44433 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
44434                 0x058, 0x000A7284,
44435                 0x059, 0x000600EC,
44436 -       0xCDCDCDCD, 0xCDCD,
44437 +       0xA0000000,     0x00000000,
44438                 0x058, 0x00081184,
44439                 0x059, 0x0006016C,
44440 -       0xFF0F0104, 0xDEAD,
44441 -       0xFF0F0104, 0xABCD,
44442 +       0xB0000000,     0x00000000,
44443 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
44444                 0x061, 0x000E8D73,
44445                 0x062, 0x00093FC5,
44446 -       0xFF0F0204, 0xCDEF,
44447 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
44448                 0x061, 0x000E8D73,
44449                 0x062, 0x00093FC5,
44450 -       0xFF0F0404, 0xCDEF,
44451 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
44452 +               0x061, 0x000EFD83,
44453 +               0x062, 0x00093FCC,
44454 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
44455 +               0x061, 0x000EAD53,
44456 +               0x062, 0x00093BC4,
44457 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
44458 +               0x061, 0x000EFD83,
44459 +               0x062, 0x00093FCC,
44460 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
44461                 0x061, 0x000E8D73,
44462                 0x062, 0x00093FC5,
44463 -       0xCDCDCDCD, 0xCDCD,
44464 +       0xA0000000,     0x00000000,
44465                 0x061, 0x000EAD53,
44466                 0x062, 0x00093BC4,
44467 -       0xFF0F0104, 0xDEAD,
44468 -       0xFF0F0104, 0xABCD,
44469 +       0xB0000000,     0x00000000,
44470 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
44471                 0x063, 0x000110E9,
44472 -       0xFF0F0204, 0xCDEF,
44473 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
44474                 0x063, 0x000110E9,
44475 -       0xFF0F0404, 0xCDEF,
44476 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
44477 +               0x063, 0x000110EB,
44478 +       0x9000020c,     0x00000000,     0x40000000,     0x00000000,
44479                 0x063, 0x000110E9,
44480 -       0xFF0F0200, 0xCDEF,
44481 -               0x063, 0x000710E9,
44482 -       0xFF0F02C0, 0xCDEF,
44483 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
44484                 0x063, 0x000110E9,
44485 -       0xCDCDCDCD, 0xCDCD,
44486 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
44487 +               0x063, 0x000110EB,
44488 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
44489 +               0x063, 0x000110E9,
44490 +       0xA0000000,     0x00000000,
44491                 0x063, 0x000714E9,
44492 -       0xFF0F0104, 0xDEAD,
44493 -       0xFF0F0104, 0xABCD,
44494 +       0xB0000000,     0x00000000,
44495 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
44496 +               0x064, 0x0001C27C,
44497 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
44498 +               0x064, 0x0001C27C,
44499 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
44500                 0x064, 0x0001C27C,
44501 -       0xFF0F0204, 0xCDEF,
44502 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
44503 +               0x064, 0x0001C67C,
44504 +       0x90000200,     0x00000000,     0x40000000,     0x00000000,
44505                 0x064, 0x0001C27C,
44506 -       0xFF0F0404, 0xCDEF,
44507 +       0x90000410,     0x00000000,     0x40000000,     0x00000000,
44508                 0x064, 0x0001C27C,
44509 -       0xCDCDCDCD, 0xCDCD,
44510 +       0xA0000000,     0x00000000,
44511                 0x064, 0x0001C67C,
44512 -       0xFF0F0104, 0xDEAD,
44513 -       0xFF0F0200, 0xABCD,
44514 +       0xB0000000,     0x00000000,
44515 +       0x80000111,     0x00000000,     0x40000000,     0x00000000,
44516 +               0x065, 0x00091016,
44517 +       0x90000110,     0x00000000,     0x40000000,     0x00000000,
44518 +               0x065, 0x00091016,
44519 +       0x90000210,     0x00000000,     0x40000000,     0x00000000,
44520                 0x065, 0x00093016,
44521 -       0xFF0F02C0, 0xCDEF,
44522 +               0x9000020c,     0x00000000,     0x40000000,     0x00000000,
44523                 0x065, 0x00093015,
44524 -       0xCDCDCDCD, 0xCDCD,
44525 +               0x9000040c,     0x00000000,     0x40000000,     0x00000000,
44526 +               0x065, 0x00093015,
44527 +               0x90000200,     0x00000000,     0x40000000,     0x00000000,
44528 +               0x065, 0x00093016,
44529 +               0xA0000000,     0x00000000,
44530                 0x065, 0x00091016,
44531 -       0xFF0F0200, 0xDEAD,
44532 +               0xB0000000,     0x00000000,
44533                 0x018, 0x00000006,
44534                 0x0EF, 0x00002000,
44535                 0x03B, 0x0003824B,
44536 @@ -1895,9 +2074,10 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
44537                 0x0B4, 0x0001214C,
44538                 0x0B7, 0x0003000C,
44539                 0x01C, 0x000539D2,
44540 +               0x0C4, 0x000AFE00,
44541                 0x018, 0x0001F12A,
44542 -               0x0FE, 0x00000000,
44543 -               0x0FE, 0x00000000,
44544 +               0xFFE, 0x00000000,
44545 +               0xFFE, 0x00000000,
44546                 0x018, 0x0001712A,
44548  };
44549 @@ -2017,6 +2197,7 @@ u32 RTL8812AE_MAC_REG_ARRAY[] = {
44550  u32 RTL8812AE_MAC_1T_ARRAYLEN = ARRAY_SIZE(RTL8812AE_MAC_REG_ARRAY);
44552  u32 RTL8821AE_MAC_REG_ARRAY[] = {
44553 +               0x421, 0x0000000F,
44554                 0x428, 0x0000000A,
44555                 0x429, 0x00000010,
44556                 0x430, 0x00000000,
44557 @@ -2485,7 +2666,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
44558                 0x81C, 0xA6360001,
44559                 0x81C, 0xA5380001,
44560                 0x81C, 0xA43A0001,
44561 -               0x81C, 0xA33C0001,
44562 +               0x81C, 0x683C0001,
44563                 0x81C, 0x673E0001,
44564                 0x81C, 0x66400001,
44565                 0x81C, 0x65420001,
44566 @@ -2519,7 +2700,66 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
44567                 0x81C, 0x017A0001,
44568                 0x81C, 0x017C0001,
44569                 0x81C, 0x017E0001,
44570 -       0xFF0F02C0, 0xABCD,
44571 +       0x8000020c,     0x00000000,     0x40000000,     0x00000000,
44572 +               0x81C, 0xFB000101,
44573 +               0x81C, 0xFA020101,
44574 +               0x81C, 0xF9040101,
44575 +               0x81C, 0xF8060101,
44576 +               0x81C, 0xF7080101,
44577 +               0x81C, 0xF60A0101,
44578 +               0x81C, 0xF50C0101,
44579 +               0x81C, 0xF40E0101,
44580 +               0x81C, 0xF3100101,
44581 +               0x81C, 0xF2120101,
44582 +               0x81C, 0xF1140101,
44583 +               0x81C, 0xF0160101,
44584 +               0x81C, 0xEF180101,
44585 +               0x81C, 0xEE1A0101,
44586 +               0x81C, 0xED1C0101,
44587 +               0x81C, 0xEC1E0101,
44588 +               0x81C, 0xEB200101,
44589 +               0x81C, 0xEA220101,
44590 +               0x81C, 0xE9240101,
44591 +               0x81C, 0xE8260101,
44592 +               0x81C, 0xE7280101,
44593 +               0x81C, 0xE62A0101,
44594 +               0x81C, 0xE52C0101,
44595 +               0x81C, 0xE42E0101,
44596 +               0x81C, 0xE3300101,
44597 +               0x81C, 0xA5320101,
44598 +               0x81C, 0xA4340101,
44599 +               0x81C, 0xA3360101,
44600 +               0x81C, 0x87380101,
44601 +               0x81C, 0x863A0101,
44602 +               0x81C, 0x853C0101,
44603 +               0x81C, 0x843E0101,
44604 +               0x81C, 0x69400101,
44605 +               0x81C, 0x68420101,
44606 +               0x81C, 0x67440101,
44607 +               0x81C, 0x66460101,
44608 +               0x81C, 0x49480101,
44609 +               0x81C, 0x484A0101,
44610 +               0x81C, 0x474C0101,
44611 +               0x81C, 0x2A4E0101,
44612 +               0x81C, 0x29500101,
44613 +               0x81C, 0x28520101,
44614 +               0x81C, 0x27540101,
44615 +               0x81C, 0x26560101,
44616 +               0x81C, 0x25580101,
44617 +               0x81C, 0x245A0101,
44618 +               0x81C, 0x235C0101,
44619 +               0x81C, 0x055E0101,
44620 +               0x81C, 0x04600101,
44621 +               0x81C, 0x03620101,
44622 +               0x81C, 0x02640101,
44623 +               0x81C, 0x01660101,
44624 +               0x81C, 0x01680101,
44625 +               0x81C, 0x016A0101,
44626 +               0x81C, 0x016C0101,
44627 +               0x81C, 0x016E0101,
44628 +               0x81C, 0x01700101,
44629 +               0x81C, 0x01720101,
44630 +       0x9000040c,     0x00000000,     0x40000000,     0x00000000,
44631                 0x81C, 0xFB000101,
44632                 0x81C, 0xFA020101,
44633                 0x81C, 0xF9040101,
44634 @@ -2578,7 +2818,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
44635                 0x81C, 0x016E0101,
44636                 0x81C, 0x01700101,
44637                 0x81C, 0x01720101,
44638 -       0xCDCDCDCD, 0xCDCD,
44639 +       0xA0000000,     0x00000000,
44640                 0x81C, 0xFF000101,
44641                 0x81C, 0xFF020101,
44642                 0x81C, 0xFE040101,
44643 @@ -2637,7 +2877,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
44644                 0x81C, 0x046E0101,
44645                 0x81C, 0x03700101,
44646                 0x81C, 0x02720101,
44647 -       0xFF0F02C0, 0xDEAD,
44648 +       0xB0000000,     0x00000000,
44649                 0x81C, 0x01740101,
44650                 0x81C, 0x01760101,
44651                 0x81C, 0x01780101,
44652 diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
44653 index 948cb79050ea..e7d51ac9b689 100644
44654 --- a/drivers/net/wireless/realtek/rtw88/debug.c
44655 +++ b/drivers/net/wireless/realtek/rtw88/debug.c
44656 @@ -270,7 +270,7 @@ static ssize_t rtw_debugfs_set_rsvd_page(struct file *filp,
44658         if (num != 2) {
44659                 rtw_warn(rtwdev, "invalid arguments\n");
44660 -               return num;
44661 +               return -EINVAL;
44662         }
44664         debugfs_priv->rsvd_page.page_offset = offset;
44665 diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
44666 index 35afea91fd29..92b9cf1f9525 100644
44667 --- a/drivers/net/wireless/realtek/rtw88/main.h
44668 +++ b/drivers/net/wireless/realtek/rtw88/main.h
44669 @@ -1166,6 +1166,7 @@ struct rtw_chip_info {
44670         bool en_dis_dpd;
44671         u16 dpd_ratemask;
44672         u8 iqk_threshold;
44673 +       u8 lck_threshold;
44674         const struct rtw_pwr_track_tbl *pwr_track_tbl;
44676         u8 bfer_su_max_num;
44677 @@ -1534,6 +1535,7 @@ struct rtw_dm_info {
44678         u32 rrsr_mask_min;
44679         u8 thermal_avg[RTW_RF_PATH_MAX];
44680         u8 thermal_meter_k;
44681 +       u8 thermal_meter_lck;
44682         s8 delta_power_index[RTW_RF_PATH_MAX];
44683         s8 delta_power_index_last[RTW_RF_PATH_MAX];
44684         u8 default_ofdm_index;
44685 diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
44686 index 786a48649946..6b5c885798a4 100644
44687 --- a/drivers/net/wireless/realtek/rtw88/pci.c
44688 +++ b/drivers/net/wireless/realtek/rtw88/pci.c
44689 @@ -581,23 +581,30 @@ static int rtw_pci_start(struct rtw_dev *rtwdev)
44691         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
44693 +       rtw_pci_napi_start(rtwdev);
44695         spin_lock_bh(&rtwpci->irq_lock);
44696 +       rtwpci->running = true;
44697         rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
44698         spin_unlock_bh(&rtwpci->irq_lock);
44700 -       rtw_pci_napi_start(rtwdev);
44702         return 0;
44705  static void rtw_pci_stop(struct rtw_dev *rtwdev)
44707         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
44708 +       struct pci_dev *pdev = rtwpci->pdev;
44710 +       spin_lock_bh(&rtwpci->irq_lock);
44711 +       rtwpci->running = false;
44712 +       rtw_pci_disable_interrupt(rtwdev, rtwpci);
44713 +       spin_unlock_bh(&rtwpci->irq_lock);
44715 +       synchronize_irq(pdev->irq);
44716         rtw_pci_napi_stop(rtwdev);
44718         spin_lock_bh(&rtwpci->irq_lock);
44719 -       rtw_pci_disable_interrupt(rtwdev, rtwpci);
44720         rtw_pci_dma_release(rtwdev, rtwpci);
44721         spin_unlock_bh(&rtwpci->irq_lock);
44723 @@ -1138,7 +1145,8 @@ static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
44724                 rtw_fw_c2h_cmd_isr(rtwdev);
44726         /* all of the jobs for this interrupt have been done */
44727 -       rtw_pci_enable_interrupt(rtwdev, rtwpci, rx);
44728 +       if (rtwpci->running)
44729 +               rtw_pci_enable_interrupt(rtwdev, rtwpci, rx);
44730         spin_unlock_bh(&rtwpci->irq_lock);
44732         return IRQ_HANDLED;
44733 @@ -1558,7 +1566,8 @@ static int rtw_pci_napi_poll(struct napi_struct *napi, int budget)
44734         if (work_done < budget) {
44735                 napi_complete_done(napi, work_done);
44736                 spin_lock_bh(&rtwpci->irq_lock);
44737 -               rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
44738 +               if (rtwpci->running)
44739 +                       rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
44740                 spin_unlock_bh(&rtwpci->irq_lock);
44741                 /* When ISR happens during polling and before napi_complete
44742                  * while no further data is received. Data on the dma_ring will
44743 diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h
44744 index e76fc549a788..0ffae887527a 100644
44745 --- a/drivers/net/wireless/realtek/rtw88/pci.h
44746 +++ b/drivers/net/wireless/realtek/rtw88/pci.h
44747 @@ -211,6 +211,7 @@ struct rtw_pci {
44748         spinlock_t irq_lock;
44749         u32 irq_mask[4];
44750         bool irq_enabled;
44751 +       bool running;
44753         /* napi structure */
44754         struct net_device netdev;
44755 diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
44756 index e114ddecac09..21e77fcfa4d5 100644
44757 --- a/drivers/net/wireless/realtek/rtw88/phy.c
44758 +++ b/drivers/net/wireless/realtek/rtw88/phy.c
44759 @@ -1584,7 +1584,7 @@ void rtw_phy_load_tables(struct rtw_dev *rtwdev)
44761  EXPORT_SYMBOL(rtw_phy_load_tables);
44763 -static u8 rtw_get_channel_group(u8 channel)
44764 +static u8 rtw_get_channel_group(u8 channel, u8 rate)
44766         switch (channel) {
44767         default:
44768 @@ -1628,6 +1628,7 @@ static u8 rtw_get_channel_group(u8 channel)
44769         case 106:
44770                 return 4;
44771         case 14:
44772 +               return rate <= DESC_RATE11M ? 5 : 4;
44773         case 108:
44774         case 110:
44775         case 112:
44776 @@ -1879,7 +1880,7 @@ void rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw,
44777         s8 *remnant = &pwr_param->pwr_remnant;
44779         pwr_idx = &rtwdev->efuse.txpwr_idx_table[path];
44780 -       group = rtw_get_channel_group(ch);
44781 +       group = rtw_get_channel_group(ch, rate);
44783         /* base power index for 2.4G/5G */
44784         if (IS_CH_2G_BAND(ch)) {
44785 @@ -2219,6 +2220,20 @@ s8 rtw_phy_pwrtrack_get_pwridx(struct rtw_dev *rtwdev,
44787  EXPORT_SYMBOL(rtw_phy_pwrtrack_get_pwridx);
44789 +bool rtw_phy_pwrtrack_need_lck(struct rtw_dev *rtwdev)
44791 +       struct rtw_dm_info *dm_info = &rtwdev->dm_info;
44792 +       u8 delta_lck;
44794 +       delta_lck = abs(dm_info->thermal_avg[0] - dm_info->thermal_meter_lck);
44795 +       if (delta_lck >= rtwdev->chip->lck_threshold) {
44796 +               dm_info->thermal_meter_lck = dm_info->thermal_avg[0];
44797 +               return true;
44798 +       }
44799 +       return false;
44801 +EXPORT_SYMBOL(rtw_phy_pwrtrack_need_lck);
44803  bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev)
44805         struct rtw_dm_info *dm_info = &rtwdev->dm_info;
44806 diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h
44807 index a4fcfb878550..a0742a69446d 100644
44808 --- a/drivers/net/wireless/realtek/rtw88/phy.h
44809 +++ b/drivers/net/wireless/realtek/rtw88/phy.h
44810 @@ -55,6 +55,7 @@ u8 rtw_phy_pwrtrack_get_delta(struct rtw_dev *rtwdev, u8 path);
44811  s8 rtw_phy_pwrtrack_get_pwridx(struct rtw_dev *rtwdev,
44812                                struct rtw_swing_table *swing_table,
44813                                u8 tbl_path, u8 therm_path, u8 delta);
44814 +bool rtw_phy_pwrtrack_need_lck(struct rtw_dev *rtwdev);
44815  bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev);
44816  void rtw_phy_config_swing_table(struct rtw_dev *rtwdev,
44817                                 struct rtw_swing_table *swing_table);
44818 diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
44819 index ea518aa78552..819af34dac34 100644
44820 --- a/drivers/net/wireless/realtek/rtw88/reg.h
44821 +++ b/drivers/net/wireless/realtek/rtw88/reg.h
44822 @@ -652,8 +652,13 @@
44823  #define RF_TXATANK     0x64
44824  #define RF_TRXIQ       0x66
44825  #define RF_RXIQGEN     0x8d
44826 +#define RF_SYN_PFD     0xb0
44827  #define RF_XTALX2      0xb8
44828 +#define RF_SYN_CTRL    0xbb
44829  #define RF_MALSEL      0xbe
44830 +#define RF_SYN_AAC     0xc9
44831 +#define RF_AAC_CTRL    0xca
44832 +#define RF_FAST_LCK    0xcc
44833  #define RF_RCKD                0xde
44834  #define RF_TXADBG      0xde
44835  #define RF_LUTDBG      0xdf
44836 diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
44837 index dd560c28abb2..448922cb2e63 100644
44838 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
44839 +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
44840 @@ -1126,6 +1126,7 @@ static void rtw8822c_pwrtrack_init(struct rtw_dev *rtwdev)
44842         dm_info->pwr_trk_triggered = false;
44843         dm_info->thermal_meter_k = rtwdev->efuse.thermal_meter_k;
44844 +       dm_info->thermal_meter_lck = rtwdev->efuse.thermal_meter_k;
44847  static void rtw8822c_phy_set_param(struct rtw_dev *rtwdev)
44848 @@ -2108,6 +2109,26 @@ static void rtw8822c_false_alarm_statistics(struct rtw_dev *rtwdev)
44849         rtw_write32_set(rtwdev, REG_RX_BREAK, BIT_COM_RX_GCK_EN);
44852 +static void rtw8822c_do_lck(struct rtw_dev *rtwdev)
44854 +       u32 val;
44856 +       rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_CTRL, RFREG_MASK, 0x80010);
44857 +       rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_PFD, RFREG_MASK, 0x1F0FA);
44858 +       fsleep(1);
44859 +       rtw_write_rf(rtwdev, RF_PATH_A, RF_AAC_CTRL, RFREG_MASK, 0x80000);
44860 +       rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_AAC, RFREG_MASK, 0x80001);
44861 +       read_poll_timeout(rtw_read_rf, val, val != 0x1, 1000, 100000,
44862 +                         true, rtwdev, RF_PATH_A, RF_AAC_CTRL, 0x1000);
44863 +       rtw_write_rf(rtwdev, RF_PATH_A, RF_SYN_PFD, RFREG_MASK, 0x1F0F8);
44864 +       rtw_write_rf(rtwdev, RF_PATH_B, RF_SYN_CTRL, RFREG_MASK, 0x80010);
44866 +       rtw_write_rf(rtwdev, RF_PATH_A, RF_FAST_LCK, RFREG_MASK, 0x0f000);
44867 +       rtw_write_rf(rtwdev, RF_PATH_A, RF_FAST_LCK, RFREG_MASK, 0x4f000);
44868 +       fsleep(1);
44869 +       rtw_write_rf(rtwdev, RF_PATH_A, RF_FAST_LCK, RFREG_MASK, 0x0f000);
44872  static void rtw8822c_do_iqk(struct rtw_dev *rtwdev)
44874         struct rtw_iqk_para para = {0};
44875 @@ -3538,11 +3559,12 @@ static void __rtw8822c_pwr_track(struct rtw_dev *rtwdev)
44877         rtw_phy_config_swing_table(rtwdev, &swing_table);
44879 +       if (rtw_phy_pwrtrack_need_lck(rtwdev))
44880 +               rtw8822c_do_lck(rtwdev);
44882         for (i = 0; i < rtwdev->hal.rf_path_num; i++)
44883                 rtw8822c_pwr_track_path(rtwdev, &swing_table, i);
44885 -       if (rtw_phy_pwrtrack_need_iqk(rtwdev))
44886 -               rtw8822c_do_iqk(rtwdev);
44889  static void rtw8822c_pwr_track(struct rtw_dev *rtwdev)
44890 @@ -4351,6 +4373,7 @@ struct rtw_chip_info rtw8822c_hw_spec = {
44891         .dpd_ratemask = DIS_DPD_RATEALL,
44892         .pwr_track_tbl = &rtw8822c_rtw_pwr_track_tbl,
44893         .iqk_threshold = 8,
44894 +       .lck_threshold = 8,
44895         .bfer_su_max_num = 2,
44896         .bfer_mu_max_num = 1,
44897         .rx_ldpc = true,
44898 diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
44899 index fe0287b22a25..e0c502bc4270 100644
44900 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
44901 +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
44902 @@ -1513,7 +1513,7 @@ static int rsi_restore(struct device *dev)
44904  static const struct dev_pm_ops rsi_pm_ops = {
44905         .suspend = rsi_suspend,
44906 -       .resume = rsi_resume,
44907 +       .resume_noirq = rsi_resume,
44908         .freeze = rsi_freeze,
44909         .thaw = rsi_thaw,
44910         .restore = rsi_restore,
44911 diff --git a/drivers/net/wireless/ti/wlcore/boot.c b/drivers/net/wireless/ti/wlcore/boot.c
44912 index e14d88e558f0..85abd0a2d1c9 100644
44913 --- a/drivers/net/wireless/ti/wlcore/boot.c
44914 +++ b/drivers/net/wireless/ti/wlcore/boot.c
44915 @@ -72,6 +72,7 @@ static int wlcore_validate_fw_ver(struct wl1271 *wl)
44916         unsigned int *min_ver = (wl->fw_type == WL12XX_FW_TYPE_MULTI) ?
44917                 wl->min_mr_fw_ver : wl->min_sr_fw_ver;
44918         char min_fw_str[32] = "";
44919 +       int off = 0;
44920         int i;
44922         /* the chip must be exactly equal */
44923 @@ -105,13 +106,15 @@ static int wlcore_validate_fw_ver(struct wl1271 *wl)
44924         return 0;
44926  fail:
44927 -       for (i = 0; i < NUM_FW_VER; i++)
44928 +       for (i = 0; i < NUM_FW_VER && off < sizeof(min_fw_str); i++)
44929                 if (min_ver[i] == WLCORE_FW_VER_IGNORE)
44930 -                       snprintf(min_fw_str, sizeof(min_fw_str),
44931 -                                 "%s*.", min_fw_str);
44932 +                       off += snprintf(min_fw_str + off,
44933 +                                       sizeof(min_fw_str) - off,
44934 +                                       "*.");
44935                 else
44936 -                       snprintf(min_fw_str, sizeof(min_fw_str),
44937 -                                 "%s%u.", min_fw_str, min_ver[i]);
44938 +                       off += snprintf(min_fw_str + off,
44939 +                                       sizeof(min_fw_str) - off,
44940 +                                       "%u.", min_ver[i]);
44942         wl1271_error("Your WiFi FW version (%u.%u.%u.%u.%u) is invalid.\n"
44943                      "Please use at least FW %s\n"
44944 diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
44945 index b143293e694f..a9e13e6d65c5 100644
44946 --- a/drivers/net/wireless/ti/wlcore/debugfs.h
44947 +++ b/drivers/net/wireless/ti/wlcore/debugfs.h
44948 @@ -78,13 +78,14 @@ static ssize_t sub## _ ##name## _read(struct file *file,            \
44949         struct wl1271 *wl = file->private_data;                         \
44950         struct struct_type *stats = wl->stats.fw_stats;                 \
44951         char buf[DEBUGFS_FORMAT_BUFFER_SIZE] = "";                      \
44952 +       int pos = 0;                                                    \
44953         int i;                                                          \
44954                                                                         \
44955         wl1271_debugfs_update_stats(wl);                                \
44956                                                                         \
44957 -       for (i = 0; i < len; i++)                                       \
44958 -               snprintf(buf, sizeof(buf), "%s[%d] = %d\n",             \
44959 -                        buf, i, stats->sub.name[i]);                   \
44960 +       for (i = 0; i < len && pos < sizeof(buf); i++)                  \
44961 +               pos += snprintf(buf + pos, sizeof(buf) - pos,           \
44962 +                        "[%d] = %d\n", i, stats->sub.name[i]);         \
44963                                                                         \
44964         return wl1271_format_buffer(userbuf, count, ppos, "%s", buf);   \
44965  }                                                                      \
44966 diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h
44967 index e98e04ee9a2c..59b7b93c5963 100644
44968 --- a/drivers/net/wireless/wl3501.h
44969 +++ b/drivers/net/wireless/wl3501.h
44970 @@ -379,16 +379,7 @@ struct wl3501_get_confirm {
44971         u8      mib_value[100];
44972  };
44974 -struct wl3501_join_req {
44975 -       u16                         next_blk;
44976 -       u8                          sig_id;
44977 -       u8                          reserved;
44978 -       struct iw_mgmt_data_rset    operational_rset;
44979 -       u16                         reserved2;
44980 -       u16                         timeout;
44981 -       u16                         probe_delay;
44982 -       u8                          timestamp[8];
44983 -       u8                          local_time[8];
44984 +struct wl3501_req {
44985         u16                         beacon_period;
44986         u16                         dtim_period;
44987         u16                         cap_info;
44988 @@ -401,6 +392,19 @@ struct wl3501_join_req {
44989         struct iw_mgmt_data_rset    bss_basic_rset;
44990  };
44992 +struct wl3501_join_req {
44993 +       u16                         next_blk;
44994 +       u8                          sig_id;
44995 +       u8                          reserved;
44996 +       struct iw_mgmt_data_rset    operational_rset;
44997 +       u16                         reserved2;
44998 +       u16                         timeout;
44999 +       u16                         probe_delay;
45000 +       u8                          timestamp[8];
45001 +       u8                          local_time[8];
45002 +       struct wl3501_req           req;
45005  struct wl3501_join_confirm {
45006         u16     next_blk;
45007         u8      sig_id;
45008 @@ -443,16 +447,7 @@ struct wl3501_scan_confirm {
45009         u16                         status;
45010         char                        timestamp[8];
45011         char                        localtime[8];
45012 -       u16                         beacon_period;
45013 -       u16                         dtim_period;
45014 -       u16                         cap_info;
45015 -       u8                          bss_type;
45016 -       u8                          bssid[ETH_ALEN];
45017 -       struct iw_mgmt_essid_pset   ssid;
45018 -       struct iw_mgmt_ds_pset      ds_pset;
45019 -       struct iw_mgmt_cf_pset      cf_pset;
45020 -       struct iw_mgmt_ibss_pset    ibss_pset;
45021 -       struct iw_mgmt_data_rset    bss_basic_rset;
45022 +       struct wl3501_req           req;
45023         u8                          rssi;
45024  };
45026 @@ -471,8 +466,10 @@ struct wl3501_md_req {
45027         u16     size;
45028         u8      pri;
45029         u8      service_class;
45030 -       u8      daddr[ETH_ALEN];
45031 -       u8      saddr[ETH_ALEN];
45032 +       struct {
45033 +               u8      daddr[ETH_ALEN];
45034 +               u8      saddr[ETH_ALEN];
45035 +       } addr;
45036  };
45038  struct wl3501_md_ind {
45039 @@ -484,8 +481,10 @@ struct wl3501_md_ind {
45040         u8      reception;
45041         u8      pri;
45042         u8      service_class;
45043 -       u8      daddr[ETH_ALEN];
45044 -       u8      saddr[ETH_ALEN];
45045 +       struct {
45046 +               u8      daddr[ETH_ALEN];
45047 +               u8      saddr[ETH_ALEN];
45048 +       } addr;
45049  };
45051  struct wl3501_md_confirm {
45052 diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
45053 index 8ca5789c7b37..672f5d5f3f2c 100644
45054 --- a/drivers/net/wireless/wl3501_cs.c
45055 +++ b/drivers/net/wireless/wl3501_cs.c
45056 @@ -469,6 +469,7 @@ static int wl3501_send_pkt(struct wl3501_card *this, u8 *data, u16 len)
45057         struct wl3501_md_req sig = {
45058                 .sig_id = WL3501_SIG_MD_REQ,
45059         };
45060 +       size_t sig_addr_len = sizeof(sig.addr);
45061         u8 *pdata = (char *)data;
45062         int rc = -EIO;
45064 @@ -484,9 +485,9 @@ static int wl3501_send_pkt(struct wl3501_card *this, u8 *data, u16 len)
45065                         goto out;
45066                 }
45067                 rc = 0;
45068 -               memcpy(&sig.daddr[0], pdata, 12);
45069 -               pktlen = len - 12;
45070 -               pdata += 12;
45071 +               memcpy(&sig.addr, pdata, sig_addr_len);
45072 +               pktlen = len - sig_addr_len;
45073 +               pdata += sig_addr_len;
45074                 sig.data = bf;
45075                 if (((*pdata) * 256 + (*(pdata + 1))) > 1500) {
45076                         u8 addr4[ETH_ALEN] = {
45077 @@ -589,7 +590,7 @@ static int wl3501_mgmt_join(struct wl3501_card *this, u16 stas)
45078         struct wl3501_join_req sig = {
45079                 .sig_id           = WL3501_SIG_JOIN_REQ,
45080                 .timeout          = 10,
45081 -               .ds_pset = {
45082 +               .req.ds_pset = {
45083                         .el = {
45084                                 .id  = IW_MGMT_INFO_ELEMENT_DS_PARAMETER_SET,
45085                                 .len = 1,
45086 @@ -598,7 +599,7 @@ static int wl3501_mgmt_join(struct wl3501_card *this, u16 stas)
45087                 },
45088         };
45090 -       memcpy(&sig.beacon_period, &this->bss_set[stas].beacon_period, 72);
45091 +       memcpy(&sig.req, &this->bss_set[stas].req, sizeof(sig.req));
45092         return wl3501_esbq_exec(this, &sig, sizeof(sig));
45095 @@ -666,35 +667,37 @@ static void wl3501_mgmt_scan_confirm(struct wl3501_card *this, u16 addr)
45096         if (sig.status == WL3501_STATUS_SUCCESS) {
45097                 pr_debug("success");
45098                 if ((this->net_type == IW_MODE_INFRA &&
45099 -                    (sig.cap_info & WL3501_MGMT_CAPABILITY_ESS)) ||
45100 +                    (sig.req.cap_info & WL3501_MGMT_CAPABILITY_ESS)) ||
45101                     (this->net_type == IW_MODE_ADHOC &&
45102 -                    (sig.cap_info & WL3501_MGMT_CAPABILITY_IBSS)) ||
45103 +                    (sig.req.cap_info & WL3501_MGMT_CAPABILITY_IBSS)) ||
45104                     this->net_type == IW_MODE_AUTO) {
45105                         if (!this->essid.el.len)
45106                                 matchflag = 1;
45107                         else if (this->essid.el.len == 3 &&
45108                                  !memcmp(this->essid.essid, "ANY", 3))
45109                                 matchflag = 1;
45110 -                       else if (this->essid.el.len != sig.ssid.el.len)
45111 +                       else if (this->essid.el.len != sig.req.ssid.el.len)
45112                                 matchflag = 0;
45113 -                       else if (memcmp(this->essid.essid, sig.ssid.essid,
45114 +                       else if (memcmp(this->essid.essid, sig.req.ssid.essid,
45115                                         this->essid.el.len))
45116                                 matchflag = 0;
45117                         else
45118                                 matchflag = 1;
45119                         if (matchflag) {
45120                                 for (i = 0; i < this->bss_cnt; i++) {
45121 -                                       if (ether_addr_equal_unaligned(this->bss_set[i].bssid, sig.bssid)) {
45122 +                                       if (ether_addr_equal_unaligned(this->bss_set[i].req.bssid,
45123 +                                                                      sig.req.bssid)) {
45124                                                 matchflag = 0;
45125                                                 break;
45126                                         }
45127                                 }
45128                         }
45129                         if (matchflag && (i < 20)) {
45130 -                               memcpy(&this->bss_set[i].beacon_period,
45131 -                                      &sig.beacon_period, 73);
45132 +                               memcpy(&this->bss_set[i].req,
45133 +                                      &sig.req, sizeof(sig.req));
45134                                 this->bss_cnt++;
45135                                 this->rssi = sig.rssi;
45136 +                               this->bss_set[i].rssi = sig.rssi;
45137                         }
45138                 }
45139         } else if (sig.status == WL3501_STATUS_TIMEOUT) {
45140 @@ -886,19 +889,19 @@ static void wl3501_mgmt_join_confirm(struct net_device *dev, u16 addr)
45141                         if (this->join_sta_bss < this->bss_cnt) {
45142                                 const int i = this->join_sta_bss;
45143                                 memcpy(this->bssid,
45144 -                                      this->bss_set[i].bssid, ETH_ALEN);
45145 -                               this->chan = this->bss_set[i].ds_pset.chan;
45146 +                                      this->bss_set[i].req.bssid, ETH_ALEN);
45147 +                               this->chan = this->bss_set[i].req.ds_pset.chan;
45148                                 iw_copy_mgmt_info_element(&this->keep_essid.el,
45149 -                                                    &this->bss_set[i].ssid.el);
45150 +                                                    &this->bss_set[i].req.ssid.el);
45151                                 wl3501_mgmt_auth(this);
45152                         }
45153                 } else {
45154                         const int i = this->join_sta_bss;
45156 -                       memcpy(&this->bssid, &this->bss_set[i].bssid, ETH_ALEN);
45157 -                       this->chan = this->bss_set[i].ds_pset.chan;
45158 +                       memcpy(&this->bssid, &this->bss_set[i].req.bssid, ETH_ALEN);
45159 +                       this->chan = this->bss_set[i].req.ds_pset.chan;
45160                         iw_copy_mgmt_info_element(&this->keep_essid.el,
45161 -                                                 &this->bss_set[i].ssid.el);
45162 +                                                 &this->bss_set[i].req.ssid.el);
45163                         wl3501_online(dev);
45164                 }
45165         } else {
45166 @@ -980,7 +983,8 @@ static inline void wl3501_md_ind_interrupt(struct net_device *dev,
45167         } else {
45168                 skb->dev = dev;
45169                 skb_reserve(skb, 2); /* IP headers on 16 bytes boundaries */
45170 -               skb_copy_to_linear_data(skb, (unsigned char *)&sig.daddr, 12);
45171 +               skb_copy_to_linear_data(skb, (unsigned char *)&sig.addr,
45172 +                                       sizeof(sig.addr));
45173                 wl3501_receive(this, skb->data, pkt_len);
45174                 skb_put(skb, pkt_len);
45175                 skb->protocol   = eth_type_trans(skb, dev);
45176 @@ -1571,30 +1575,30 @@ static int wl3501_get_scan(struct net_device *dev, struct iw_request_info *info,
45177         for (i = 0; i < this->bss_cnt; ++i) {
45178                 iwe.cmd                 = SIOCGIWAP;
45179                 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
45180 -               memcpy(iwe.u.ap_addr.sa_data, this->bss_set[i].bssid, ETH_ALEN);
45181 +               memcpy(iwe.u.ap_addr.sa_data, this->bss_set[i].req.bssid, ETH_ALEN);
45182                 current_ev = iwe_stream_add_event(info, current_ev,
45183                                                   extra + IW_SCAN_MAX_DATA,
45184                                                   &iwe, IW_EV_ADDR_LEN);
45185                 iwe.cmd           = SIOCGIWESSID;
45186                 iwe.u.data.flags  = 1;
45187 -               iwe.u.data.length = this->bss_set[i].ssid.el.len;
45188 +               iwe.u.data.length = this->bss_set[i].req.ssid.el.len;
45189                 current_ev = iwe_stream_add_point(info, current_ev,
45190                                                   extra + IW_SCAN_MAX_DATA,
45191                                                   &iwe,
45192 -                                                 this->bss_set[i].ssid.essid);
45193 +                                                 this->bss_set[i].req.ssid.essid);
45194                 iwe.cmd    = SIOCGIWMODE;
45195 -               iwe.u.mode = this->bss_set[i].bss_type;
45196 +               iwe.u.mode = this->bss_set[i].req.bss_type;
45197                 current_ev = iwe_stream_add_event(info, current_ev,
45198                                                   extra + IW_SCAN_MAX_DATA,
45199                                                   &iwe, IW_EV_UINT_LEN);
45200                 iwe.cmd = SIOCGIWFREQ;
45201 -               iwe.u.freq.m = this->bss_set[i].ds_pset.chan;
45202 +               iwe.u.freq.m = this->bss_set[i].req.ds_pset.chan;
45203                 iwe.u.freq.e = 0;
45204                 current_ev = iwe_stream_add_event(info, current_ev,
45205                                                   extra + IW_SCAN_MAX_DATA,
45206                                                   &iwe, IW_EV_FREQ_LEN);
45207                 iwe.cmd = SIOCGIWENCODE;
45208 -               if (this->bss_set[i].cap_info & WL3501_MGMT_CAPABILITY_PRIVACY)
45209 +               if (this->bss_set[i].req.cap_info & WL3501_MGMT_CAPABILITY_PRIVACY)
45210                         iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
45211                 else
45212                         iwe.u.data.flags = IW_ENCODE_DISABLED;
45213 diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
45214 index f1469ac8ff42..3fe5b81eda2d 100644
45215 --- a/drivers/nfc/pn533/pn533.c
45216 +++ b/drivers/nfc/pn533/pn533.c
45217 @@ -706,6 +706,9 @@ static bool pn533_target_type_a_is_valid(struct pn533_target_type_a *type_a,
45218         if (PN533_TYPE_A_SEL_CASCADE(type_a->sel_res) != 0)
45219                 return false;
45221 +       if (type_a->nfcid_len > NFC_NFCID1_MAXSIZE)
45222 +               return false;
45224         return true;
45227 diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
45228 index 0896e21642be..d5d7e0cdd78d 100644
45229 --- a/drivers/nvme/host/core.c
45230 +++ b/drivers/nvme/host/core.c
45231 @@ -2681,7 +2681,8 @@ static void nvme_set_latency_tolerance(struct device *dev, s32 val)
45233         if (ctrl->ps_max_latency_us != latency) {
45234                 ctrl->ps_max_latency_us = latency;
45235 -               nvme_configure_apst(ctrl);
45236 +               if (ctrl->state == NVME_CTRL_LIVE)
45237 +                       nvme_configure_apst(ctrl);
45238         }
45241 diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
45242 index a1d476e1ac02..ec1e454848e5 100644
45243 --- a/drivers/nvme/host/multipath.c
45244 +++ b/drivers/nvme/host/multipath.c
45245 @@ -668,6 +668,10 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
45246                 if (desc.state) {
45247                         /* found the group desc: update */
45248                         nvme_update_ns_ana_state(&desc, ns);
45249 +               } else {
45250 +                       /* group desc not found: trigger a re-read */
45251 +                       set_bit(NVME_NS_ANA_PENDING, &ns->flags);
45252 +                       queue_work(nvme_wq, &ns->ctrl->ana_work);
45253                 }
45254         } else {
45255                 ns->ana_state = NVME_ANA_OPTIMIZED; 
45256 diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
45257 index 7249ae74f71f..c92a15c3fbc5 100644
45258 --- a/drivers/nvme/host/pci.c
45259 +++ b/drivers/nvme/host/pci.c
45260 @@ -852,7 +852,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
45261                                 return nvme_setup_prp_simple(dev, req,
45262                                                              &cmnd->rw, &bv);
45264 -                       if (iod->nvmeq->qid &&
45265 +                       if (iod->nvmeq->qid && sgl_threshold &&
45266                             dev->ctrl.sgls & ((1 << 0) | (1 << 1)))
45267                                 return nvme_setup_sgl_simple(dev, req,
45268                                                              &cmnd->rw, &bv);
45269 diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
45270 index a0f00cb8f9f3..d7d7c81d0701 100644
45271 --- a/drivers/nvme/host/tcp.c
45272 +++ b/drivers/nvme/host/tcp.c
45273 @@ -874,7 +874,7 @@ static void nvme_tcp_state_change(struct sock *sk)
45275         struct nvme_tcp_queue *queue;
45277 -       read_lock(&sk->sk_callback_lock);
45278 +       read_lock_bh(&sk->sk_callback_lock);
45279         queue = sk->sk_user_data;
45280         if (!queue)
45281                 goto done;
45282 @@ -895,7 +895,7 @@ static void nvme_tcp_state_change(struct sock *sk)
45284         queue->state_change(sk);
45285  done:
45286 -       read_unlock(&sk->sk_callback_lock);
45287 +       read_unlock_bh(&sk->sk_callback_lock);
45290  static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
45291 diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
45292 index fe6b8aa90b53..5a1ab49908c3 100644
45293 --- a/drivers/nvme/target/admin-cmd.c
45294 +++ b/drivers/nvme/target/admin-cmd.c
45295 @@ -307,7 +307,7 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
45296         case NVME_LOG_ANA:
45297                 return nvmet_execute_get_log_page_ana(req);
45298         }
45299 -       pr_err("unhandled lid %d on qid %d\n",
45300 +       pr_debug("unhandled lid %d on qid %d\n",
45301                req->cmd->get_log_page.lid, req->sq->qid);
45302         req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
45303         nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
45304 @@ -659,7 +659,7 @@ static void nvmet_execute_identify(struct nvmet_req *req)
45305                 return nvmet_execute_identify_desclist(req);
45306         }
45308 -       pr_err("unhandled identify cns %d on qid %d\n",
45309 +       pr_debug("unhandled identify cns %d on qid %d\n",
45310                req->cmd->identify.cns, req->sq->qid);
45311         req->error_loc = offsetof(struct nvme_identify, cns);
45312         nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
45313 @@ -919,15 +919,21 @@ void nvmet_execute_async_event(struct nvmet_req *req)
45314  void nvmet_execute_keep_alive(struct nvmet_req *req)
45316         struct nvmet_ctrl *ctrl = req->sq->ctrl;
45317 +       u16 status = 0;
45319         if (!nvmet_check_transfer_len(req, 0))
45320                 return;
45322 +       if (!ctrl->kato) {
45323 +               status = NVME_SC_KA_TIMEOUT_INVALID;
45324 +               goto out;
45325 +       }
45327         pr_debug("ctrl %d update keep-alive timer for %d secs\n",
45328                 ctrl->cntlid, ctrl->kato);
45330         mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
45331 -       nvmet_req_complete(req, 0);
45332 +out:
45333 +       nvmet_req_complete(req, status);
45336  u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
45337 @@ -971,7 +977,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
45338                 return 0;
45339         }
45341 -       pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
45342 +       pr_debug("unhandled cmd %d on qid %d\n", cmd->common.opcode,
45343                req->sq->qid);
45344         req->error_loc = offsetof(struct nvme_common_command, opcode);
45345         return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
45346 diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
45347 index 682854e0e079..4845d12e374a 100644
45348 --- a/drivers/nvme/target/discovery.c
45349 +++ b/drivers/nvme/target/discovery.c
45350 @@ -178,12 +178,14 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
45351         if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
45352                 req->error_loc =
45353                         offsetof(struct nvme_get_log_page_command, lid);
45354 -               status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
45355 +               status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
45356                 goto out;
45357         }
45359         /* Spec requires dword aligned offsets */
45360         if (offset & 0x3) {
45361 +               req->error_loc =
45362 +                       offsetof(struct nvme_get_log_page_command, lpo);
45363                 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
45364                 goto out;
45365         }
45366 @@ -250,7 +252,7 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
45368         if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
45369                 req->error_loc = offsetof(struct nvme_identify, cns);
45370 -               status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
45371 +               status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
45372                 goto out;
45373         }
45375 diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
45376 index 9a8b3726a37c..429263ca9b97 100644
45377 --- a/drivers/nvme/target/io-cmd-bdev.c
45378 +++ b/drivers/nvme/target/io-cmd-bdev.c
45379 @@ -258,7 +258,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
45381         sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
45383 -       if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
45384 +       if (nvmet_use_inline_bvec(req)) {
45385                 bio = &req->b.inline_bio;
45386                 bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
45387         } else {
45388 diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
45389 index 4b84edb49f22..5aad34b106dc 100644
45390 --- a/drivers/nvme/target/nvmet.h
45391 +++ b/drivers/nvme/target/nvmet.h
45392 @@ -614,4 +614,10 @@ static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
45393         return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
45396 +static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
45398 +       return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN &&
45399 +              req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
45402  #endif /* _NVMET_H */
45403 diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
45404 index 2798944899b7..39b1473f7204 100644
45405 --- a/drivers/nvme/target/passthru.c
45406 +++ b/drivers/nvme/target/passthru.c
45407 @@ -194,7 +194,7 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
45408         if (req->sg_cnt > BIO_MAX_VECS)
45409                 return -EINVAL;
45411 -       if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
45412 +       if (nvmet_use_inline_bvec(req)) {
45413                 bio = &req->p.inline_bio;
45414                 bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
45415         } else {
45416 diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
45417 index 6c1f3ab7649c..7d607f435e36 100644
45418 --- a/drivers/nvme/target/rdma.c
45419 +++ b/drivers/nvme/target/rdma.c
45420 @@ -700,7 +700,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
45422         struct nvmet_rdma_rsp *rsp =
45423                 container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
45424 -       struct nvmet_rdma_queue *queue = cq->cq_context;
45425 +       struct nvmet_rdma_queue *queue = wc->qp->qp_context;
45427         nvmet_rdma_release_rsp(rsp);
45429 @@ -786,7 +786,7 @@ static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc)
45431         struct nvmet_rdma_rsp *rsp =
45432                 container_of(wc->wr_cqe, struct nvmet_rdma_rsp, write_cqe);
45433 -       struct nvmet_rdma_queue *queue = cq->cq_context;
45434 +       struct nvmet_rdma_queue *queue = wc->qp->qp_context;
45435         struct rdma_cm_id *cm_id = rsp->queue->cm_id;
45436         u16 status;
45438 diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
45439 index d658c6e8263a..d958b5da9b88 100644
45440 --- a/drivers/nvme/target/tcp.c
45441 +++ b/drivers/nvme/target/tcp.c
45442 @@ -525,11 +525,36 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
45443         struct nvmet_tcp_cmd *cmd =
45444                 container_of(req, struct nvmet_tcp_cmd, req);
45445         struct nvmet_tcp_queue  *queue = cmd->queue;
45446 +       struct nvme_sgl_desc *sgl;
45447 +       u32 len;
45449 +       if (unlikely(cmd == queue->cmd)) {
45450 +               sgl = &cmd->req.cmd->common.dptr.sgl;
45451 +               len = le32_to_cpu(sgl->length);
45453 +               /*
45454 +                * Wait for inline data before processing the response.
45455 +                * Avoid using helpers, this might happen before
45456 +                * nvmet_req_init is completed.
45457 +                */
45458 +               if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
45459 +                   len && len < cmd->req.port->inline_data_size &&
45460 +                   nvme_is_write(cmd->req.cmd))
45461 +                       return;
45462 +       }
45464         llist_add(&cmd->lentry, &queue->resp_list);
45465         queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
45468 +static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
45470 +       if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
45471 +               nvmet_tcp_queue_response(&cmd->req);
45472 +       else
45473 +               cmd->req.execute(&cmd->req);
45476  static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
45478         u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
45479 @@ -961,7 +986,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
45480                         le32_to_cpu(req->cmd->common.dptr.sgl.length));
45482                 nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
45483 -               return -EAGAIN;
45484 +               return 0;
45485         }
45487         ret = nvmet_tcp_map_data(queue->cmd);
45488 @@ -1104,10 +1129,8 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
45489                 return 0;
45490         }
45492 -       if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
45493 -           cmd->rbytes_done == cmd->req.transfer_len) {
45494 -               cmd->req.execute(&cmd->req);
45495 -       }
45496 +       if (cmd->rbytes_done == cmd->req.transfer_len)
45497 +               nvmet_tcp_execute_request(cmd);
45499         nvmet_prepare_receive_pdu(queue);
45500         return 0;
45501 @@ -1144,9 +1167,9 @@ static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
45502                 goto out;
45503         }
45505 -       if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
45506 -           cmd->rbytes_done == cmd->req.transfer_len)
45507 -               cmd->req.execute(&cmd->req);
45508 +       if (cmd->rbytes_done == cmd->req.transfer_len)
45509 +               nvmet_tcp_execute_request(cmd);
45511         ret = 0;
45512  out:
45513         nvmet_prepare_receive_pdu(queue);
45514 @@ -1434,7 +1457,7 @@ static void nvmet_tcp_state_change(struct sock *sk)
45516         struct nvmet_tcp_queue *queue;
45518 -       write_lock_bh(&sk->sk_callback_lock);
45519 +       read_lock_bh(&sk->sk_callback_lock);
45520         queue = sk->sk_user_data;
45521         if (!queue)
45522                 goto done;
45523 @@ -1452,7 +1475,7 @@ static void nvmet_tcp_state_change(struct sock *sk)
45524                         queue->idx, sk->sk_state);
45525         }
45526  done:
45527 -       write_unlock_bh(&sk->sk_callback_lock);
45528 +       read_unlock_bh(&sk->sk_callback_lock);
45531  static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
45532 diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
45533 index 75d2594c16e1..267a0d9e99ba 100644
45534 --- a/drivers/nvmem/Kconfig
45535 +++ b/drivers/nvmem/Kconfig
45536 @@ -272,6 +272,7 @@ config SPRD_EFUSE
45538  config NVMEM_RMEM
45539         tristate "Reserved Memory Based Driver Support"
45540 +       depends on HAS_IOMEM
45541         help
45542           This driver maps reserved memory into an nvmem device. It might be
45543           useful to expose information left by firmware in memory.
45544 diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
45545 index 6cace24dfbf7..100d69d8f2e1 100644
45546 --- a/drivers/nvmem/qfprom.c
45547 +++ b/drivers/nvmem/qfprom.c
45548 @@ -127,6 +127,16 @@ static void qfprom_disable_fuse_blowing(const struct qfprom_priv *priv,
45550         int ret;
45552 +       /*
45553 +        * This may be a shared rail and may be able to run at a lower rate
45554 +        * when we're not blowing fuses.  At the moment, the regulator framework
45555 +        * applies voltage constraints even on disabled rails, so remove our
45556 +        * constraints and allow the rail to be adjusted by other users.
45557 +        */
45558 +       ret = regulator_set_voltage(priv->vcc, 0, INT_MAX);
45559 +       if (ret)
45560 +               dev_warn(priv->dev, "Failed to set 0 voltage (ignoring)\n");
45562         ret = regulator_disable(priv->vcc);
45563         if (ret)
45564                 dev_warn(priv->dev, "Failed to disable regulator (ignoring)\n");
45565 @@ -172,6 +182,17 @@ static int qfprom_enable_fuse_blowing(const struct qfprom_priv *priv,
45566                 goto err_clk_prepared;
45567         }
45569 +       /*
45570 +        * Hardware requires 1.8V min for fuse blowing; this may be
45571 +        * a rail shared do don't specify a max--regulator constraints
45572 +        * will handle.
45573 +        */
45574 +       ret = regulator_set_voltage(priv->vcc, 1800000, INT_MAX);
45575 +       if (ret) {
45576 +               dev_err(priv->dev, "Failed to set 1.8 voltage\n");
45577 +               goto err_clk_rate_set;
45578 +       }
45580         ret = regulator_enable(priv->vcc);
45581         if (ret) {
45582                 dev_err(priv->dev, "Failed to enable regulator\n");
45583 diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
45584 index 23effe5e50ec..2d132949572d 100644
45585 --- a/drivers/of/overlay.c
45586 +++ b/drivers/of/overlay.c
45587 @@ -796,6 +796,7 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs,
45588                 if (!fragment->target) {
45589                         of_node_put(fragment->overlay);
45590                         ret = -EINVAL;
45591 +                       of_node_put(node);
45592                         goto err_free_fragments;
45593                 }
45595 diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c
45596 index 4547ac44c8d4..8fa1a7fdf12c 100644
45597 --- a/drivers/parport/ieee1284.c
45598 +++ b/drivers/parport/ieee1284.c
45599 @@ -202,7 +202,7 @@ int parport_wait_peripheral(struct parport *port,
45600                         /* parport_wait_event didn't time out, but the
45601                          * peripheral wasn't actually ready either.
45602                          * Wait for another 10ms. */
45603 -                       schedule_timeout_interruptible(msecs_to_jiffies(10));
45604 +                       schedule_msec_hrtimeout_interruptible((10));
45605                 }
45606         }
45608 diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c
45609 index 2c11bd3fe1fd..8cb6b61c0880 100644
45610 --- a/drivers/parport/ieee1284_ops.c
45611 +++ b/drivers/parport/ieee1284_ops.c
45612 @@ -520,7 +520,7 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port,
45613                         /* Yield the port for a while. */
45614                         if (count && dev->port->irq != PARPORT_IRQ_NONE) {
45615                                 parport_release (dev);
45616 -                               schedule_timeout_interruptible(msecs_to_jiffies(40));
45617 +                               schedule_msec_hrtimeout_interruptible((40));
45618                                 parport_claim_or_block (dev);
45619                         }
45620                         else
45621 diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
45622 index 53aa35cb3a49..a59ecbec601f 100644
45623 --- a/drivers/pci/controller/dwc/pci-keystone.c
45624 +++ b/drivers/pci/controller/dwc/pci-keystone.c
45625 @@ -798,7 +798,8 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)
45626         int ret;
45628         pp->bridge->ops = &ks_pcie_ops;
45629 -       pp->bridge->child_ops = &ks_child_pcie_ops;
45630 +       if (!ks_pcie->is_am6)
45631 +               pp->bridge->child_ops = &ks_child_pcie_ops;
45633         ret = ks_pcie_config_legacy_irq(ks_pcie);
45634         if (ret)
45635 diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
45636 index 1c25d8337151..8d028a88b375 100644
45637 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c
45638 +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
45639 @@ -705,6 +705,8 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
45640                 }
45641         }
45643 +       dw_pcie_iatu_detect(pci);
45645         res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
45646         if (!res)
45647                 return -EINVAL;
45648 diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
45649 index 7e55b2b66182..24192b40e3a2 100644
45650 --- a/drivers/pci/controller/dwc/pcie-designware-host.c
45651 +++ b/drivers/pci/controller/dwc/pcie-designware-host.c
45652 @@ -398,6 +398,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
45653                 if (ret)
45654                         goto err_free_msi;
45655         }
45656 +       dw_pcie_iatu_detect(pci);
45658         dw_pcie_setup_rc(pp);
45659         dw_pcie_msi_init(pp);
45660 diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
45661 index 004cb860e266..a945f0c0e73d 100644
45662 --- a/drivers/pci/controller/dwc/pcie-designware.c
45663 +++ b/drivers/pci/controller/dwc/pcie-designware.c
45664 @@ -660,11 +660,9 @@ static void dw_pcie_iatu_detect_regions(struct dw_pcie *pci)
45665         pci->num_ob_windows = ob;
45668 -void dw_pcie_setup(struct dw_pcie *pci)
45669 +void dw_pcie_iatu_detect(struct dw_pcie *pci)
45671 -       u32 val;
45672         struct device *dev = pci->dev;
45673 -       struct device_node *np = dev->of_node;
45674         struct platform_device *pdev = to_platform_device(dev);
45676         if (pci->version >= 0x480A || (!pci->version &&
45677 @@ -693,6 +691,13 @@ void dw_pcie_setup(struct dw_pcie *pci)
45679         dev_info(pci->dev, "Detected iATU regions: %u outbound, %u inbound",
45680                  pci->num_ob_windows, pci->num_ib_windows);
45683 +void dw_pcie_setup(struct dw_pcie *pci)
45685 +       u32 val;
45686 +       struct device *dev = pci->dev;
45687 +       struct device_node *np = dev->of_node;
45689         if (pci->link_gen > 0)
45690                 dw_pcie_link_set_max_speed(pci, pci->link_gen);
45691 diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
45692 index 7247c8b01f04..7d6e9b7576be 100644
45693 --- a/drivers/pci/controller/dwc/pcie-designware.h
45694 +++ b/drivers/pci/controller/dwc/pcie-designware.h
45695 @@ -306,6 +306,7 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
45696  void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
45697                          enum dw_pcie_region_type type);
45698  void dw_pcie_setup(struct dw_pcie *pci);
45699 +void dw_pcie_iatu_detect(struct dw_pcie *pci);
45701  static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
45703 diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
45704 index 6fa216e52d14..0e94190ca4e8 100644
45705 --- a/drivers/pci/controller/dwc/pcie-tegra194.c
45706 +++ b/drivers/pci/controller/dwc/pcie-tegra194.c
45707 @@ -1645,7 +1645,7 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
45708         if (pcie->ep_state == EP_STATE_ENABLED)
45709                 return;
45711 -       ret = pm_runtime_get_sync(dev);
45712 +       ret = pm_runtime_resume_and_get(dev);
45713         if (ret < 0) {
45714                 dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
45715                         ret);
45716 diff --git a/drivers/pci/controller/pci-thunder-ecam.c b/drivers/pci/controller/pci-thunder-ecam.c
45717 index f964fd26f7e0..ffd84656544f 100644
45718 --- a/drivers/pci/controller/pci-thunder-ecam.c
45719 +++ b/drivers/pci/controller/pci-thunder-ecam.c
45720 @@ -116,7 +116,7 @@ static int thunder_ecam_p2_config_read(struct pci_bus *bus, unsigned int devfn,
45721          * the config space access window.  Since we are working with
45722          * the high-order 32 bits, shift everything down by 32 bits.
45723          */
45724 -       node_bits = (cfg->res.start >> 32) & (1 << 12);
45725 +       node_bits = upper_32_bits(cfg->res.start) & (1 << 12);
45727         v |= node_bits;
45728         set_val(v, where, size, val);
45729 diff --git a/drivers/pci/controller/pci-thunder-pem.c b/drivers/pci/controller/pci-thunder-pem.c
45730 index 1a3f70ac61fc..0660b9da204f 100644
45731 --- a/drivers/pci/controller/pci-thunder-pem.c
45732 +++ b/drivers/pci/controller/pci-thunder-pem.c
45733 @@ -12,6 +12,7 @@
45734  #include <linux/pci-acpi.h>
45735  #include <linux/pci-ecam.h>
45736  #include <linux/platform_device.h>
45737 +#include <linux/io-64-nonatomic-lo-hi.h>
45738  #include "../pci.h"
45740  #if defined(CONFIG_PCI_HOST_THUNDER_PEM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
45741 @@ -324,9 +325,9 @@ static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg,
45742          * structure here for the BAR.
45743          */
45744         bar4_start = res_pem->start + 0xf00000;
45745 -       pem_pci->ea_entry[0] = (u32)bar4_start | 2;
45746 -       pem_pci->ea_entry[1] = (u32)(res_pem->end - bar4_start) & ~3u;
45747 -       pem_pci->ea_entry[2] = (u32)(bar4_start >> 32);
45748 +       pem_pci->ea_entry[0] = lower_32_bits(bar4_start) | 2;
45749 +       pem_pci->ea_entry[1] = lower_32_bits(res_pem->end - bar4_start) & ~3u;
45750 +       pem_pci->ea_entry[2] = upper_32_bits(bar4_start);
45752         cfg->priv = pem_pci;
45753         return 0;
45754 @@ -334,9 +335,9 @@ static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg,
45756  #if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
45758 -#define PEM_RES_BASE           0x87e0c0000000UL
45759 -#define PEM_NODE_MASK          GENMASK(45, 44)
45760 -#define PEM_INDX_MASK          GENMASK(26, 24)
45761 +#define PEM_RES_BASE           0x87e0c0000000ULL
45762 +#define PEM_NODE_MASK          GENMASK_ULL(45, 44)
45763 +#define PEM_INDX_MASK          GENMASK_ULL(26, 24)
45764  #define PEM_MIN_DOM_IN_NODE    4
45765  #define PEM_MAX_DOM_IN_NODE    10
45767 diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
45768 index 2afdc865253e..7f503dd4ff81 100644
45769 --- a/drivers/pci/controller/pci-xgene.c
45770 +++ b/drivers/pci/controller/pci-xgene.c
45771 @@ -354,7 +354,8 @@ static int xgene_pcie_map_reg(struct xgene_pcie_port *port,
45772         if (IS_ERR(port->csr_base))
45773                 return PTR_ERR(port->csr_base);
45775 -       port->cfg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
45776 +       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
45777 +       port->cfg_base = devm_ioremap_resource(dev, res);
45778         if (IS_ERR(port->cfg_base))
45779                 return PTR_ERR(port->cfg_base);
45780         port->cfg_addr = res->start;
45781 diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
45782 index e330e6811f0b..08bc788d9422 100644
45783 --- a/drivers/pci/controller/pcie-brcmstb.c
45784 +++ b/drivers/pci/controller/pcie-brcmstb.c
45785 @@ -1148,6 +1148,7 @@ static int brcm_pcie_suspend(struct device *dev)
45787         brcm_pcie_turn_off(pcie);
45788         ret = brcm_phy_stop(pcie);
45789 +       reset_control_rearm(pcie->rescal);
45790         clk_disable_unprepare(pcie->clk);
45792         return ret;
45793 @@ -1163,9 +1164,13 @@ static int brcm_pcie_resume(struct device *dev)
45794         base = pcie->base;
45795         clk_prepare_enable(pcie->clk);
45797 +       ret = reset_control_reset(pcie->rescal);
45798 +       if (ret)
45799 +               goto err_disable_clk;
45801         ret = brcm_phy_start(pcie);
45802         if (ret)
45803 -               goto err;
45804 +               goto err_reset;
45806         /* Take bridge out of reset so we can access the SERDES reg */
45807         pcie->bridge_sw_init_set(pcie, 0);
45808 @@ -1180,14 +1185,16 @@ static int brcm_pcie_resume(struct device *dev)
45810         ret = brcm_pcie_setup(pcie);
45811         if (ret)
45812 -               goto err;
45813 +               goto err_reset;
45815         if (pcie->msi)
45816                 brcm_msi_set_regs(pcie->msi);
45818         return 0;
45820 -err:
45821 +err_reset:
45822 +       reset_control_rearm(pcie->rescal);
45823 +err_disable_clk:
45824         clk_disable_unprepare(pcie->clk);
45825         return ret;
45827 @@ -1197,7 +1204,7 @@ static void __brcm_pcie_remove(struct brcm_pcie *pcie)
45828         brcm_msi_remove(pcie);
45829         brcm_pcie_turn_off(pcie);
45830         brcm_phy_stop(pcie);
45831 -       reset_control_assert(pcie->rescal);
45832 +       reset_control_rearm(pcie->rescal);
45833         clk_disable_unprepare(pcie->clk);
45836 @@ -1278,13 +1285,13 @@ static int brcm_pcie_probe(struct platform_device *pdev)
45837                 return PTR_ERR(pcie->perst_reset);
45838         }
45840 -       ret = reset_control_deassert(pcie->rescal);
45841 +       ret = reset_control_reset(pcie->rescal);
45842         if (ret)
45843                 dev_err(&pdev->dev, "failed to deassert 'rescal'\n");
45845         ret = brcm_phy_start(pcie);
45846         if (ret) {
45847 -               reset_control_assert(pcie->rescal);
45848 +               reset_control_rearm(pcie->rescal);
45849                 clk_disable_unprepare(pcie->clk);
45850                 return ret;
45851         }
45852 @@ -1296,6 +1303,7 @@ static int brcm_pcie_probe(struct platform_device *pdev)
45853         pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION);
45854         if (pcie->type == BCM4908 && pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {
45855                 dev_err(pcie->dev, "hardware revision with unsupported PERST# setup\n");
45856 +               ret = -ENODEV;
45857                 goto fail;
45858         }
45860 diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
45861 index 908475d27e0e..eede4e8f3f75 100644
45862 --- a/drivers/pci/controller/pcie-iproc-msi.c
45863 +++ b/drivers/pci/controller/pcie-iproc-msi.c
45864 @@ -271,7 +271,7 @@ static int iproc_msi_irq_domain_alloc(struct irq_domain *domain,
45865                                     NULL, NULL);
45866         }
45868 -       return hwirq;
45869 +       return 0;
45872  static void iproc_msi_irq_domain_free(struct irq_domain *domain,
45873 diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
45874 index c0ac4e9cbe72..f9760e73d568 100644
45875 --- a/drivers/pci/endpoint/functions/pci-epf-test.c
45876 +++ b/drivers/pci/endpoint/functions/pci-epf-test.c
45877 @@ -833,15 +833,18 @@ static int pci_epf_test_bind(struct pci_epf *epf)
45878                 return -EINVAL;
45880         epc_features = pci_epc_get_features(epc, epf->func_no);
45881 -       if (epc_features) {
45882 -               linkup_notifier = epc_features->linkup_notifier;
45883 -               core_init_notifier = epc_features->core_init_notifier;
45884 -               test_reg_bar = pci_epc_get_first_free_bar(epc_features);
45885 -               if (test_reg_bar < 0)
45886 -                       return -EINVAL;
45887 -               pci_epf_configure_bar(epf, epc_features);
45888 +       if (!epc_features) {
45889 +               dev_err(&epf->dev, "epc_features not implemented\n");
45890 +               return -EOPNOTSUPP;
45891         }
45893 +       linkup_notifier = epc_features->linkup_notifier;
45894 +       core_init_notifier = epc_features->core_init_notifier;
45895 +       test_reg_bar = pci_epc_get_first_free_bar(epc_features);
45896 +       if (test_reg_bar < 0)
45897 +               return -EINVAL;
45898 +       pci_epf_configure_bar(epf, epc_features);
45900         epf_test->test_reg_bar = test_reg_bar;
45901         epf_test->epc_features = epc_features;
45903 @@ -922,6 +925,7 @@ static int __init pci_epf_test_init(void)
45905         ret = pci_epf_register_driver(&test_driver);
45906         if (ret) {
45907 +               destroy_workqueue(kpcitest_workqueue);
45908                 pr_err("Failed to register pci epf test driver --> %d\n", ret);
45909                 return ret;
45910         }
45911 @@ -932,6 +936,8 @@ module_init(pci_epf_test_init);
45913  static void __exit pci_epf_test_exit(void)
45915 +       if (kpcitest_workqueue)
45916 +               destroy_workqueue(kpcitest_workqueue);
45917         pci_epf_unregister_driver(&test_driver);
45919  module_exit(pci_epf_test_exit);
45920 diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
45921 index 3365c93abf0e..f031302ad401 100644
45922 --- a/drivers/pci/hotplug/acpiphp_glue.c
45923 +++ b/drivers/pci/hotplug/acpiphp_glue.c
45924 @@ -533,6 +533,7 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
45925                         slot->flags &= ~SLOT_ENABLED;
45926                         continue;
45927                 }
45928 +               pci_dev_put(dev);
45929         }
45932 diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
45933 index 16a17215f633..e4d4e399004b 100644
45934 --- a/drivers/pci/pci.c
45935 +++ b/drivers/pci/pci.c
45936 @@ -1870,20 +1870,10 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
45937         int err;
45938         int i, bars = 0;
45940 -       /*
45941 -        * Power state could be unknown at this point, either due to a fresh
45942 -        * boot or a device removal call.  So get the current power state
45943 -        * so that things like MSI message writing will behave as expected
45944 -        * (e.g. if the device really is in D0 at enable time).
45945 -        */
45946 -       if (dev->pm_cap) {
45947 -               u16 pmcsr;
45948 -               pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
45949 -               dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
45950 -       }
45952 -       if (atomic_inc_return(&dev->enable_cnt) > 1)
45953 +       if (atomic_inc_return(&dev->enable_cnt) > 1) {
45954 +               pci_update_current_state(dev, dev->current_state);
45955                 return 0;               /* already enabled */
45956 +       }
45958         bridge = pci_upstream_bridge(dev);
45959         if (bridge)
45960 diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
45961 index ef7c4661314f..9684b468267f 100644
45962 --- a/drivers/pci/pci.h
45963 +++ b/drivers/pci/pci.h
45964 @@ -624,6 +624,12 @@ static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe)
45965  #if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
45966  int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
45967                           struct resource *res);
45968 +#else
45969 +static inline int acpi_get_rc_resources(struct device *dev, const char *hid,
45970 +                                       u16 segment, struct resource *res)
45972 +       return -ENODEV;
45974  #endif
45976  int pci_rebar_get_current_size(struct pci_dev *pdev, int bar);
45977 diff --git a/drivers/pci/pcie/rcec.c b/drivers/pci/pcie/rcec.c
45978 index 2c5c552994e4..d0bcd141ac9c 100644
45979 --- a/drivers/pci/pcie/rcec.c
45980 +++ b/drivers/pci/pcie/rcec.c
45981 @@ -32,7 +32,7 @@ static bool rcec_assoc_rciep(struct pci_dev *rcec, struct pci_dev *rciep)
45983         /* Same bus, so check bitmap */
45984         for_each_set_bit(devn, &bitmap, 32)
45985 -               if (devn == rciep->devfn)
45986 +               if (devn == PCI_SLOT(rciep->devfn))
45987                         return true;
45989         return false;
45990 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
45991 index 953f15abc850..be51670572fa 100644
45992 --- a/drivers/pci/probe.c
45993 +++ b/drivers/pci/probe.c
45994 @@ -2353,6 +2353,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
45995         pci_set_of_node(dev);
45997         if (pci_setup_device(dev)) {
45998 +               pci_release_of_node(dev);
45999                 pci_bus_put(dev->bus);
46000                 kfree(dev);
46001                 return NULL;
46002 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
46003 index 653660e3ba9e..c87fd7a275e4 100644
46004 --- a/drivers/pci/quirks.c
46005 +++ b/drivers/pci/quirks.c
46006 @@ -3558,6 +3558,106 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
46007         dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
46010 +static bool acs_on_downstream;
46011 +static bool acs_on_multifunction;
46013 +#define NUM_ACS_IDS 16
46014 +struct acs_on_id {
46015 +       unsigned short vendor;
46016 +       unsigned short device;
46018 +static struct acs_on_id acs_on_ids[NUM_ACS_IDS];
46019 +static u8 max_acs_id;
46021 +static __init int pcie_acs_override_setup(char *p)
46023 +       if (!p)
46024 +               return -EINVAL;
46026 +       while (*p) {
46027 +               if (!strncmp(p, "downstream", 10))
46028 +                       acs_on_downstream = true;
46029 +               if (!strncmp(p, "multifunction", 13))
46030 +                       acs_on_multifunction = true;
46031 +               if (!strncmp(p, "id:", 3)) {
46032 +                       char opt[5];
46033 +                       int ret;
46034 +                       long val;
46036 +                       if (max_acs_id >= NUM_ACS_IDS - 1) {
46037 +                               pr_warn("Out of PCIe ACS override slots (%d)\n",
46038 +                                               NUM_ACS_IDS);
46039 +                               goto next;
46040 +                       }
46042 +                       p += 3;
46043 +                       snprintf(opt, 5, "%s", p);
46044 +                       ret = kstrtol(opt, 16, &val);
46045 +                       if (ret) {
46046 +                               pr_warn("PCIe ACS ID parse error %d\n", ret);
46047 +                               goto next;
46048 +                       }
46049 +                       acs_on_ids[max_acs_id].vendor = val;
46051 +                       p += strcspn(p, ":");
46052 +                       if (*p != ':') {
46053 +                               pr_warn("PCIe ACS invalid ID\n");
46054 +                               goto next;
46055 +                       }
46057 +                       p++;
46058 +                       snprintf(opt, 5, "%s", p);
46059 +                       ret = kstrtol(opt, 16, &val);
46060 +                       if (ret) {
46061 +                               pr_warn("PCIe ACS ID parse error %d\n", ret);
46062 +                               goto next;
46063 +                       }
46064 +                       acs_on_ids[max_acs_id].device = val;
46065 +                       max_acs_id++;
46066 +               }
46067 +next:
46068 +               p += strcspn(p, ",");
46069 +               if (*p == ',')
46070 +                       p++;
46071 +       }
46073 +       if (acs_on_downstream || acs_on_multifunction || max_acs_id)
46074 +               pr_warn("Warning: PCIe ACS overrides enabled; This may allow non-IOMMU protected peer-to-peer DMA\n");
46076 +       return 0;
46078 +early_param("pcie_acs_override", pcie_acs_override_setup);
46080 +static int pcie_acs_overrides(struct pci_dev *dev, u16 acs_flags)
46082 +       int i;
46084 +       /* Never override ACS for legacy devices or devices with ACS caps */
46085 +       if (!pci_is_pcie(dev) ||
46086 +               pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS))
46087 +                       return -ENOTTY;
46089 +       for (i = 0; i < max_acs_id; i++)
46090 +               if (acs_on_ids[i].vendor == dev->vendor &&
46091 +                       acs_on_ids[i].device == dev->device)
46092 +                               return 1;
46094 +       switch (pci_pcie_type(dev)) {
46095 +       case PCI_EXP_TYPE_DOWNSTREAM:
46096 +       case PCI_EXP_TYPE_ROOT_PORT:
46097 +               if (acs_on_downstream)
46098 +                       return 1;
46099 +               break;
46100 +       case PCI_EXP_TYPE_ENDPOINT:
46101 +       case PCI_EXP_TYPE_UPSTREAM:
46102 +       case PCI_EXP_TYPE_LEG_END:
46103 +       case PCI_EXP_TYPE_RC_END:
46104 +               if (acs_on_multifunction && dev->multifunction)
46105 +                       return 1;
46106 +       }
46108 +       return -ENOTTY;
46110  /*
46111   * Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset.
46112   * The device will throw a Link Down error on AER-capable systems and
46113 @@ -4773,6 +4873,7 @@ static const struct pci_dev_acs_enabled {
46114         { PCI_VENDOR_ID_ZHAOXIN, 0x9083, pci_quirk_mf_endpoint_acs },
46115         /* Zhaoxin Root/Downstream Ports */
46116         { PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs },
46117 +       { PCI_ANY_ID, PCI_ANY_ID, pcie_acs_overrides },
46118         { 0 }
46119  };
46121 diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c
46122 index 7915d10f9aa1..bd549070c011 100644
46123 --- a/drivers/pci/vpd.c
46124 +++ b/drivers/pci/vpd.c
46125 @@ -570,7 +570,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd);
46126  DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
46127  DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
46128                 quirk_blacklist_vpd);
46129 -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd);
46130  /*
46131   * The Amazon Annapurna Labs 0x0031 device id is reused for other non Root Port
46132   * device types, so the quirk is registered for the PCI_CLASS_BRIDGE_PCI class.
46133 diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
46134 index 933bd8410fc2..ef9676418c9f 100644
46135 --- a/drivers/perf/arm_pmu_platform.c
46136 +++ b/drivers/perf/arm_pmu_platform.c
46137 @@ -6,6 +6,7 @@
46138   * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
46139   */
46140  #define pr_fmt(fmt) "hw perfevents: " fmt
46141 +#define dev_fmt pr_fmt
46143  #include <linux/bug.h>
46144  #include <linux/cpumask.h>
46145 @@ -100,10 +101,8 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
46146         struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
46148         num_irqs = platform_irq_count(pdev);
46149 -       if (num_irqs < 0) {
46150 -               pr_err("unable to count PMU IRQs\n");
46151 -               return num_irqs;
46152 -       }
46153 +       if (num_irqs < 0)
46154 +               return dev_err_probe(&pdev->dev, num_irqs, "unable to count PMU IRQs\n");
46156         /*
46157          * In this case we have no idea which CPUs are covered by the PMU.
46158 @@ -236,7 +235,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
46160         ret = armpmu_register(pmu);
46161         if (ret)
46162 -               goto out_free;
46163 +               goto out_free_irqs;
46165         return 0;
46167 diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
46168 index 26a0badabe38..19f32ae877b9 100644
46169 --- a/drivers/phy/cadence/phy-cadence-sierra.c
46170 +++ b/drivers/phy/cadence/phy-cadence-sierra.c
46171 @@ -319,6 +319,12 @@ static int cdns_sierra_phy_on(struct phy *gphy)
46172         u32 val;
46173         int ret;
46175 +       ret = reset_control_deassert(sp->phy_rst);
46176 +       if (ret) {
46177 +               dev_err(dev, "Failed to take the PHY out of reset\n");
46178 +               return ret;
46179 +       }
46181         /* Take the PHY lane group out of reset */
46182         ret = reset_control_deassert(ins->lnk_rst);
46183         if (ret) {
46184 @@ -616,7 +622,6 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
46186         pm_runtime_enable(dev);
46187         phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
46188 -       reset_control_deassert(sp->phy_rst);
46189         return PTR_ERR_OR_ZERO(phy_provider);
46191  put_child:
46192 diff --git a/drivers/phy/ingenic/phy-ingenic-usb.c b/drivers/phy/ingenic/phy-ingenic-usb.c
46193 index ea127b177f46..28c28d816484 100644
46194 --- a/drivers/phy/ingenic/phy-ingenic-usb.c
46195 +++ b/drivers/phy/ingenic/phy-ingenic-usb.c
46196 @@ -352,8 +352,8 @@ static int ingenic_usb_phy_probe(struct platform_device *pdev)
46197         }
46199         priv->phy = devm_phy_create(dev, NULL, &ingenic_usb_phy_ops);
46200 -       if (IS_ERR(priv))
46201 -               return PTR_ERR(priv);
46202 +       if (IS_ERR(priv->phy))
46203 +               return PTR_ERR(priv->phy);
46205         phy_set_drvdata(priv->phy, priv);
46207 diff --git a/drivers/phy/marvell/Kconfig b/drivers/phy/marvell/Kconfig
46208 index 6c96f2bf5266..c8ee23fc3a83 100644
46209 --- a/drivers/phy/marvell/Kconfig
46210 +++ b/drivers/phy/marvell/Kconfig
46211 @@ -3,8 +3,8 @@
46212  # Phy drivers for Marvell platforms
46214  config ARMADA375_USBCLUSTER_PHY
46215 -       def_bool y
46216 -       depends on MACH_ARMADA_375 || COMPILE_TEST
46217 +       bool "Armada 375 USB cluster PHY support" if COMPILE_TEST
46218 +       default y if MACH_ARMADA_375
46219         depends on OF && HAS_IOMEM
46220         select GENERIC_PHY
46222 diff --git a/drivers/phy/ralink/phy-mt7621-pci.c b/drivers/phy/ralink/phy-mt7621-pci.c
46223 index 9a610b414b1f..753cb5bab930 100644
46224 --- a/drivers/phy/ralink/phy-mt7621-pci.c
46225 +++ b/drivers/phy/ralink/phy-mt7621-pci.c
46226 @@ -62,7 +62,7 @@
46228  #define RG_PE1_FRC_MSTCKDIV                    BIT(5)
46230 -#define XTAL_MASK                              GENMASK(7, 6)
46231 +#define XTAL_MASK                              GENMASK(8, 6)
46233  #define MAX_PHYS       2
46235 @@ -319,9 +319,9 @@ static int mt7621_pci_phy_probe(struct platform_device *pdev)
46236                 return PTR_ERR(phy->regmap);
46238         phy->phy = devm_phy_create(dev, dev->of_node, &mt7621_pci_phy_ops);
46239 -       if (IS_ERR(phy)) {
46240 +       if (IS_ERR(phy->phy)) {
46241                 dev_err(dev, "failed to create phy\n");
46242 -               return PTR_ERR(phy);
46243 +               return PTR_ERR(phy->phy);
46244         }
46246         phy_set_drvdata(phy->phy, phy);
46247 diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
46248 index c9cfafe89cbf..e28e25f98708 100644
46249 --- a/drivers/phy/ti/phy-j721e-wiz.c
46250 +++ b/drivers/phy/ti/phy-j721e-wiz.c
46251 @@ -615,6 +615,12 @@ static void wiz_clock_cleanup(struct wiz *wiz, struct device_node *node)
46252                 of_clk_del_provider(clk_node);
46253                 of_node_put(clk_node);
46254         }
46256 +       for (i = 0; i < wiz->clk_div_sel_num; i++) {
46257 +               clk_node = of_get_child_by_name(node, clk_div_sel[i].node_name);
46258 +               of_clk_del_provider(clk_node);
46259 +               of_node_put(clk_node);
46260 +       }
46263  static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
46264 @@ -947,27 +953,24 @@ static int wiz_probe(struct platform_device *pdev)
46265                 goto err_get_sync;
46266         }
46268 +       ret = wiz_init(wiz);
46269 +       if (ret) {
46270 +               dev_err(dev, "WIZ initialization failed\n");
46271 +               goto err_wiz_init;
46272 +       }
46274         serdes_pdev = of_platform_device_create(child_node, NULL, dev);
46275         if (!serdes_pdev) {
46276                 dev_WARN(dev, "Unable to create SERDES platform device\n");
46277                 ret = -ENOMEM;
46278 -               goto err_pdev_create;
46279 -       }
46280 -       wiz->serdes_pdev = serdes_pdev;
46282 -       ret = wiz_init(wiz);
46283 -       if (ret) {
46284 -               dev_err(dev, "WIZ initialization failed\n");
46285                 goto err_wiz_init;
46286         }
46287 +       wiz->serdes_pdev = serdes_pdev;
46289         of_node_put(child_node);
46290         return 0;
46292  err_wiz_init:
46293 -       of_platform_device_destroy(&serdes_pdev->dev, NULL);
46295 -err_pdev_create:
46296         wiz_clock_cleanup(wiz, node);
46298  err_get_sync:
46299 diff --git a/drivers/phy/ti/phy-twl4030-usb.c b/drivers/phy/ti/phy-twl4030-usb.c
46300 index 9887f908f540..812e5409d359 100644
46301 --- a/drivers/phy/ti/phy-twl4030-usb.c
46302 +++ b/drivers/phy/ti/phy-twl4030-usb.c
46303 @@ -779,7 +779,7 @@ static int twl4030_usb_remove(struct platform_device *pdev)
46305         usb_remove_phy(&twl->phy);
46306         pm_runtime_get_sync(twl->dev);
46307 -       cancel_delayed_work(&twl->id_workaround_work);
46308 +       cancel_delayed_work_sync(&twl->id_workaround_work);
46309         device_remove_file(twl->dev, &dev_attr_vbus);
46311         /* set transceiver mode to power on defaults */
46312 diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
46313 index e71ebccc479c..03c32b2c5d30 100644
46314 --- a/drivers/pinctrl/pinctrl-at91-pio4.c
46315 +++ b/drivers/pinctrl/pinctrl-at91-pio4.c
46316 @@ -801,6 +801,10 @@ static int atmel_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
46318         conf = atmel_pin_config_read(pctldev, pin_id);
46320 +       /* Keep slew rate enabled by default. */
46321 +       if (atmel_pioctrl->slew_rate_support)
46322 +               conf |= ATMEL_PIO_SR_MASK;
46324         for (i = 0; i < num_configs; i++) {
46325                 unsigned int param = pinconf_to_config_param(configs[i]);
46326                 unsigned int arg = pinconf_to_config_argument(configs[i]);
46327 @@ -808,10 +812,6 @@ static int atmel_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
46328                 dev_dbg(pctldev->dev, "%s: pin=%u, config=0x%lx\n",
46329                         __func__, pin_id, configs[i]);
46331 -               /* Keep slew rate enabled by default. */
46332 -               if (atmel_pioctrl->slew_rate_support)
46333 -                       conf |= ATMEL_PIO_SR_MASK;
46335                 switch (param) {
46336                 case PIN_CONFIG_BIAS_DISABLE:
46337                         conf &= (~ATMEL_PIO_PUEN_MASK);
46338 diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
46339 index f2746125b077..3de0f767b7d1 100644
46340 --- a/drivers/pinctrl/pinctrl-ingenic.c
46341 +++ b/drivers/pinctrl/pinctrl-ingenic.c
46342 @@ -667,7 +667,9 @@ static int jz4770_pwm_pwm7_pins[] = { 0x6b, };
46343  static int jz4770_mac_rmii_pins[] = {
46344         0xa9, 0xab, 0xaa, 0xac, 0xa5, 0xa4, 0xad, 0xae, 0xa6, 0xa8,
46345  };
46346 -static int jz4770_mac_mii_pins[] = { 0xa7, 0xaf, };
46347 +static int jz4770_mac_mii_pins[] = {
46348 +       0x7b, 0x7a, 0x7d, 0x7c, 0xa7, 0x24, 0xaf,
46351  static const struct group_desc jz4770_groups[] = {
46352         INGENIC_PIN_GROUP("uart0-data", jz4770_uart0_data, 0),
46353 @@ -2107,26 +2109,48 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev,
46354         enum pin_config_param param = pinconf_to_config_param(*config);
46355         unsigned int idx = pin % PINS_PER_GPIO_CHIP;
46356         unsigned int offt = pin / PINS_PER_GPIO_CHIP;
46357 -       bool pull;
46358 +       unsigned int bias;
46359 +       bool pull, pullup, pulldown;
46361 -       if (jzpc->info->version >= ID_JZ4770)
46362 -               pull = !ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PEN);
46363 -       else
46364 -               pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS);
46365 +       if (jzpc->info->version >= ID_X1830) {
46366 +               unsigned int half = PINS_PER_GPIO_CHIP / 2;
46367 +               unsigned int idxh = (pin % half) * 2;
46369 +               if (idx < half)
46370 +                       regmap_read(jzpc->map, offt * jzpc->info->reg_offset +
46371 +                                       X1830_GPIO_PEL, &bias);
46372 +               else
46373 +                       regmap_read(jzpc->map, offt * jzpc->info->reg_offset +
46374 +                                       X1830_GPIO_PEH, &bias);
46376 +               bias = (bias >> idxh) & (GPIO_PULL_UP | GPIO_PULL_DOWN);
46378 +               pullup = (bias == GPIO_PULL_UP) && (jzpc->info->pull_ups[offt] & BIT(idx));
46379 +               pulldown = (bias == GPIO_PULL_DOWN) && (jzpc->info->pull_downs[offt] & BIT(idx));
46381 +       } else {
46382 +               if (jzpc->info->version >= ID_JZ4770)
46383 +                       pull = !ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PEN);
46384 +               else
46385 +                       pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS);
46387 +               pullup = pull && (jzpc->info->pull_ups[offt] & BIT(idx));
46388 +               pulldown = pull && (jzpc->info->pull_downs[offt] & BIT(idx));
46389 +       }
46391         switch (param) {
46392         case PIN_CONFIG_BIAS_DISABLE:
46393 -               if (pull)
46394 +               if (pullup || pulldown)
46395                         return -EINVAL;
46396                 break;
46398         case PIN_CONFIG_BIAS_PULL_UP:
46399 -               if (!pull || !(jzpc->info->pull_ups[offt] & BIT(idx)))
46400 +               if (!pullup)
46401                         return -EINVAL;
46402                 break;
46404         case PIN_CONFIG_BIAS_PULL_DOWN:
46405 -               if (!pull || !(jzpc->info->pull_downs[offt] & BIT(idx)))
46406 +               if (!pulldown)
46407                         return -EINVAL;
46408                 break;
46410 @@ -2144,7 +2168,7 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
46411         if (jzpc->info->version >= ID_X1830) {
46412                 unsigned int idx = pin % PINS_PER_GPIO_CHIP;
46413                 unsigned int half = PINS_PER_GPIO_CHIP / 2;
46414 -               unsigned int idxh = pin % half * 2;
46415 +               unsigned int idxh = (pin % half) * 2;
46416                 unsigned int offt = pin / PINS_PER_GPIO_CHIP;
46418                 if (idx < half) {
46419 diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
46420 index 7771316dfffa..10890fde9a75 100644
46421 --- a/drivers/pinctrl/pinctrl-single.c
46422 +++ b/drivers/pinctrl/pinctrl-single.c
46423 @@ -270,20 +270,44 @@ static void __maybe_unused pcs_writel(unsigned val, void __iomem *reg)
46424         writel(val, reg);
46427 +static unsigned int pcs_pin_reg_offset_get(struct pcs_device *pcs,
46428 +                                          unsigned int pin)
46430 +       unsigned int mux_bytes = pcs->width / BITS_PER_BYTE;
46432 +       if (pcs->bits_per_mux) {
46433 +               unsigned int pin_offset_bytes;
46435 +               pin_offset_bytes = (pcs->bits_per_pin * pin) / BITS_PER_BYTE;
46436 +               return (pin_offset_bytes / mux_bytes) * mux_bytes;
46437 +       }
46439 +       return pin * mux_bytes;
46442 +static unsigned int pcs_pin_shift_reg_get(struct pcs_device *pcs,
46443 +                                         unsigned int pin)
46445 +       return (pin % (pcs->width / pcs->bits_per_pin)) * pcs->bits_per_pin;
46448  static void pcs_pin_dbg_show(struct pinctrl_dev *pctldev,
46449                                         struct seq_file *s,
46450                                         unsigned pin)
46452         struct pcs_device *pcs;
46453 -       unsigned val, mux_bytes;
46454 +       unsigned int val;
46455         unsigned long offset;
46456         size_t pa;
46458         pcs = pinctrl_dev_get_drvdata(pctldev);
46460 -       mux_bytes = pcs->width / BITS_PER_BYTE;
46461 -       offset = pin * mux_bytes;
46462 +       offset = pcs_pin_reg_offset_get(pcs, pin);
46463         val = pcs->read(pcs->base + offset);
46465 +       if (pcs->bits_per_mux)
46466 +               val &= pcs->fmask << pcs_pin_shift_reg_get(pcs, pin);
46468         pa = pcs->res->start + offset;
46470         seq_printf(s, "%zx %08x %s ", pa, val, DRIVER_NAME);
46471 @@ -384,7 +408,6 @@ static int pcs_request_gpio(struct pinctrl_dev *pctldev,
46472         struct pcs_device *pcs = pinctrl_dev_get_drvdata(pctldev);
46473         struct pcs_gpiofunc_range *frange = NULL;
46474         struct list_head *pos, *tmp;
46475 -       int mux_bytes = 0;
46476         unsigned data;
46478         /* If function mask is null, return directly. */
46479 @@ -392,29 +415,27 @@ static int pcs_request_gpio(struct pinctrl_dev *pctldev,
46480                 return -ENOTSUPP;
46482         list_for_each_safe(pos, tmp, &pcs->gpiofuncs) {
46483 +               u32 offset;
46485                 frange = list_entry(pos, struct pcs_gpiofunc_range, node);
46486                 if (pin >= frange->offset + frange->npins
46487                         || pin < frange->offset)
46488                         continue;
46489 -               mux_bytes = pcs->width / BITS_PER_BYTE;
46491 -               if (pcs->bits_per_mux) {
46492 -                       int byte_num, offset, pin_shift;
46493 +               offset = pcs_pin_reg_offset_get(pcs, pin);
46495 -                       byte_num = (pcs->bits_per_pin * pin) / BITS_PER_BYTE;
46496 -                       offset = (byte_num / mux_bytes) * mux_bytes;
46497 -                       pin_shift = pin % (pcs->width / pcs->bits_per_pin) *
46498 -                                   pcs->bits_per_pin;
46499 +               if (pcs->bits_per_mux) {
46500 +                       int pin_shift = pcs_pin_shift_reg_get(pcs, pin);
46502                         data = pcs->read(pcs->base + offset);
46503                         data &= ~(pcs->fmask << pin_shift);
46504                         data |= frange->gpiofunc << pin_shift;
46505                         pcs->write(data, pcs->base + offset);
46506                 } else {
46507 -                       data = pcs->read(pcs->base + pin * mux_bytes);
46508 +                       data = pcs->read(pcs->base + offset);
46509                         data &= ~pcs->fmask;
46510                         data |= frange->gpiofunc;
46511 -                       pcs->write(data, pcs->base + pin * mux_bytes);
46512 +                       pcs->write(data, pcs->base + offset);
46513                 }
46514                 break;
46515         }
46516 @@ -656,10 +677,8 @@ static const struct pinconf_ops pcs_pinconf_ops = {
46517   * pcs_add_pin() - add a pin to the static per controller pin array
46518   * @pcs: pcs driver instance
46519   * @offset: register offset from base
46520 - * @pin_pos: unused
46521   */
46522 -static int pcs_add_pin(struct pcs_device *pcs, unsigned offset,
46523 -               unsigned pin_pos)
46524 +static int pcs_add_pin(struct pcs_device *pcs, unsigned int offset)
46526         struct pcs_soc_data *pcs_soc = &pcs->socdata;
46527         struct pinctrl_pin_desc *pin;
46528 @@ -728,17 +747,9 @@ static int pcs_allocate_pin_table(struct pcs_device *pcs)
46529         for (i = 0; i < pcs->desc.npins; i++) {
46530                 unsigned offset;
46531                 int res;
46532 -               int byte_num;
46533 -               int pin_pos = 0;
46535 -               if (pcs->bits_per_mux) {
46536 -                       byte_num = (pcs->bits_per_pin * i) / BITS_PER_BYTE;
46537 -                       offset = (byte_num / mux_bytes) * mux_bytes;
46538 -                       pin_pos = i % num_pins_in_register;
46539 -               } else {
46540 -                       offset = i * mux_bytes;
46541 -               }
46542 -               res = pcs_add_pin(pcs, offset, pin_pos);
46543 +               offset = pcs_pin_reg_offset_get(pcs, i);
46544 +               res = pcs_add_pin(pcs, offset);
46545                 if (res < 0) {
46546                         dev_err(pcs->dev, "error adding pins: %i\n", res);
46547                         return res;
46548 diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
46549 index 0cd7f33cdf25..2b99f4130e1e 100644
46550 --- a/drivers/pinctrl/samsung/pinctrl-exynos.c
46551 +++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
46552 @@ -55,7 +55,7 @@ static void exynos_irq_mask(struct irq_data *irqd)
46553         struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
46554         struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
46555         unsigned long reg_mask = our_chip->eint_mask + bank->eint_offset;
46556 -       unsigned long mask;
46557 +       unsigned int mask;
46558         unsigned long flags;
46560         raw_spin_lock_irqsave(&bank->slock, flags);
46561 @@ -83,7 +83,7 @@ static void exynos_irq_unmask(struct irq_data *irqd)
46562         struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
46563         struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
46564         unsigned long reg_mask = our_chip->eint_mask + bank->eint_offset;
46565 -       unsigned long mask;
46566 +       unsigned int mask;
46567         unsigned long flags;
46569         /*
46570 @@ -483,7 +483,7 @@ static void exynos_irq_eint0_15(struct irq_desc *desc)
46571         chained_irq_exit(chip, desc);
46574 -static inline void exynos_irq_demux_eint(unsigned long pend,
46575 +static inline void exynos_irq_demux_eint(unsigned int pend,
46576                                                 struct irq_domain *domain)
46578         unsigned int irq;
46579 @@ -500,8 +500,8 @@ static void exynos_irq_demux_eint16_31(struct irq_desc *desc)
46581         struct irq_chip *chip = irq_desc_get_chip(desc);
46582         struct exynos_muxed_weint_data *eintd = irq_desc_get_handler_data(desc);
46583 -       unsigned long pend;
46584 -       unsigned long mask;
46585 +       unsigned int pend;
46586 +       unsigned int mask;
46587         int i;
46589         chained_irq_enter(chip, desc);
46590 diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
46591 index 0811562deecc..24be8f550ae0 100644
46592 --- a/drivers/platform/chrome/cros_ec_typec.c
46593 +++ b/drivers/platform/chrome/cros_ec_typec.c
46594 @@ -483,6 +483,11 @@ static int cros_typec_enable_dp(struct cros_typec_data *typec,
46595                 return -ENOTSUPP;
46596         }
46598 +       if (!pd_ctrl->dp_mode) {
46599 +               dev_err(typec->dev, "No valid DP mode provided.\n");
46600 +               return -EINVAL;
46601 +       }
46603         /* Status VDO. */
46604         dp_data.status = DP_STATUS_ENABLED;
46605         if (port->mux_flags & USB_PD_MUX_HPD_IRQ)
46606 diff --git a/drivers/platform/surface/aggregator/controller.c b/drivers/platform/surface/aggregator/controller.c
46607 index 5bcb59ed579d..89761d3e1a47 100644
46608 --- a/drivers/platform/surface/aggregator/controller.c
46609 +++ b/drivers/platform/surface/aggregator/controller.c
46610 @@ -1040,7 +1040,7 @@ static int ssam_dsm_load_u32(acpi_handle handle, u64 funcs, u64 func, u32 *ret)
46611         union acpi_object *obj;
46612         u64 val;
46614 -       if (!(funcs & BIT(func)))
46615 +       if (!(funcs & BIT_ULL(func)))
46616                 return 0; /* Not supported, leave *ret at its default value */
46618         obj = acpi_evaluate_dsm_typed(handle, &SSAM_SSH_DSM_GUID,
46619 diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
46620 index 7410ccae650c..a90ae6ba4a73 100644
46621 --- a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
46622 +++ b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
46623 @@ -399,6 +399,7 @@ static int init_bios_attributes(int attr_type, const char *guid)
46624         union acpi_object *obj = NULL;
46625         union acpi_object *elements;
46626         struct kset *tmp_set;
46627 +       int min_elements;
46629         /* instance_id needs to be reset for each type GUID
46630          * also, instance IDs are unique within GUID but not across
46631 @@ -409,14 +410,38 @@ static int init_bios_attributes(int attr_type, const char *guid)
46632         retval = alloc_attributes_data(attr_type);
46633         if (retval)
46634                 return retval;
46636 +       switch (attr_type) {
46637 +       case ENUM:      min_elements = 8;       break;
46638 +       case INT:       min_elements = 9;       break;
46639 +       case STR:       min_elements = 8;       break;
46640 +       case PO:        min_elements = 4;       break;
46641 +       default:
46642 +               pr_err("Error: Unknown attr_type: %d\n", attr_type);
46643 +               return -EINVAL;
46644 +       }
46646         /* need to use specific instance_id and guid combination to get right data */
46647         obj = get_wmiobj_pointer(instance_id, guid);
46648 -       if (!obj || obj->type != ACPI_TYPE_PACKAGE)
46649 +       if (!obj)
46650                 return -ENODEV;
46651 -       elements = obj->package.elements;
46653         mutex_lock(&wmi_priv.mutex);
46654 -       while (elements) {
46655 +       while (obj) {
46656 +               if (obj->type != ACPI_TYPE_PACKAGE) {
46657 +                       pr_err("Error: Expected ACPI-package type, got: %d\n", obj->type);
46658 +                       retval = -EIO;
46659 +                       goto err_attr_init;
46660 +               }
46662 +               if (obj->package.count < min_elements) {
46663 +                       pr_err("Error: ACPI-package does not have enough elements: %d < %d\n",
46664 +                              obj->package.count, min_elements);
46665 +                       goto nextobj;
46666 +               }
46668 +               elements = obj->package.elements;
46670                 /* sanity checking */
46671                 if (elements[ATTR_NAME].type != ACPI_TYPE_STRING) {
46672                         pr_debug("incorrect element type\n");
46673 @@ -481,7 +506,6 @@ static int init_bios_attributes(int attr_type, const char *guid)
46674                 kfree(obj);
46675                 instance_id++;
46676                 obj = get_wmiobj_pointer(instance_id, guid);
46677 -               elements = obj ? obj->package.elements : NULL;
46678         }
46680         mutex_unlock(&wmi_priv.mutex);
46681 diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
46682 index bffe548187ee..c2918ee3e100 100644
46683 --- a/drivers/platform/x86/intel_ips.c
46684 +++ b/drivers/platform/x86/intel_ips.c
46685 @@ -798,7 +798,7 @@ static int ips_adjust(void *data)
46686                         ips_gpu_lower(ips);
46688  sleep:
46689 -               schedule_timeout_interruptible(msecs_to_jiffies(IPS_ADJUST_PERIOD));
46690 +               schedule_msec_hrtimeout_interruptible((IPS_ADJUST_PERIOD));
46691         } while (!kthread_should_stop());
46693         dev_dbg(ips->dev, "ips-adjust thread stopped\n");
46694 @@ -974,7 +974,7 @@ static int ips_monitor(void *data)
46695         seqno_timestamp = get_jiffies_64();
46697         old_cpu_power = thm_readl(THM_CEC);
46698 -       schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
46699 +       schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD));
46701         /* Collect an initial average */
46702         for (i = 0; i < IPS_SAMPLE_COUNT; i++) {
46703 @@ -1001,7 +1001,7 @@ static int ips_monitor(void *data)
46704                         mchp_samples[i] = mchp;
46705                 }
46707 -               schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
46708 +               schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD));
46709                 if (kthread_should_stop())
46710                         break;
46711         }
46712 @@ -1028,7 +1028,7 @@ static int ips_monitor(void *data)
46713          * us to reduce the sample frequency if the CPU and GPU are idle.
46714          */
46715         old_cpu_power = thm_readl(THM_CEC);
46716 -       schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
46717 +       schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD));
46718         last_sample_period = IPS_SAMPLE_PERIOD;
46720         timer_setup(&ips->timer, monitor_timeout, TIMER_DEFERRABLE);
46721 diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
46722 index b5888aeb4bcf..260d49dca1ad 100644
46723 --- a/drivers/platform/x86/intel_pmc_core.c
46724 +++ b/drivers/platform/x86/intel_pmc_core.c
46725 @@ -1186,9 +1186,15 @@ static const struct pci_device_id pmc_pci_ids[] = {
46726   * the platform BIOS enforces 24Mhz crystal to shutdown
46727   * before PMC can assert SLP_S0#.
46728   */
46729 +static bool xtal_ignore;
46730  static int quirk_xtal_ignore(const struct dmi_system_id *id)
46732 -       struct pmc_dev *pmcdev = &pmc;
46733 +       xtal_ignore = true;
46734 +       return 0;
46737 +static void pmc_core_xtal_ignore(struct pmc_dev *pmcdev)
46739         u32 value;
46741         value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_vric1_offset);
46742 @@ -1197,7 +1203,6 @@ static int quirk_xtal_ignore(const struct dmi_system_id *id)
46743         /* Low Voltage Mode Enable */
46744         value &= ~SPT_PMC_VRIC1_SLPS0LVEN;
46745         pmc_core_reg_write(pmcdev, pmcdev->map->pm_vric1_offset, value);
46746 -       return 0;
46749  static const struct dmi_system_id pmc_core_dmi_table[]  = {
46750 @@ -1212,6 +1217,14 @@ static const struct dmi_system_id pmc_core_dmi_table[]  = {
46751         {}
46752  };
46754 +static void pmc_core_do_dmi_quirks(struct pmc_dev *pmcdev)
46756 +       dmi_check_system(pmc_core_dmi_table);
46758 +       if (xtal_ignore)
46759 +               pmc_core_xtal_ignore(pmcdev);
46762  static int pmc_core_probe(struct platform_device *pdev)
46764         static bool device_initialized;
46765 @@ -1253,7 +1266,7 @@ static int pmc_core_probe(struct platform_device *pdev)
46766         mutex_init(&pmcdev->lock);
46767         platform_set_drvdata(pdev, pmcdev);
46768         pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit();
46769 -       dmi_check_system(pmc_core_dmi_table);
46770 +       pmc_core_do_dmi_quirks(pmcdev);
46772         /*
46773          * On TGL, due to a hardware limitation, the GBE LTR blocks PC10 when
46774 diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
46775 index a2a2d923e60c..df1fc6c719f3 100644
46776 --- a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
46777 +++ b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
46778 @@ -21,12 +21,16 @@
46779  #define PUNIT_MAILBOX_BUSY_BIT         31
46781  /*
46782 - * The average time to complete some commands is about 40us. The current
46783 - * count is enough to satisfy 40us. But when the firmware is very busy, this
46784 - * causes timeout occasionally.  So increase to deal with some worst case
46785 - * scenarios. Most of the command still complete in few us.
46786 + * The average time to complete mailbox commands is less than 40us. Most of
46787 + * the commands complete in few micro seconds. But the same firmware handles
46788 + * requests from all power management features.
46789 + * We can create a scenario where we flood the firmware with requests then
46790 + * the mailbox response can be delayed for 100s of micro seconds. So define
46791 + * two timeouts. One for average case and one for long.
46792 + * If the firmware is taking more than average, just call cond_resched().
46793   */
46794 -#define OS_MAILBOX_RETRY_COUNT         100
46795 +#define OS_MAILBOX_TIMEOUT_AVG_US      40
46796 +#define OS_MAILBOX_TIMEOUT_MAX_US      1000
46798  struct isst_if_device {
46799         struct mutex mutex;
46800 @@ -35,11 +39,13 @@ struct isst_if_device {
46801  static int isst_if_mbox_cmd(struct pci_dev *pdev,
46802                             struct isst_if_mbox_cmd *mbox_cmd)
46804 -       u32 retries, data;
46805 +       s64 tm_delta = 0;
46806 +       ktime_t tm;
46807 +       u32 data;
46808         int ret;
46810         /* Poll for rb bit == 0 */
46811 -       retries = OS_MAILBOX_RETRY_COUNT;
46812 +       tm = ktime_get();
46813         do {
46814                 ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_INTERFACE,
46815                                             &data);
46816 @@ -48,11 +54,14 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
46818                 if (data & BIT_ULL(PUNIT_MAILBOX_BUSY_BIT)) {
46819                         ret = -EBUSY;
46820 +                       tm_delta = ktime_us_delta(ktime_get(), tm);
46821 +                       if (tm_delta > OS_MAILBOX_TIMEOUT_AVG_US)
46822 +                               cond_resched();
46823                         continue;
46824                 }
46825                 ret = 0;
46826                 break;
46827 -       } while (--retries);
46828 +       } while (tm_delta < OS_MAILBOX_TIMEOUT_MAX_US);
46830         if (ret)
46831                 return ret;
46832 @@ -74,7 +83,8 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
46833                 return ret;
46835         /* Poll for rb bit == 0 */
46836 -       retries = OS_MAILBOX_RETRY_COUNT;
46837 +       tm_delta = 0;
46838 +       tm = ktime_get();
46839         do {
46840                 ret = pci_read_config_dword(pdev, PUNIT_MAILBOX_INTERFACE,
46841                                             &data);
46842 @@ -83,6 +93,9 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
46844                 if (data & BIT_ULL(PUNIT_MAILBOX_BUSY_BIT)) {
46845                         ret = -EBUSY;
46846 +                       tm_delta = ktime_us_delta(ktime_get(), tm);
46847 +                       if (tm_delta > OS_MAILBOX_TIMEOUT_AVG_US)
46848 +                               cond_resched();
46849                         continue;
46850                 }
46852 @@ -96,7 +109,7 @@ static int isst_if_mbox_cmd(struct pci_dev *pdev,
46853                 mbox_cmd->resp_data = data;
46854                 ret = 0;
46855                 break;
46856 -       } while (--retries);
46857 +       } while (tm_delta < OS_MAILBOX_TIMEOUT_MAX_US);
46859         return ret;
46861 diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
46862 index ca684ed760d1..a9d2a4b98e57 100644
46863 --- a/drivers/platform/x86/pmc_atom.c
46864 +++ b/drivers/platform/x86/pmc_atom.c
46865 @@ -393,34 +393,10 @@ static const struct dmi_system_id critclk_systems[] = {
46866         },
46867         {
46868                 /* pmc_plt_clk* - are used for ethernet controllers */
46869 -               .ident = "Beckhoff CB3163",
46870 +               .ident = "Beckhoff Baytrail",
46871                 .matches = {
46872                         DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
46873 -                       DMI_MATCH(DMI_BOARD_NAME, "CB3163"),
46874 -               },
46875 -       },
46876 -       {
46877 -               /* pmc_plt_clk* - are used for ethernet controllers */
46878 -               .ident = "Beckhoff CB4063",
46879 -               .matches = {
46880 -                       DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
46881 -                       DMI_MATCH(DMI_BOARD_NAME, "CB4063"),
46882 -               },
46883 -       },
46884 -       {
46885 -               /* pmc_plt_clk* - are used for ethernet controllers */
46886 -               .ident = "Beckhoff CB6263",
46887 -               .matches = {
46888 -                       DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
46889 -                       DMI_MATCH(DMI_BOARD_NAME, "CB6263"),
46890 -               },
46891 -       },
46892 -       {
46893 -               /* pmc_plt_clk* - are used for ethernet controllers */
46894 -               .ident = "Beckhoff CB6363",
46895 -               .matches = {
46896 -                       DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
46897 -                       DMI_MATCH(DMI_BOARD_NAME, "CB6363"),
46898 +                       DMI_MATCH(DMI_PRODUCT_FAMILY, "CBxx63"),
46899                 },
46900         },
46901         {
46902 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
46903 index 0d9e2ddbf904..61f1c91c62de 100644
46904 --- a/drivers/platform/x86/thinkpad_acpi.c
46905 +++ b/drivers/platform/x86/thinkpad_acpi.c
46906 @@ -6260,6 +6260,7 @@ enum thermal_access_mode {
46907  enum { /* TPACPI_THERMAL_TPEC_* */
46908         TP_EC_THERMAL_TMP0 = 0x78,      /* ACPI EC regs TMP 0..7 */
46909         TP_EC_THERMAL_TMP8 = 0xC0,      /* ACPI EC regs TMP 8..15 */
46910 +       TP_EC_FUNCREV      = 0xEF,      /* ACPI EC Functional revision */
46911         TP_EC_THERMAL_TMP_NA = -128,    /* ACPI EC sensor not available */
46913         TPACPI_THERMAL_SENSOR_NA = -128000, /* Sensor not available */
46914 @@ -6458,7 +6459,7 @@ static const struct attribute_group thermal_temp_input8_group = {
46916  static int __init thermal_init(struct ibm_init_struct *iibm)
46918 -       u8 t, ta1, ta2;
46919 +       u8 t, ta1, ta2, ver = 0;
46920         int i;
46921         int acpi_tmp7;
46922         int res;
46923 @@ -6473,7 +6474,14 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
46924                  * 0x78-0x7F, 0xC0-0xC7.  Registers return 0x00 for
46925                  * non-implemented, thermal sensors return 0x80 when
46926                  * not available
46927 +                * The above rule is unfortunately flawed. This has been seen with
46928 +                * 0xC2 (power supply ID) causing thermal control problems.
46929 +                * The EC version can be determined by offset 0xEF and at least for
46930 +                * version 3 the Lenovo firmware team confirmed that registers 0xC0-0xC7
46931 +                * are not thermal registers.
46932                  */
46933 +               if (!acpi_ec_read(TP_EC_FUNCREV, &ver))
46934 +                       pr_warn("Thinkpad ACPI EC unable to access EC version\n");
46936                 ta1 = ta2 = 0;
46937                 for (i = 0; i < 8; i++) {
46938 @@ -6483,11 +6491,13 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
46939                                 ta1 = 0;
46940                                 break;
46941                         }
46942 -                       if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) {
46943 -                               ta2 |= t;
46944 -                       } else {
46945 -                               ta1 = 0;
46946 -                               break;
46947 +                       if (ver < 3) {
46948 +                               if (acpi_ec_read(TP_EC_THERMAL_TMP8 + i, &t)) {
46949 +                                       ta2 |= t;
46950 +                               } else {
46951 +                                       ta1 = 0;
46952 +                                       break;
46953 +                               }
46954                         }
46955                 }
46956                 if (ta1 == 0) {
46957 @@ -6500,9 +6510,12 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
46958                                 thermal_read_mode = TPACPI_THERMAL_NONE;
46959                         }
46960                 } else {
46961 -                       thermal_read_mode =
46962 -                           (ta2 != 0) ?
46963 -                           TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8;
46964 +                       if (ver >= 3)
46965 +                               thermal_read_mode = TPACPI_THERMAL_TPEC_8;
46966 +                       else
46967 +                               thermal_read_mode =
46968 +                                       (ta2 != 0) ?
46969 +                                       TPACPI_THERMAL_TPEC_16 : TPACPI_THERMAL_TPEC_8;
46970                 }
46971         } else if (acpi_tmp7) {
46972                 if (tpacpi_is_ibm() &&
46973 diff --git a/drivers/power/supply/bq25980_charger.c b/drivers/power/supply/bq25980_charger.c
46974 index 530ff4025b31..0008c229fd9c 100644
46975 --- a/drivers/power/supply/bq25980_charger.c
46976 +++ b/drivers/power/supply/bq25980_charger.c
46977 @@ -606,33 +606,6 @@ static int bq25980_get_state(struct bq25980_device *bq,
46978         return 0;
46981 -static int bq25980_set_battery_property(struct power_supply *psy,
46982 -                               enum power_supply_property psp,
46983 -                               const union power_supply_propval *val)
46985 -       struct bq25980_device *bq = power_supply_get_drvdata(psy);
46986 -       int ret = 0;
46988 -       switch (psp) {
46989 -       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
46990 -               ret = bq25980_set_const_charge_curr(bq, val->intval);
46991 -               if (ret)
46992 -                       return ret;
46993 -               break;
46995 -       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
46996 -               ret = bq25980_set_const_charge_volt(bq, val->intval);
46997 -               if (ret)
46998 -                       return ret;
46999 -               break;
47001 -       default:
47002 -               return -EINVAL;
47003 -       }
47005 -       return ret;
47008  static int bq25980_get_battery_property(struct power_supply *psy,
47009                                 enum power_supply_property psp,
47010                                 union power_supply_propval *val)
47011 @@ -701,6 +674,18 @@ static int bq25980_set_charger_property(struct power_supply *psy,
47012                         return ret;
47013                 break;
47015 +       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
47016 +               ret = bq25980_set_const_charge_curr(bq, val->intval);
47017 +               if (ret)
47018 +                       return ret;
47019 +               break;
47021 +       case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
47022 +               ret = bq25980_set_const_charge_volt(bq, val->intval);
47023 +               if (ret)
47024 +                       return ret;
47025 +               break;
47027         default:
47028                 return -EINVAL;
47029         }
47030 @@ -922,7 +907,6 @@ static struct power_supply_desc bq25980_battery_desc = {
47031         .name                   = "bq25980-battery",
47032         .type                   = POWER_SUPPLY_TYPE_BATTERY,
47033         .get_property           = bq25980_get_battery_property,
47034 -       .set_property           = bq25980_set_battery_property,
47035         .properties             = bq25980_battery_props,
47036         .num_properties         = ARRAY_SIZE(bq25980_battery_props),
47037         .property_is_writeable  = bq25980_property_is_writeable,
47038 diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
47039 index 4c4a7b1c64c5..20e1dc8a87cf 100644
47040 --- a/drivers/power/supply/bq27xxx_battery.c
47041 +++ b/drivers/power/supply/bq27xxx_battery.c
47042 @@ -1661,27 +1661,6 @@ static int bq27xxx_battery_read_time(struct bq27xxx_device_info *di, u8 reg)
47043         return tval * 60;
47047 - * Read an average power register.
47048 - * Return < 0 if something fails.
47049 - */
47050 -static int bq27xxx_battery_read_pwr_avg(struct bq27xxx_device_info *di)
47052 -       int tval;
47054 -       tval = bq27xxx_read(di, BQ27XXX_REG_AP, false);
47055 -       if (tval < 0) {
47056 -               dev_err(di->dev, "error reading average power register  %02x: %d\n",
47057 -                       BQ27XXX_REG_AP, tval);
47058 -               return tval;
47059 -       }
47061 -       if (di->opts & BQ27XXX_O_ZERO)
47062 -               return (tval * BQ27XXX_POWER_CONSTANT) / BQ27XXX_RS;
47063 -       else
47064 -               return tval;
47067  /*
47068   * Returns true if a battery over temperature condition is detected
47069   */
47070 @@ -1769,8 +1748,6 @@ void bq27xxx_battery_update(struct bq27xxx_device_info *di)
47071                 }
47072                 if (di->regs[BQ27XXX_REG_CYCT] != INVALID_REG_ADDR)
47073                         cache.cycle_count = bq27xxx_battery_read_cyct(di);
47074 -               if (di->regs[BQ27XXX_REG_AP] != INVALID_REG_ADDR)
47075 -                       cache.power_avg = bq27xxx_battery_read_pwr_avg(di);
47077                 /* We only have to read charge design full once */
47078                 if (di->charge_design_full <= 0)
47079 @@ -1827,9 +1804,35 @@ static int bq27xxx_battery_current(struct bq27xxx_device_info *di,
47080                 val->intval = curr * BQ27XXX_CURRENT_CONSTANT / BQ27XXX_RS;
47081         } else {
47082                 /* Other gauges return signed value */
47083 -               val->intval = -(int)((s16)curr) * 1000;
47084 +               val->intval = (int)((s16)curr) * 1000;
47085 +       }
47087 +       return 0;
47091 + * Get the average power in µW
47092 + * Return < 0 if something fails.
47093 + */
47094 +static int bq27xxx_battery_pwr_avg(struct bq27xxx_device_info *di,
47095 +                                  union power_supply_propval *val)
47097 +       int power;
47099 +       power = bq27xxx_read(di, BQ27XXX_REG_AP, false);
47100 +       if (power < 0) {
47101 +               dev_err(di->dev,
47102 +                       "error reading average power register %02x: %d\n",
47103 +                       BQ27XXX_REG_AP, power);
47104 +               return power;
47105         }
47107 +       if (di->opts & BQ27XXX_O_ZERO)
47108 +               val->intval = (power * BQ27XXX_POWER_CONSTANT) / BQ27XXX_RS;
47109 +       else
47110 +               /* Other gauges return a signed value in units of 10mW */
47111 +               val->intval = (int)((s16)power) * 10000;
47113         return 0;
47116 @@ -2020,7 +2023,7 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
47117                 ret = bq27xxx_simple_value(di->cache.energy, val);
47118                 break;
47119         case POWER_SUPPLY_PROP_POWER_AVG:
47120 -               ret = bq27xxx_simple_value(di->cache.power_avg, val);
47121 +               ret = bq27xxx_battery_pwr_avg(di, val);
47122                 break;
47123         case POWER_SUPPLY_PROP_HEALTH:
47124                 ret = bq27xxx_simple_value(di->cache.health, val);
47125 diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c
47126 index 6d5bcdb9f45d..a3fc0084cda0 100644
47127 --- a/drivers/power/supply/cpcap-battery.c
47128 +++ b/drivers/power/supply/cpcap-battery.c
47129 @@ -786,7 +786,7 @@ static irqreturn_t cpcap_battery_irq_thread(int irq, void *data)
47130                         break;
47131         }
47133 -       if (!d)
47134 +       if (list_entry_is_head(d, &ddata->irq_list, node))
47135                 return IRQ_NONE;
47137         latest = cpcap_battery_latest(ddata);
47138 diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c
47139 index 641dcad1133f..2a8915c3e73e 100644
47140 --- a/drivers/power/supply/cpcap-charger.c
47141 +++ b/drivers/power/supply/cpcap-charger.c
47142 @@ -318,7 +318,7 @@ static int cpcap_charger_current_to_regval(int microamp)
47143                 return CPCAP_REG_CRM_ICHRG(0x0);
47144         if (miliamp < 177)
47145                 return CPCAP_REG_CRM_ICHRG(0x1);
47146 -       if (miliamp > 1596)
47147 +       if (miliamp >= 1596)
47148                 return CPCAP_REG_CRM_ICHRG(0xe);
47150         res = microamp / 88666;
47151 @@ -668,6 +668,9 @@ static void cpcap_usb_detect(struct work_struct *work)
47152                 return;
47153         }
47155 +       /* Delay for 80ms to avoid vbus bouncing when usb cable is plugged in */
47156 +       usleep_range(80000, 120000);
47158         /* Throttle chrgcurr2 interrupt for charger done and retry */
47159         switch (ddata->status) {
47160         case POWER_SUPPLY_STATUS_CHARGING:
47161 diff --git a/drivers/power/supply/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c
47162 index 0032069fbc2b..66039c665dd1 100644
47163 --- a/drivers/power/supply/generic-adc-battery.c
47164 +++ b/drivers/power/supply/generic-adc-battery.c
47165 @@ -373,7 +373,7 @@ static int gab_remove(struct platform_device *pdev)
47166         }
47168         kfree(adc_bat->psy_desc.properties);
47169 -       cancel_delayed_work(&adc_bat->bat_work);
47170 +       cancel_delayed_work_sync(&adc_bat->bat_work);
47171         return 0;
47174 diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c
47175 index e7931ffb7151..397e5a03b7d9 100644
47176 --- a/drivers/power/supply/lp8788-charger.c
47177 +++ b/drivers/power/supply/lp8788-charger.c
47178 @@ -501,7 +501,7 @@ static int lp8788_set_irqs(struct platform_device *pdev,
47180                 ret = request_threaded_irq(virq, NULL,
47181                                         lp8788_charger_irq_thread,
47182 -                                       0, name, pchg);
47183 +                                       IRQF_ONESHOT, name, pchg);
47184                 if (ret)
47185                         break;
47186         }
47187 diff --git a/drivers/power/supply/pm2301_charger.c b/drivers/power/supply/pm2301_charger.c
47188 index ac06ecf7fc9c..a3bfb9612b17 100644
47189 --- a/drivers/power/supply/pm2301_charger.c
47190 +++ b/drivers/power/supply/pm2301_charger.c
47191 @@ -1089,7 +1089,7 @@ static int pm2xxx_wall_charger_probe(struct i2c_client *i2c_client,
47192         ret = request_threaded_irq(gpio_to_irq(pm2->pdata->gpio_irq_number),
47193                                 NULL,
47194                                 pm2xxx_charger_irq[0].isr,
47195 -                               pm2->pdata->irq_type,
47196 +                               pm2->pdata->irq_type | IRQF_ONESHOT,
47197                                 pm2xxx_charger_irq[0].name, pm2);
47199         if (ret != 0) {
47200 diff --git a/drivers/power/supply/s3c_adc_battery.c b/drivers/power/supply/s3c_adc_battery.c
47201 index a2addc24ee8b..3e3a598f114d 100644
47202 --- a/drivers/power/supply/s3c_adc_battery.c
47203 +++ b/drivers/power/supply/s3c_adc_battery.c
47204 @@ -395,7 +395,7 @@ static int s3c_adc_bat_remove(struct platform_device *pdev)
47205         if (main_bat.charge_finished)
47206                 free_irq(gpiod_to_irq(main_bat.charge_finished), NULL);
47208 -       cancel_delayed_work(&bat_work);
47209 +       cancel_delayed_work_sync(&bat_work);
47211         if (pdata->exit)
47212                 pdata->exit();
47213 diff --git a/drivers/power/supply/tps65090-charger.c b/drivers/power/supply/tps65090-charger.c
47214 index 6b0098e5a88b..0990b2fa6cd8 100644
47215 --- a/drivers/power/supply/tps65090-charger.c
47216 +++ b/drivers/power/supply/tps65090-charger.c
47217 @@ -301,7 +301,7 @@ static int tps65090_charger_probe(struct platform_device *pdev)
47219         if (irq != -ENXIO) {
47220                 ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
47221 -                       tps65090_charger_isr, 0, "tps65090-charger", cdata);
47222 +                       tps65090_charger_isr, IRQF_ONESHOT, "tps65090-charger", cdata);
47223                 if (ret) {
47224                         dev_err(cdata->dev,
47225                                 "Unable to register irq %d err %d\n", irq,
47226 diff --git a/drivers/power/supply/tps65217_charger.c b/drivers/power/supply/tps65217_charger.c
47227 index 814c2b81fdfe..ba33d1617e0b 100644
47228 --- a/drivers/power/supply/tps65217_charger.c
47229 +++ b/drivers/power/supply/tps65217_charger.c
47230 @@ -238,7 +238,7 @@ static int tps65217_charger_probe(struct platform_device *pdev)
47231         for (i = 0; i < NUM_CHARGER_IRQS; i++) {
47232                 ret = devm_request_threaded_irq(&pdev->dev, irq[i], NULL,
47233                                                 tps65217_charger_irq,
47234 -                                               0, "tps65217-charger",
47235 +                                               IRQF_ONESHOT, "tps65217-charger",
47236                                                 charger);
47237                 if (ret) {
47238                         dev_err(charger->dev,
47239 diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
47240 index fdda2a737186..58ecdad26cca 100644
47241 --- a/drivers/powercap/intel_rapl_common.c
47242 +++ b/drivers/powercap/intel_rapl_common.c
47243 @@ -1454,7 +1454,7 @@ static int __init rapl_init(void)
47245         id = x86_match_cpu(rapl_ids);
47246         if (!id) {
47247 -               pr_err("driver does not support CPU family %d model %d\n",
47248 +               pr_info("driver does not support CPU family %d model %d\n",
47249                        boot_cpu_data.x86, boot_cpu_data.x86_model);
47251                 return -ENODEV;
47252 diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
47253 index 5813339b597b..3292158157b6 100644
47254 --- a/drivers/pwm/pwm-atmel.c
47255 +++ b/drivers/pwm/pwm-atmel.c
47256 @@ -319,7 +319,7 @@ static void atmel_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
47258                 cdty = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm,
47259                                           atmel_pwm->data->regs.duty);
47260 -               tmp = (u64)cdty * NSEC_PER_SEC;
47261 +               tmp = (u64)(cprd - cdty) * NSEC_PER_SEC;
47262                 tmp <<= pres;
47263                 state->duty_cycle = DIV64_U64_ROUND_UP(tmp, rate);
47265 diff --git a/drivers/regulator/bd9576-regulator.c b/drivers/regulator/bd9576-regulator.c
47266 index a8b5832a5a1b..204a2da054f5 100644
47267 --- a/drivers/regulator/bd9576-regulator.c
47268 +++ b/drivers/regulator/bd9576-regulator.c
47269 @@ -206,7 +206,7 @@ static int bd957x_probe(struct platform_device *pdev)
47271         struct regmap *regmap;
47272         struct regulator_config config = { 0 };
47273 -       int i, err;
47274 +       int i;
47275         bool vout_mode, ddr_sel;
47276         const struct bd957x_regulator_data *reg_data = &bd9576_regulators[0];
47277         unsigned int num_reg_data = ARRAY_SIZE(bd9576_regulators);
47278 @@ -279,8 +279,7 @@ static int bd957x_probe(struct platform_device *pdev)
47279                 break;
47280         default:
47281                 dev_err(&pdev->dev, "Unsupported chip type\n");
47282 -               err = -EINVAL;
47283 -               goto err;
47284 +               return -EINVAL;
47285         }
47287         config.dev = pdev->dev.parent;
47288 @@ -300,8 +299,7 @@ static int bd957x_probe(struct platform_device *pdev)
47289                         dev_err(&pdev->dev,
47290                                 "failed to register %s regulator\n",
47291                                 desc->name);
47292 -                       err = PTR_ERR(rdev);
47293 -                       goto err;
47294 +                       return PTR_ERR(rdev);
47295                 }
47296                 /*
47297                  * Clear the VOUT1 GPIO setting - rest of the regulators do not
47298 @@ -310,8 +308,7 @@ static int bd957x_probe(struct platform_device *pdev)
47299                 config.ena_gpiod = NULL;
47300         }
47302 -err:
47303 -       return err;
47304 +       return 0;
47307  static const struct platform_device_id bd957x_pmic_id[] = {
47308 diff --git a/drivers/regulator/da9121-regulator.c b/drivers/regulator/da9121-regulator.c
47309 index a2ede7d7897e..08cbf688e14d 100644
47310 --- a/drivers/regulator/da9121-regulator.c
47311 +++ b/drivers/regulator/da9121-regulator.c
47312 @@ -40,6 +40,7 @@ struct da9121 {
47313         unsigned int passive_delay;
47314         int chip_irq;
47315         int variant_id;
47316 +       int subvariant_id;
47317  };
47319  /* Define ranges for different variants, enabling translation to/from
47320 @@ -812,7 +813,6 @@ static struct regmap_config da9121_2ch_regmap_config = {
47321  static int da9121_check_device_type(struct i2c_client *i2c, struct da9121 *chip)
47323         u32 device_id;
47324 -       u8 chip_id = chip->variant_id;
47325         u32 variant_id;
47326         u8 variant_mrc, variant_vrc;
47327         char *type;
47328 @@ -839,22 +839,34 @@ static int da9121_check_device_type(struct i2c_client *i2c, struct da9121 *chip)
47330         variant_vrc = variant_id & DA9121_MASK_OTP_VARIANT_ID_VRC;
47332 -       switch (variant_vrc) {
47333 -       case DA9121_VARIANT_VRC:
47334 -               type = "DA9121/DA9130";
47335 -               config_match = (chip_id == DA9121_TYPE_DA9121_DA9130);
47336 +       switch (chip->subvariant_id) {
47337 +       case DA9121_SUBTYPE_DA9121:
47338 +               type = "DA9121";
47339 +               config_match = (variant_vrc == DA9121_VARIANT_VRC);
47340                 break;
47341 -       case DA9220_VARIANT_VRC:
47342 -               type = "DA9220/DA9132";
47343 -               config_match = (chip_id == DA9121_TYPE_DA9220_DA9132);
47344 +       case DA9121_SUBTYPE_DA9130:
47345 +               type = "DA9130";
47346 +               config_match = (variant_vrc == DA9130_VARIANT_VRC);
47347                 break;
47348 -       case DA9122_VARIANT_VRC:
47349 -               type = "DA9122/DA9131";
47350 -               config_match = (chip_id == DA9121_TYPE_DA9122_DA9131);
47351 +       case DA9121_SUBTYPE_DA9220:
47352 +               type = "DA9220";
47353 +               config_match = (variant_vrc == DA9220_VARIANT_VRC);
47354                 break;
47355 -       case DA9217_VARIANT_VRC:
47356 +       case DA9121_SUBTYPE_DA9132:
47357 +               type = "DA9132";
47358 +               config_match = (variant_vrc == DA9132_VARIANT_VRC);
47359 +               break;
47360 +       case DA9121_SUBTYPE_DA9122:
47361 +               type = "DA9122";
47362 +               config_match = (variant_vrc == DA9122_VARIANT_VRC);
47363 +               break;
47364 +       case DA9121_SUBTYPE_DA9131:
47365 +               type = "DA9131";
47366 +               config_match = (variant_vrc == DA9131_VARIANT_VRC);
47367 +               break;
47368 +       case DA9121_SUBTYPE_DA9217:
47369                 type = "DA9217";
47370 -               config_match = (chip_id == DA9121_TYPE_DA9217);
47371 +               config_match = (variant_vrc == DA9217_VARIANT_VRC);
47372                 break;
47373         default:
47374                 type = "Unknown";
47375 @@ -892,15 +904,27 @@ static int da9121_assign_chip_model(struct i2c_client *i2c,
47377         chip->dev = &i2c->dev;
47379 -       switch (chip->variant_id) {
47380 -       case DA9121_TYPE_DA9121_DA9130:
47381 -               fallthrough;
47382 -       case DA9121_TYPE_DA9217:
47383 +       /* Use configured subtype to select the regulator descriptor index and
47384 +        * register map, common to both consumer and automotive grade variants
47385 +        */
47386 +       switch (chip->subvariant_id) {
47387 +       case DA9121_SUBTYPE_DA9121:
47388 +       case DA9121_SUBTYPE_DA9130:
47389 +               chip->variant_id = DA9121_TYPE_DA9121_DA9130;
47390                 regmap = &da9121_1ch_regmap_config;
47391                 break;
47392 -       case DA9121_TYPE_DA9122_DA9131:
47393 -               fallthrough;
47394 -       case DA9121_TYPE_DA9220_DA9132:
47395 +       case DA9121_SUBTYPE_DA9217:
47396 +               chip->variant_id = DA9121_TYPE_DA9217;
47397 +               regmap = &da9121_1ch_regmap_config;
47398 +               break;
47399 +       case DA9121_SUBTYPE_DA9122:
47400 +       case DA9121_SUBTYPE_DA9131:
47401 +               chip->variant_id = DA9121_TYPE_DA9122_DA9131;
47402 +               regmap = &da9121_2ch_regmap_config;
47403 +               break;
47404 +       case DA9121_SUBTYPE_DA9220:
47405 +       case DA9121_SUBTYPE_DA9132:
47406 +               chip->variant_id = DA9121_TYPE_DA9220_DA9132;
47407                 regmap = &da9121_2ch_regmap_config;
47408                 break;
47409         }
47410 @@ -975,13 +999,13 @@ static int da9121_config_irq(struct i2c_client *i2c,
47413  static const struct of_device_id da9121_dt_ids[] = {
47414 -       { .compatible = "dlg,da9121", .data = (void *) DA9121_TYPE_DA9121_DA9130 },
47415 -       { .compatible = "dlg,da9130", .data = (void *) DA9121_TYPE_DA9121_DA9130 },
47416 -       { .compatible = "dlg,da9217", .data = (void *) DA9121_TYPE_DA9217 },
47417 -       { .compatible = "dlg,da9122", .data = (void *) DA9121_TYPE_DA9122_DA9131 },
47418 -       { .compatible = "dlg,da9131", .data = (void *) DA9121_TYPE_DA9122_DA9131 },
47419 -       { .compatible = "dlg,da9220", .data = (void *) DA9121_TYPE_DA9220_DA9132 },
47420 -       { .compatible = "dlg,da9132", .data = (void *) DA9121_TYPE_DA9220_DA9132 },
47421 +       { .compatible = "dlg,da9121", .data = (void *) DA9121_SUBTYPE_DA9121 },
47422 +       { .compatible = "dlg,da9130", .data = (void *) DA9121_SUBTYPE_DA9130 },
47423 +       { .compatible = "dlg,da9217", .data = (void *) DA9121_SUBTYPE_DA9217 },
47424 +       { .compatible = "dlg,da9122", .data = (void *) DA9121_SUBTYPE_DA9122 },
47425 +       { .compatible = "dlg,da9131", .data = (void *) DA9121_SUBTYPE_DA9131 },
47426 +       { .compatible = "dlg,da9220", .data = (void *) DA9121_SUBTYPE_DA9220 },
47427 +       { .compatible = "dlg,da9132", .data = (void *) DA9121_SUBTYPE_DA9132 },
47428         { }
47429  };
47430  MODULE_DEVICE_TABLE(of, da9121_dt_ids);
47431 @@ -1011,7 +1035,7 @@ static int da9121_i2c_probe(struct i2c_client *i2c,
47432         }
47434         chip->pdata = i2c->dev.platform_data;
47435 -       chip->variant_id = da9121_of_get_id(&i2c->dev);
47436 +       chip->subvariant_id = da9121_of_get_id(&i2c->dev);
47438         ret = da9121_assign_chip_model(i2c, chip);
47439         if (ret < 0)
47440 diff --git a/drivers/regulator/da9121-regulator.h b/drivers/regulator/da9121-regulator.h
47441 index 3c34cb889ca8..357f416e17c1 100644
47442 --- a/drivers/regulator/da9121-regulator.h
47443 +++ b/drivers/regulator/da9121-regulator.h
47444 @@ -29,6 +29,16 @@ enum da9121_variant {
47445         DA9121_TYPE_DA9217
47446  };
47448 +enum da9121_subvariant {
47449 +       DA9121_SUBTYPE_DA9121,
47450 +       DA9121_SUBTYPE_DA9130,
47451 +       DA9121_SUBTYPE_DA9220,
47452 +       DA9121_SUBTYPE_DA9132,
47453 +       DA9121_SUBTYPE_DA9122,
47454 +       DA9121_SUBTYPE_DA9131,
47455 +       DA9121_SUBTYPE_DA9217
47458  /* Minimum, maximum and default polling millisecond periods are provided
47459   * here as an example. It is expected that any final implementation will
47460   * include a modification of these settings to match the required
47461 @@ -279,6 +289,9 @@ enum da9121_variant {
47462  #define DA9220_VARIANT_VRC     0x0
47463  #define DA9122_VARIANT_VRC     0x2
47464  #define DA9217_VARIANT_VRC     0x7
47465 +#define DA9130_VARIANT_VRC     0x0
47466 +#define DA9131_VARIANT_VRC     0x1
47467 +#define DA9132_VARIANT_VRC     0x2
47469  /* DA9121_REG_OTP_CUSTOMER_ID */
47471 diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c
47472 index dcb380e868df..549ed3fed625 100644
47473 --- a/drivers/remoteproc/pru_rproc.c
47474 +++ b/drivers/remoteproc/pru_rproc.c
47475 @@ -266,12 +266,17 @@ static void pru_rproc_create_debug_entries(struct rproc *rproc)
47477  static void pru_dispose_irq_mapping(struct pru_rproc *pru)
47479 -       while (pru->evt_count--) {
47480 +       if (!pru->mapped_irq)
47481 +               return;
47483 +       while (pru->evt_count) {
47484 +               pru->evt_count--;
47485                 if (pru->mapped_irq[pru->evt_count] > 0)
47486                         irq_dispose_mapping(pru->mapped_irq[pru->evt_count]);
47487         }
47489         kfree(pru->mapped_irq);
47490 +       pru->mapped_irq = NULL;
47493  /*
47494 @@ -284,7 +289,7 @@ static int pru_handle_intrmap(struct rproc *rproc)
47495         struct pru_rproc *pru = rproc->priv;
47496         struct pru_irq_rsc *rsc = pru->pru_interrupt_map;
47497         struct irq_fwspec fwspec;
47498 -       struct device_node *irq_parent;
47499 +       struct device_node *parent, *irq_parent;
47500         int i, ret = 0;
47502         /* not having pru_interrupt_map is not an error */
47503 @@ -307,16 +312,31 @@ static int pru_handle_intrmap(struct rproc *rproc)
47504         pru->evt_count = rsc->num_evts;
47505         pru->mapped_irq = kcalloc(pru->evt_count, sizeof(unsigned int),
47506                                   GFP_KERNEL);
47507 -       if (!pru->mapped_irq)
47508 +       if (!pru->mapped_irq) {
47509 +               pru->evt_count = 0;
47510                 return -ENOMEM;
47511 +       }
47513         /*
47514          * parse and fill in system event to interrupt channel and
47515 -        * channel-to-host mapping
47516 +        * channel-to-host mapping. The interrupt controller to be used
47517 +        * for these mappings for a given PRU remoteproc is always its
47518 +        * corresponding sibling PRUSS INTC node.
47519          */
47520 -       irq_parent = of_irq_find_parent(pru->dev->of_node);
47521 +       parent = of_get_parent(dev_of_node(pru->dev));
47522 +       if (!parent) {
47523 +               kfree(pru->mapped_irq);
47524 +               pru->mapped_irq = NULL;
47525 +               pru->evt_count = 0;
47526 +               return -ENODEV;
47527 +       }
47529 +       irq_parent = of_get_child_by_name(parent, "interrupt-controller");
47530 +       of_node_put(parent);
47531         if (!irq_parent) {
47532                 kfree(pru->mapped_irq);
47533 +               pru->mapped_irq = NULL;
47534 +               pru->evt_count = 0;
47535                 return -ENODEV;
47536         }
47538 @@ -332,16 +352,20 @@ static int pru_handle_intrmap(struct rproc *rproc)
47540                 pru->mapped_irq[i] = irq_create_fwspec_mapping(&fwspec);
47541                 if (!pru->mapped_irq[i]) {
47542 -                       dev_err(dev, "failed to get virq\n");
47543 -                       ret = pru->mapped_irq[i];
47544 +                       dev_err(dev, "failed to get virq for fw mapping %d: event %d chnl %d host %d\n",
47545 +                               i, fwspec.param[0], fwspec.param[1],
47546 +                               fwspec.param[2]);
47547 +                       ret = -EINVAL;
47548                         goto map_fail;
47549                 }
47550         }
47551 +       of_node_put(irq_parent);
47553         return ret;
47555  map_fail:
47556         pru_dispose_irq_mapping(pru);
47557 +       of_node_put(irq_parent);
47559         return ret;
47561 @@ -387,8 +411,7 @@ static int pru_rproc_stop(struct rproc *rproc)
47562         pru_control_write_reg(pru, PRU_CTRL_CTRL, val);
47564         /* dispose irq mapping - new firmware can provide new mapping */
47565 -       if (pru->mapped_irq)
47566 -               pru_dispose_irq_mapping(pru);
47567 +       pru_dispose_irq_mapping(pru);
47569         return 0;
47571 diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
47572 index 66106ba25ba3..14e0ce5f18f5 100644
47573 --- a/drivers/remoteproc/qcom_q6v5_mss.c
47574 +++ b/drivers/remoteproc/qcom_q6v5_mss.c
47575 @@ -1210,6 +1210,14 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
47576                         goto release_firmware;
47577                 }
47579 +               if (phdr->p_filesz > phdr->p_memsz) {
47580 +                       dev_err(qproc->dev,
47581 +                               "refusing to load segment %d with p_filesz > p_memsz\n",
47582 +                               i);
47583 +                       ret = -EINVAL;
47584 +                       goto release_firmware;
47585 +               }
47587                 ptr = memremap(qproc->mpss_phys + offset, phdr->p_memsz, MEMREMAP_WC);
47588                 if (!ptr) {
47589                         dev_err(qproc->dev,
47590 @@ -1241,6 +1249,16 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
47591                                 goto release_firmware;
47592                         }
47594 +                       if (seg_fw->size != phdr->p_filesz) {
47595 +                               dev_err(qproc->dev,
47596 +                                       "failed to load segment %d from truncated file %s\n",
47597 +                                       i, fw_name);
47598 +                               ret = -EINVAL;
47599 +                               release_firmware(seg_fw);
47600 +                               memunmap(ptr);
47601 +                               goto release_firmware;
47602 +                       }
47604                         release_firmware(seg_fw);
47605                 }
47607 diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
47608 index 27a05167c18c..4840886532ff 100644
47609 --- a/drivers/rpmsg/qcom_glink_native.c
47610 +++ b/drivers/rpmsg/qcom_glink_native.c
47611 @@ -857,6 +857,7 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
47612                         dev_err(glink->dev,
47613                                 "no intent found for channel %s intent %d",
47614                                 channel->name, liid);
47615 +                       ret = -ENOENT;
47616                         goto advance_rx;
47617                 }
47618         }
47619 diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
47620 index cd8e438bc9c4..8752620d8e34 100644
47621 --- a/drivers/rtc/rtc-ds1307.c
47622 +++ b/drivers/rtc/rtc-ds1307.c
47623 @@ -296,7 +296,11 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t)
47624         t->tm_min = bcd2bin(regs[DS1307_REG_MIN] & 0x7f);
47625         tmp = regs[DS1307_REG_HOUR] & 0x3f;
47626         t->tm_hour = bcd2bin(tmp);
47627 -       t->tm_wday = bcd2bin(regs[DS1307_REG_WDAY] & 0x07) - 1;
47628 +       /* rx8130 is bit position, not BCD */
47629 +       if (ds1307->type == rx_8130)
47630 +               t->tm_wday = fls(regs[DS1307_REG_WDAY] & 0x7f);
47631 +       else
47632 +               t->tm_wday = bcd2bin(regs[DS1307_REG_WDAY] & 0x07) - 1;
47633         t->tm_mday = bcd2bin(regs[DS1307_REG_MDAY] & 0x3f);
47634         tmp = regs[DS1307_REG_MONTH] & 0x1f;
47635         t->tm_mon = bcd2bin(tmp) - 1;
47636 @@ -343,7 +347,11 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
47637         regs[DS1307_REG_SECS] = bin2bcd(t->tm_sec);
47638         regs[DS1307_REG_MIN] = bin2bcd(t->tm_min);
47639         regs[DS1307_REG_HOUR] = bin2bcd(t->tm_hour);
47640 -       regs[DS1307_REG_WDAY] = bin2bcd(t->tm_wday + 1);
47641 +       /* rx8130 is bit position, not BCD */
47642 +       if (ds1307->type == rx_8130)
47643 +               regs[DS1307_REG_WDAY] = 1 << t->tm_wday;
47644 +       else
47645 +               regs[DS1307_REG_WDAY] = bin2bcd(t->tm_wday + 1);
47646         regs[DS1307_REG_MDAY] = bin2bcd(t->tm_mday);
47647         regs[DS1307_REG_MONTH] = bin2bcd(t->tm_mon + 1);
47649 diff --git a/drivers/rtc/rtc-fsl-ftm-alarm.c b/drivers/rtc/rtc-fsl-ftm-alarm.c
47650 index 57cc09d0a806..c0df49fb978c 100644
47651 --- a/drivers/rtc/rtc-fsl-ftm-alarm.c
47652 +++ b/drivers/rtc/rtc-fsl-ftm-alarm.c
47653 @@ -310,6 +310,7 @@ static const struct of_device_id ftm_rtc_match[] = {
47654         { .compatible = "fsl,lx2160a-ftm-alarm", },
47655         { },
47656  };
47657 +MODULE_DEVICE_TABLE(of, ftm_rtc_match);
47659  static const struct acpi_device_id ftm_imx_acpi_ids[] = {
47660         {"NXP0014",},
47661 diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
47662 index 288abb1abdb8..bc89c62ccb9b 100644
47663 --- a/drivers/rtc/rtc-tps65910.c
47664 +++ b/drivers/rtc/rtc-tps65910.c
47665 @@ -18,6 +18,7 @@
47666  #include <linux/rtc.h>
47667  #include <linux/bcd.h>
47668  #include <linux/math64.h>
47669 +#include <linux/property.h>
47670  #include <linux/platform_device.h>
47671  #include <linux/interrupt.h>
47672  #include <linux/mfd/tps65910.h>
47673 diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c
47674 index 2018614f258f..fc19b312c345 100644
47675 --- a/drivers/rtc/rtc-wm8350.c
47676 +++ b/drivers/rtc/rtc-wm8350.c
47677 @@ -114,7 +114,7 @@ static int wm8350_rtc_settime(struct device *dev, struct rtc_time *tm)
47678         /* Wait until confirmation of stopping */
47679         do {
47680                 rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
47681 -               schedule_timeout_uninterruptible(msecs_to_jiffies(1));
47682 +               schedule_msec_hrtimeout_uninterruptible((1));
47683         } while (--retries && !(rtc_ctrl & WM8350_RTC_STS));
47685         if (!retries) {
47686 @@ -197,7 +197,7 @@ static int wm8350_rtc_stop_alarm(struct wm8350 *wm8350)
47687         /* Wait until confirmation of stopping */
47688         do {
47689                 rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
47690 -               schedule_timeout_uninterruptible(msecs_to_jiffies(1));
47691 +               schedule_msec_hrtimeout_uninterruptible((1));
47692         } while (retries-- && !(rtc_ctrl & WM8350_RTC_ALMSTS));
47694         if (!(rtc_ctrl & WM8350_RTC_ALMSTS))
47695 @@ -220,7 +220,7 @@ static int wm8350_rtc_start_alarm(struct wm8350 *wm8350)
47696         /* Wait until confirmation */
47697         do {
47698                 rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
47699 -               schedule_timeout_uninterruptible(msecs_to_jiffies(1));
47700 +               schedule_msec_hrtimeout_uninterruptible((1));
47701         } while (retries-- && rtc_ctrl & WM8350_RTC_ALMSTS);
47703         if (rtc_ctrl & WM8350_RTC_ALMSTS)
47704 diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
47705 index 3f026021e95e..84f659cafe76 100644
47706 --- a/drivers/s390/cio/device.c
47707 +++ b/drivers/s390/cio/device.c
47708 @@ -1532,8 +1532,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
47709         switch (action) {
47710         case IO_SCH_ORPH_UNREG:
47711         case IO_SCH_UNREG:
47712 -               if (!cdev)
47713 -                       css_sch_device_unregister(sch);
47714 +               css_sch_device_unregister(sch);
47715                 break;
47716         case IO_SCH_ORPH_ATTACH:
47717         case IO_SCH_UNREG_ATTACH:
47718 diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
47719 index 34bf2f197c71..0e0044d70844 100644
47720 --- a/drivers/s390/cio/qdio.h
47721 +++ b/drivers/s390/cio/qdio.h
47722 @@ -181,12 +181,6 @@ struct qdio_input_q {
47723  struct qdio_output_q {
47724         /* PCIs are enabled for the queue */
47725         int pci_out_enabled;
47726 -       /* cq: use asynchronous output buffers */
47727 -       int use_cq;
47728 -       /* cq: aobs used for particual SBAL */
47729 -       struct qaob **aobs;
47730 -       /* cq: sbal state related to asynchronous operation */
47731 -       struct qdio_outbuf_state *sbal_state;
47732         /* timer to check for more outbound work */
47733         struct timer_list timer;
47734         /* tasklet to check for completions */
47735 @@ -379,12 +373,8 @@ int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data);
47736  void qdio_shutdown_irq(struct qdio_irq *irq);
47737  void qdio_print_subchannel_info(struct qdio_irq *irq_ptr);
47738  void qdio_free_queues(struct qdio_irq *irq_ptr);
47739 -void qdio_free_async_data(struct qdio_irq *irq_ptr);
47740  int qdio_setup_init(void);
47741  void qdio_setup_exit(void);
47742 -int qdio_enable_async_operation(struct qdio_output_q *q);
47743 -void qdio_disable_async_operation(struct qdio_output_q *q);
47744 -struct qaob *qdio_allocate_aob(void);
47746  int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
47747                         unsigned char *state);
47748 diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
47749 index 03a011619908..307ce7ff5ca4 100644
47750 --- a/drivers/s390/cio/qdio_main.c
47751 +++ b/drivers/s390/cio/qdio_main.c
47752 @@ -517,24 +517,6 @@ static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
47753         return 1;
47756 -static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
47757 -                                       int bufnr)
47759 -       unsigned long phys_aob = 0;
47761 -       if (!q->aobs[bufnr]) {
47762 -               struct qaob *aob = qdio_allocate_aob();
47763 -               q->aobs[bufnr] = aob;
47764 -       }
47765 -       if (q->aobs[bufnr]) {
47766 -               q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
47767 -               phys_aob = virt_to_phys(q->aobs[bufnr]);
47768 -               WARN_ON_ONCE(phys_aob & 0xFF);
47769 -       }
47771 -       return phys_aob;
47774  static inline int qdio_tasklet_schedule(struct qdio_q *q)
47776         if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
47777 @@ -548,7 +530,6 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
47778                                         unsigned int *error)
47780         unsigned char state = 0;
47781 -       unsigned int i;
47782         int count;
47784         q->timestamp = get_tod_clock_fast();
47785 @@ -570,10 +551,6 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
47787         switch (state) {
47788         case SLSB_P_OUTPUT_PENDING:
47789 -               /* detach the utilized QAOBs: */
47790 -               for (i = 0; i < count; i++)
47791 -                       q->u.out.aobs[QDIO_BUFNR(start + i)] = NULL;
47793                 *error = QDIO_ERROR_SLSB_PENDING;
47794                 fallthrough;
47795         case SLSB_P_OUTPUT_EMPTY:
47796 @@ -999,7 +976,6 @@ int qdio_free(struct ccw_device *cdev)
47797         cdev->private->qdio_data = NULL;
47798         mutex_unlock(&irq_ptr->setup_mutex);
47800 -       qdio_free_async_data(irq_ptr);
47801         qdio_free_queues(irq_ptr);
47802         free_page((unsigned long) irq_ptr->qdr);
47803         free_page(irq_ptr->chsc_page);
47804 @@ -1075,28 +1051,6 @@ int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
47806  EXPORT_SYMBOL_GPL(qdio_allocate);
47808 -static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
47810 -       struct qdio_q *q = irq_ptr->input_qs[0];
47811 -       int i, use_cq = 0;
47813 -       if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
47814 -               use_cq = 1;
47816 -       for_each_output_queue(irq_ptr, q, i) {
47817 -               if (use_cq) {
47818 -                       if (multicast_outbound(q))
47819 -                               continue;
47820 -                       if (qdio_enable_async_operation(&q->u.out) < 0) {
47821 -                               use_cq = 0;
47822 -                               continue;
47823 -                       }
47824 -               } else
47825 -                       qdio_disable_async_operation(&q->u.out);
47826 -       }
47827 -       DBF_EVENT("use_cq:%d", use_cq);
47830  static void qdio_trace_init_data(struct qdio_irq *irq,
47831                                  struct qdio_initialize *data)
47833 @@ -1191,8 +1145,6 @@ int qdio_establish(struct ccw_device *cdev,
47835         qdio_setup_ssqd_info(irq_ptr);
47837 -       qdio_detect_hsicq(irq_ptr);
47839         /* qebsm is now setup if available, initialize buffer states */
47840         qdio_init_buf_states(irq_ptr);
47842 @@ -1297,9 +1249,11 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
47843   * @callflags: flags
47844   * @bufnr: first buffer to process
47845   * @count: how many buffers are filled
47846 + * @aob: asynchronous operation block
47847   */
47848  static int handle_outbound(struct qdio_q *q, unsigned int callflags,
47849 -                          unsigned int bufnr, unsigned int count)
47850 +                          unsigned int bufnr, unsigned int count,
47851 +                          struct qaob *aob)
47853         const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
47854         unsigned char state = 0;
47855 @@ -1320,11 +1274,9 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
47856                 q->u.out.pci_out_enabled = 0;
47858         if (queue_type(q) == QDIO_IQDIO_QFMT) {
47859 -               unsigned long phys_aob = 0;
47861 -               if (q->u.out.use_cq && count == 1)
47862 -                       phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
47863 +               unsigned long phys_aob = aob ? virt_to_phys(aob) : 0;
47865 +               WARN_ON_ONCE(!IS_ALIGNED(phys_aob, 256));
47866                 rc = qdio_kick_outbound_q(q, count, phys_aob);
47867         } else if (need_siga_sync(q)) {
47868                 rc = qdio_siga_sync_q(q);
47869 @@ -1359,9 +1311,10 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
47870   * @q_nr: queue number
47871   * @bufnr: buffer number
47872   * @count: how many buffers to process
47873 + * @aob: asynchronous operation block (outbound only)
47874   */
47875  int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
47876 -           int q_nr, unsigned int bufnr, unsigned int count)
47877 +           int q_nr, unsigned int bufnr, unsigned int count, struct qaob *aob)
47879         struct qdio_irq *irq_ptr = cdev->private->qdio_data;
47881 @@ -1383,7 +1336,7 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
47882                                       callflags, bufnr, count);
47883         else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
47884                 return handle_outbound(irq_ptr->output_qs[q_nr],
47885 -                                      callflags, bufnr, count);
47886 +                                      callflags, bufnr, count, aob);
47887         return -EINVAL;
47889  EXPORT_SYMBOL_GPL(do_QDIO);
47890 diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
47891 index c8b9620bc688..da67e4979402 100644
47892 --- a/drivers/s390/cio/qdio_setup.c
47893 +++ b/drivers/s390/cio/qdio_setup.c
47894 @@ -30,6 +30,7 @@ struct qaob *qdio_allocate_aob(void)
47896         return kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
47898 +EXPORT_SYMBOL_GPL(qdio_allocate_aob);
47900  void qdio_release_aob(struct qaob *aob)
47902 @@ -247,8 +248,6 @@ static void setup_queues(struct qdio_irq *irq_ptr,
47903                          struct qdio_initialize *qdio_init)
47905         struct qdio_q *q;
47906 -       struct qdio_outbuf_state *output_sbal_state_array =
47907 -                                 qdio_init->output_sbal_state_array;
47908         int i;
47910         for_each_input_queue(irq_ptr, q, i) {
47911 @@ -265,9 +264,6 @@ static void setup_queues(struct qdio_irq *irq_ptr,
47912                 DBF_EVENT("outq:%1d", i);
47913                 setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);
47915 -               q->u.out.sbal_state = output_sbal_state_array;
47916 -               output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q;
47918                 q->is_input_q = 0;
47919                 setup_storage_lists(q, irq_ptr,
47920                                     qdio_init->output_sbal_addr_array[i], i);
47921 @@ -372,30 +368,6 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
47922         DBF_EVENT("3:%4x qib:%4x", irq_ptr->ssqd_desc.qdioac3, irq_ptr->qib.ac);
47925 -void qdio_free_async_data(struct qdio_irq *irq_ptr)
47927 -       struct qdio_q *q;
47928 -       int i;
47930 -       for (i = 0; i < irq_ptr->max_output_qs; i++) {
47931 -               q = irq_ptr->output_qs[i];
47932 -               if (q->u.out.use_cq) {
47933 -                       unsigned int n;
47935 -                       for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; n++) {
47936 -                               struct qaob *aob = q->u.out.aobs[n];
47938 -                               if (aob) {
47939 -                                       qdio_release_aob(aob);
47940 -                                       q->u.out.aobs[n] = NULL;
47941 -                               }
47942 -                       }
47944 -                       qdio_disable_async_operation(&q->u.out);
47945 -               }
47946 -       }
47949  static void qdio_fill_qdr_desc(struct qdesfmt0 *desc, struct qdio_q *queue)
47951         desc->sliba = virt_to_phys(queue->slib);
47952 @@ -545,25 +517,6 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr)
47953         printk(KERN_INFO "%s", s);
47956 -int qdio_enable_async_operation(struct qdio_output_q *outq)
47958 -       outq->aobs = kcalloc(QDIO_MAX_BUFFERS_PER_Q, sizeof(struct qaob *),
47959 -                            GFP_KERNEL);
47960 -       if (!outq->aobs) {
47961 -               outq->use_cq = 0;
47962 -               return -ENOMEM;
47963 -       }
47964 -       outq->use_cq = 1;
47965 -       return 0;
47968 -void qdio_disable_async_operation(struct qdio_output_q *q)
47970 -       kfree(q->aobs);
47971 -       q->aobs = NULL;
47972 -       q->use_cq = 0;
47975  int __init qdio_setup_init(void)
47977         int rc;
47978 diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
47979 index 1ffdd411201c..6946a7e26eff 100644
47980 --- a/drivers/s390/crypto/vfio_ap_ops.c
47981 +++ b/drivers/s390/crypto/vfio_ap_ops.c
47982 @@ -294,6 +294,19 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
47983         matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook,
47984                                    struct ap_matrix_mdev, pqap_hook);
47986 +       /*
47987 +        * If the KVM pointer is in the process of being set, wait until the
47988 +        * process has completed.
47989 +        */
47990 +       wait_event_cmd(matrix_mdev->wait_for_kvm,
47991 +                      !matrix_mdev->kvm_busy,
47992 +                      mutex_unlock(&matrix_dev->lock),
47993 +                      mutex_lock(&matrix_dev->lock));
47995 +       /* If the there is no guest using the mdev, there is nothing to do */
47996 +       if (!matrix_mdev->kvm)
47997 +               goto out_unlock;
47999         q = vfio_ap_get_queue(matrix_mdev, apqn);
48000         if (!q)
48001                 goto out_unlock;
48002 @@ -337,6 +350,7 @@ static int vfio_ap_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
48004         matrix_mdev->mdev = mdev;
48005         vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
48006 +       init_waitqueue_head(&matrix_mdev->wait_for_kvm);
48007         mdev_set_drvdata(mdev, matrix_mdev);
48008         matrix_mdev->pqap_hook.hook = handle_pqap;
48009         matrix_mdev->pqap_hook.owner = THIS_MODULE;
48010 @@ -351,17 +365,23 @@ static int vfio_ap_mdev_remove(struct mdev_device *mdev)
48012         struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
48014 -       if (matrix_mdev->kvm)
48015 +       mutex_lock(&matrix_dev->lock);
48017 +       /*
48018 +        * If the KVM pointer is in flux or the guest is running, disallow
48019 +        * un-assignment of control domain.
48020 +        */
48021 +       if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
48022 +               mutex_unlock(&matrix_dev->lock);
48023                 return -EBUSY;
48024 +       }
48026 -       mutex_lock(&matrix_dev->lock);
48027         vfio_ap_mdev_reset_queues(mdev);
48028         list_del(&matrix_mdev->node);
48029 -       mutex_unlock(&matrix_dev->lock);
48031         kfree(matrix_mdev);
48032         mdev_set_drvdata(mdev, NULL);
48033         atomic_inc(&matrix_dev->available_instances);
48034 +       mutex_unlock(&matrix_dev->lock);
48036         return 0;
48038 @@ -606,24 +626,31 @@ static ssize_t assign_adapter_store(struct device *dev,
48039         struct mdev_device *mdev = mdev_from_dev(dev);
48040         struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
48042 -       /* If the guest is running, disallow assignment of adapter */
48043 -       if (matrix_mdev->kvm)
48044 -               return -EBUSY;
48045 +       mutex_lock(&matrix_dev->lock);
48047 +       /*
48048 +        * If the KVM pointer is in flux or the guest is running, disallow
48049 +        * un-assignment of adapter
48050 +        */
48051 +       if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
48052 +               ret = -EBUSY;
48053 +               goto done;
48054 +       }
48056         ret = kstrtoul(buf, 0, &apid);
48057         if (ret)
48058 -               return ret;
48059 +               goto done;
48061 -       if (apid > matrix_mdev->matrix.apm_max)
48062 -               return -ENODEV;
48063 +       if (apid > matrix_mdev->matrix.apm_max) {
48064 +               ret = -ENODEV;
48065 +               goto done;
48066 +       }
48068         /*
48069          * Set the bit in the AP mask (APM) corresponding to the AP adapter
48070          * number (APID). The bits in the mask, from most significant to least
48071          * significant bit, correspond to APIDs 0-255.
48072          */
48073 -       mutex_lock(&matrix_dev->lock);
48075         ret = vfio_ap_mdev_verify_queues_reserved_for_apid(matrix_mdev, apid);
48076         if (ret)
48077                 goto done;
48078 @@ -672,22 +699,31 @@ static ssize_t unassign_adapter_store(struct device *dev,
48079         struct mdev_device *mdev = mdev_from_dev(dev);
48080         struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
48082 -       /* If the guest is running, disallow un-assignment of adapter */
48083 -       if (matrix_mdev->kvm)
48084 -               return -EBUSY;
48085 +       mutex_lock(&matrix_dev->lock);
48087 +       /*
48088 +        * If the KVM pointer is in flux or the guest is running, disallow
48089 +        * un-assignment of adapter
48090 +        */
48091 +       if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
48092 +               ret = -EBUSY;
48093 +               goto done;
48094 +       }
48096         ret = kstrtoul(buf, 0, &apid);
48097         if (ret)
48098 -               return ret;
48099 +               goto done;
48101 -       if (apid > matrix_mdev->matrix.apm_max)
48102 -               return -ENODEV;
48103 +       if (apid > matrix_mdev->matrix.apm_max) {
48104 +               ret = -ENODEV;
48105 +               goto done;
48106 +       }
48108 -       mutex_lock(&matrix_dev->lock);
48109         clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
48110 +       ret = count;
48111 +done:
48112         mutex_unlock(&matrix_dev->lock);
48114 -       return count;
48115 +       return ret;
48117  static DEVICE_ATTR_WO(unassign_adapter);
48119 @@ -753,17 +789,24 @@ static ssize_t assign_domain_store(struct device *dev,
48120         struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
48121         unsigned long max_apqi = matrix_mdev->matrix.aqm_max;
48123 -       /* If the guest is running, disallow assignment of domain */
48124 -       if (matrix_mdev->kvm)
48125 -               return -EBUSY;
48126 +       mutex_lock(&matrix_dev->lock);
48128 +       /*
48129 +        * If the KVM pointer is in flux or the guest is running, disallow
48130 +        * assignment of domain
48131 +        */
48132 +       if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
48133 +               ret = -EBUSY;
48134 +               goto done;
48135 +       }
48137         ret = kstrtoul(buf, 0, &apqi);
48138         if (ret)
48139 -               return ret;
48140 -       if (apqi > max_apqi)
48141 -               return -ENODEV;
48143 -       mutex_lock(&matrix_dev->lock);
48144 +               goto done;
48145 +       if (apqi > max_apqi) {
48146 +               ret = -ENODEV;
48147 +               goto done;
48148 +       }
48150         ret = vfio_ap_mdev_verify_queues_reserved_for_apqi(matrix_mdev, apqi);
48151         if (ret)
48152 @@ -814,22 +857,32 @@ static ssize_t unassign_domain_store(struct device *dev,
48153         struct mdev_device *mdev = mdev_from_dev(dev);
48154         struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
48156 -       /* If the guest is running, disallow un-assignment of domain */
48157 -       if (matrix_mdev->kvm)
48158 -               return -EBUSY;
48159 +       mutex_lock(&matrix_dev->lock);
48161 +       /*
48162 +        * If the KVM pointer is in flux or the guest is running, disallow
48163 +        * un-assignment of domain
48164 +        */
48165 +       if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
48166 +               ret = -EBUSY;
48167 +               goto done;
48168 +       }
48170         ret = kstrtoul(buf, 0, &apqi);
48171         if (ret)
48172 -               return ret;
48173 +               goto done;
48175 -       if (apqi > matrix_mdev->matrix.aqm_max)
48176 -               return -ENODEV;
48177 +       if (apqi > matrix_mdev->matrix.aqm_max) {
48178 +               ret = -ENODEV;
48179 +               goto done;
48180 +       }
48182 -       mutex_lock(&matrix_dev->lock);
48183         clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm);
48184 -       mutex_unlock(&matrix_dev->lock);
48185 +       ret = count;
48187 -       return count;
48188 +done:
48189 +       mutex_unlock(&matrix_dev->lock);
48190 +       return ret;
48192  static DEVICE_ATTR_WO(unassign_domain);
48194 @@ -858,27 +911,36 @@ static ssize_t assign_control_domain_store(struct device *dev,
48195         struct mdev_device *mdev = mdev_from_dev(dev);
48196         struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
48198 -       /* If the guest is running, disallow assignment of control domain */
48199 -       if (matrix_mdev->kvm)
48200 -               return -EBUSY;
48201 +       mutex_lock(&matrix_dev->lock);
48203 +       /*
48204 +        * If the KVM pointer is in flux or the guest is running, disallow
48205 +        * assignment of control domain.
48206 +        */
48207 +       if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
48208 +               ret = -EBUSY;
48209 +               goto done;
48210 +       }
48212         ret = kstrtoul(buf, 0, &id);
48213         if (ret)
48214 -               return ret;
48215 +               goto done;
48217 -       if (id > matrix_mdev->matrix.adm_max)
48218 -               return -ENODEV;
48219 +       if (id > matrix_mdev->matrix.adm_max) {
48220 +               ret = -ENODEV;
48221 +               goto done;
48222 +       }
48224         /* Set the bit in the ADM (bitmask) corresponding to the AP control
48225          * domain number (id). The bits in the mask, from most significant to
48226          * least significant, correspond to IDs 0 up to the one less than the
48227          * number of control domains that can be assigned.
48228          */
48229 -       mutex_lock(&matrix_dev->lock);
48230         set_bit_inv(id, matrix_mdev->matrix.adm);
48231 +       ret = count;
48232 +done:
48233         mutex_unlock(&matrix_dev->lock);
48235 -       return count;
48236 +       return ret;
48238  static DEVICE_ATTR_WO(assign_control_domain);
48240 @@ -908,21 +970,30 @@ static ssize_t unassign_control_domain_store(struct device *dev,
48241         struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
48242         unsigned long max_domid =  matrix_mdev->matrix.adm_max;
48244 -       /* If the guest is running, disallow un-assignment of control domain */
48245 -       if (matrix_mdev->kvm)
48246 -               return -EBUSY;
48247 +       mutex_lock(&matrix_dev->lock);
48249 +       /*
48250 +        * If the KVM pointer is in flux or the guest is running, disallow
48251 +        * un-assignment of control domain.
48252 +        */
48253 +       if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
48254 +               ret = -EBUSY;
48255 +               goto done;
48256 +       }
48258         ret = kstrtoul(buf, 0, &domid);
48259         if (ret)
48260 -               return ret;
48261 -       if (domid > max_domid)
48262 -               return -ENODEV;
48263 +               goto done;
48264 +       if (domid > max_domid) {
48265 +               ret = -ENODEV;
48266 +               goto done;
48267 +       }
48269 -       mutex_lock(&matrix_dev->lock);
48270         clear_bit_inv(domid, matrix_mdev->matrix.adm);
48271 +       ret = count;
48272 +done:
48273         mutex_unlock(&matrix_dev->lock);
48275 -       return count;
48276 +       return ret;
48278  static DEVICE_ATTR_WO(unassign_control_domain);
48280 @@ -1027,8 +1098,15 @@ static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
48281   * @matrix_mdev: a mediated matrix device
48282   * @kvm: reference to KVM instance
48283   *
48284 - * Verifies no other mediated matrix device has @kvm and sets a reference to
48285 - * it in @matrix_mdev->kvm.
48286 + * Sets all data for @matrix_mdev that are needed to manage AP resources
48287 + * for the guest whose state is represented by @kvm.
48288 + *
48289 + * Note: The matrix_dev->lock must be taken prior to calling
48290 + * this function; however, the lock will be temporarily released while the
48291 + * guest's AP configuration is set to avoid a potential lockdep splat.
48292 + * The kvm->lock is taken to set the guest's AP configuration which, under
48293 + * certain circumstances, will result in a circular lock dependency if this is
48294 + * done under the @matrix_mdev->lock.
48295   *
48296   * Return 0 if no other mediated matrix device has a reference to @kvm;
48297   * otherwise, returns an -EPERM.
48298 @@ -1038,14 +1116,25 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
48300         struct ap_matrix_mdev *m;
48302 -       list_for_each_entry(m, &matrix_dev->mdev_list, node) {
48303 -               if ((m != matrix_mdev) && (m->kvm == kvm))
48304 -                       return -EPERM;
48305 -       }
48306 +       if (kvm->arch.crypto.crycbd) {
48307 +               list_for_each_entry(m, &matrix_dev->mdev_list, node) {
48308 +                       if (m != matrix_mdev && m->kvm == kvm)
48309 +                               return -EPERM;
48310 +               }
48312 -       matrix_mdev->kvm = kvm;
48313 -       kvm_get_kvm(kvm);
48314 -       kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
48315 +               kvm_get_kvm(kvm);
48316 +               matrix_mdev->kvm_busy = true;
48317 +               mutex_unlock(&matrix_dev->lock);
48318 +               kvm_arch_crypto_set_masks(kvm,
48319 +                                         matrix_mdev->matrix.apm,
48320 +                                         matrix_mdev->matrix.aqm,
48321 +                                         matrix_mdev->matrix.adm);
48322 +               mutex_lock(&matrix_dev->lock);
48323 +               kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
48324 +               matrix_mdev->kvm = kvm;
48325 +               matrix_mdev->kvm_busy = false;
48326 +               wake_up_all(&matrix_mdev->wait_for_kvm);
48327 +       }
48329         return 0;
48331 @@ -1079,51 +1168,65 @@ static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
48332         return NOTIFY_DONE;
48335 +/**
48336 + * vfio_ap_mdev_unset_kvm
48337 + *
48338 + * @matrix_mdev: a matrix mediated device
48339 + *
48340 + * Performs clean-up of resources no longer needed by @matrix_mdev.
48341 + *
48342 + * Note: The matrix_dev->lock must be taken prior to calling
48343 + * this function; however, the lock will be temporarily released while the
48344 + * guest's AP configuration is cleared to avoid a potential lockdep splat.
48345 + * The kvm->lock is taken to clear the guest's AP configuration which, under
48346 + * certain circumstances, will result in a circular lock dependency if this is
48347 + * done under the @matrix_mdev->lock.
48348 + *
48349 + */
48350  static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
48352 -       kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
48353 -       matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
48354 -       vfio_ap_mdev_reset_queues(matrix_mdev->mdev);
48355 -       kvm_put_kvm(matrix_mdev->kvm);
48356 -       matrix_mdev->kvm = NULL;
48357 +       /*
48358 +        * If the KVM pointer is in the process of being set, wait until the
48359 +        * process has completed.
48360 +        */
48361 +       wait_event_cmd(matrix_mdev->wait_for_kvm,
48362 +                      !matrix_mdev->kvm_busy,
48363 +                      mutex_unlock(&matrix_dev->lock),
48364 +                      mutex_lock(&matrix_dev->lock));
48366 +       if (matrix_mdev->kvm) {
48367 +               matrix_mdev->kvm_busy = true;
48368 +               mutex_unlock(&matrix_dev->lock);
48369 +               kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
48370 +               mutex_lock(&matrix_dev->lock);
48371 +               vfio_ap_mdev_reset_queues(matrix_mdev->mdev);
48372 +               matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
48373 +               kvm_put_kvm(matrix_mdev->kvm);
48374 +               matrix_mdev->kvm = NULL;
48375 +               matrix_mdev->kvm_busy = false;
48376 +               wake_up_all(&matrix_mdev->wait_for_kvm);
48377 +       }
48380  static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
48381                                        unsigned long action, void *data)
48383 -       int ret, notify_rc = NOTIFY_OK;
48384 +       int notify_rc = NOTIFY_OK;
48385         struct ap_matrix_mdev *matrix_mdev;
48387         if (action != VFIO_GROUP_NOTIFY_SET_KVM)
48388                 return NOTIFY_OK;
48390 -       matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
48391         mutex_lock(&matrix_dev->lock);
48392 +       matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
48394 -       if (!data) {
48395 -               if (matrix_mdev->kvm)
48396 -                       vfio_ap_mdev_unset_kvm(matrix_mdev);
48397 -               goto notify_done;
48398 -       }
48400 -       ret = vfio_ap_mdev_set_kvm(matrix_mdev, data);
48401 -       if (ret) {
48402 -               notify_rc = NOTIFY_DONE;
48403 -               goto notify_done;
48404 -       }
48406 -       /* If there is no CRYCB pointer, then we can't copy the masks */
48407 -       if (!matrix_mdev->kvm->arch.crypto.crycbd) {
48408 +       if (!data)
48409 +               vfio_ap_mdev_unset_kvm(matrix_mdev);
48410 +       else if (vfio_ap_mdev_set_kvm(matrix_mdev, data))
48411                 notify_rc = NOTIFY_DONE;
48412 -               goto notify_done;
48413 -       }
48415 -       kvm_arch_crypto_set_masks(matrix_mdev->kvm, matrix_mdev->matrix.apm,
48416 -                                 matrix_mdev->matrix.aqm,
48417 -                                 matrix_mdev->matrix.adm);
48419 -notify_done:
48420         mutex_unlock(&matrix_dev->lock);
48422         return notify_rc;
48425 @@ -1258,8 +1361,7 @@ static void vfio_ap_mdev_release(struct mdev_device *mdev)
48426         struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
48428         mutex_lock(&matrix_dev->lock);
48429 -       if (matrix_mdev->kvm)
48430 -               vfio_ap_mdev_unset_kvm(matrix_mdev);
48431 +       vfio_ap_mdev_unset_kvm(matrix_mdev);
48432         mutex_unlock(&matrix_dev->lock);
48434         vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
48435 @@ -1293,6 +1395,7 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
48436                                     unsigned int cmd, unsigned long arg)
48438         int ret;
48439 +       struct ap_matrix_mdev *matrix_mdev;
48441         mutex_lock(&matrix_dev->lock);
48442         switch (cmd) {
48443 @@ -1300,6 +1403,21 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
48444                 ret = vfio_ap_mdev_get_device_info(arg);
48445                 break;
48446         case VFIO_DEVICE_RESET:
48447 +               matrix_mdev = mdev_get_drvdata(mdev);
48448 +               if (WARN(!matrix_mdev, "Driver data missing from mdev!!")) {
48449 +                       ret = -EINVAL;
48450 +                       break;
48451 +               }
48453 +               /*
48454 +                * If the KVM pointer is in the process of being set, wait until
48455 +                * the process has completed.
48456 +                */
48457 +               wait_event_cmd(matrix_mdev->wait_for_kvm,
48458 +                              !matrix_mdev->kvm_busy,
48459 +                              mutex_unlock(&matrix_dev->lock),
48460 +                              mutex_lock(&matrix_dev->lock));
48462                 ret = vfio_ap_mdev_reset_queues(mdev);
48463                 break;
48464         default:
48465 diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
48466 index 28e9d9989768..f82a6396acae 100644
48467 --- a/drivers/s390/crypto/vfio_ap_private.h
48468 +++ b/drivers/s390/crypto/vfio_ap_private.h
48469 @@ -83,6 +83,8 @@ struct ap_matrix_mdev {
48470         struct ap_matrix matrix;
48471         struct notifier_block group_notifier;
48472         struct notifier_block iommu_notifier;
48473 +       bool kvm_busy;
48474 +       wait_queue_head_t wait_for_kvm;
48475         struct kvm *kvm;
48476         struct kvm_s390_module_hook pqap_hook;
48477         struct mdev_device *mdev;
48478 diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c
48479 index 33b23884b133..09fe6bb8880b 100644
48480 --- a/drivers/s390/crypto/zcrypt_card.c
48481 +++ b/drivers/s390/crypto/zcrypt_card.c
48482 @@ -192,5 +192,6 @@ void zcrypt_card_unregister(struct zcrypt_card *zc)
48483         spin_unlock(&zcrypt_list_lock);
48484         sysfs_remove_group(&zc->card->ap_dev.device.kobj,
48485                            &zcrypt_card_attr_group);
48486 +       zcrypt_card_put(zc);
48488  EXPORT_SYMBOL(zcrypt_card_unregister);
48489 diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c
48490 index 5062eae73d4a..c3ffbd26b73f 100644
48491 --- a/drivers/s390/crypto/zcrypt_queue.c
48492 +++ b/drivers/s390/crypto/zcrypt_queue.c
48493 @@ -223,5 +223,6 @@ void zcrypt_queue_unregister(struct zcrypt_queue *zq)
48494         sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
48495                            &zcrypt_queue_attr_group);
48496         zcrypt_card_put(zc);
48497 +       zcrypt_queue_put(zq);
48499  EXPORT_SYMBOL(zcrypt_queue_unregister);
48500 diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
48501 index 91acff493612..fd9b869d278e 100644
48502 --- a/drivers/s390/net/qeth_core.h
48503 +++ b/drivers/s390/net/qeth_core.h
48504 @@ -437,6 +437,7 @@ struct qeth_qdio_out_buffer {
48506         struct qeth_qdio_out_q *q;
48507         struct list_head list_entry;
48508 +       struct qaob *aob;
48509  };
48511  struct qeth_card;
48512 @@ -499,7 +500,6 @@ struct qeth_out_q_stats {
48513  struct qeth_qdio_out_q {
48514         struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
48515         struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
48516 -       struct qdio_outbuf_state *bufstates; /* convenience pointer */
48517         struct list_head pending_bufs;
48518         struct qeth_out_q_stats stats;
48519         spinlock_t lock;
48520 @@ -563,7 +563,6 @@ struct qeth_qdio_info {
48521         /* output */
48522         unsigned int no_out_queues;
48523         struct qeth_qdio_out_q *out_qs[QETH_MAX_OUT_QUEUES];
48524 -       struct qdio_outbuf_state *out_bufstates;
48526         /* priority queueing */
48527         int do_prio_queueing;
48528 diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
48529 index a814698387bc..175b82b98f36 100644
48530 --- a/drivers/s390/net/qeth_core_main.c
48531 +++ b/drivers/s390/net/qeth_core_main.c
48532 @@ -369,8 +369,7 @@ static int qeth_cq_init(struct qeth_card *card)
48533                                    QDIO_MAX_BUFFERS_PER_Q);
48534                 card->qdio.c_q->next_buf_to_init = 127;
48535                 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
48536 -                            card->qdio.no_in_queues - 1, 0,
48537 -                            127);
48538 +                            card->qdio.no_in_queues - 1, 0, 127, NULL);
48539                 if (rc) {
48540                         QETH_CARD_TEXT_(card, 2, "1err%d", rc);
48541                         goto out;
48542 @@ -383,48 +382,22 @@ static int qeth_cq_init(struct qeth_card *card)
48544  static int qeth_alloc_cq(struct qeth_card *card)
48546 -       int rc;
48548         if (card->options.cq == QETH_CQ_ENABLED) {
48549 -               int i;
48550 -               struct qdio_outbuf_state *outbuf_states;
48552                 QETH_CARD_TEXT(card, 2, "cqon");
48553                 card->qdio.c_q = qeth_alloc_qdio_queue();
48554                 if (!card->qdio.c_q) {
48555 -                       rc = -1;
48556 -                       goto kmsg_out;
48557 +                       dev_err(&card->gdev->dev, "Failed to create completion queue\n");
48558 +                       return -ENOMEM;
48559                 }
48561                 card->qdio.no_in_queues = 2;
48562 -               card->qdio.out_bufstates =
48563 -                       kcalloc(card->qdio.no_out_queues *
48564 -                                       QDIO_MAX_BUFFERS_PER_Q,
48565 -                               sizeof(struct qdio_outbuf_state),
48566 -                               GFP_KERNEL);
48567 -               outbuf_states = card->qdio.out_bufstates;
48568 -               if (outbuf_states == NULL) {
48569 -                       rc = -1;
48570 -                       goto free_cq_out;
48571 -               }
48572 -               for (i = 0; i < card->qdio.no_out_queues; ++i) {
48573 -                       card->qdio.out_qs[i]->bufstates = outbuf_states;
48574 -                       outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
48575 -               }
48576         } else {
48577                 QETH_CARD_TEXT(card, 2, "nocq");
48578                 card->qdio.c_q = NULL;
48579                 card->qdio.no_in_queues = 1;
48580         }
48581         QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
48582 -       rc = 0;
48583 -out:
48584 -       return rc;
48585 -free_cq_out:
48586 -       qeth_free_qdio_queue(card->qdio.c_q);
48587 -       card->qdio.c_q = NULL;
48588 -kmsg_out:
48589 -       dev_err(&card->gdev->dev, "Failed to create completion queue\n");
48590 -       goto out;
48591 +       return 0;
48594  static void qeth_free_cq(struct qeth_card *card)
48595 @@ -434,8 +407,6 @@ static void qeth_free_cq(struct qeth_card *card)
48596                 qeth_free_qdio_queue(card->qdio.c_q);
48597                 card->qdio.c_q = NULL;
48598         }
48599 -       kfree(card->qdio.out_bufstates);
48600 -       card->qdio.out_bufstates = NULL;
48603  static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
48604 @@ -487,12 +458,12 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
48605         switch (atomic_xchg(&buffer->state, new_state)) {
48606         case QETH_QDIO_BUF_PRIMED:
48607                 /* Faster than TX completion code, let it handle the async
48608 -                * completion for us.
48609 +                * completion for us. It will also recycle the QAOB.
48610                  */
48611                 break;
48612         case QETH_QDIO_BUF_PENDING:
48613                 /* TX completion code is active and will handle the async
48614 -                * completion for us.
48615 +                * completion for us. It will also recycle the QAOB.
48616                  */
48617                 break;
48618         case QETH_QDIO_BUF_NEED_QAOB:
48619 @@ -501,7 +472,7 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
48620                 qeth_notify_skbs(buffer->q, buffer, notification);
48622                 /* Free dangling allocations. The attached skbs are handled by
48623 -                * qeth_tx_complete_pending_bufs().
48624 +                * qeth_tx_complete_pending_bufs(), and so is the QAOB.
48625                  */
48626                 for (i = 0;
48627                      i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
48628 @@ -520,8 +491,6 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
48629         default:
48630                 WARN_ON_ONCE(1);
48631         }
48633 -       qdio_release_aob(aob);
48636  static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
48637 @@ -1451,6 +1420,13 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
48638         atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
48641 +static void qeth_free_out_buf(struct qeth_qdio_out_buffer *buf)
48643 +       if (buf->aob)
48644 +               qdio_release_aob(buf->aob);
48645 +       kmem_cache_free(qeth_qdio_outbuf_cache, buf);
48648  static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
48649                                           struct qeth_qdio_out_q *queue,
48650                                           bool drain)
48651 @@ -1468,7 +1444,7 @@ static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
48652                         qeth_tx_complete_buf(buf, drain, 0);
48654                         list_del(&buf->list_entry);
48655 -                       kmem_cache_free(qeth_qdio_outbuf_cache, buf);
48656 +                       qeth_free_out_buf(buf);
48657                 }
48658         }
48660 @@ -1485,7 +1461,7 @@ static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
48662                 qeth_clear_output_buffer(q, q->bufs[j], true, 0);
48663                 if (free) {
48664 -                       kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
48665 +                       qeth_free_out_buf(q->bufs[j]);
48666                         q->bufs[j] = NULL;
48667                 }
48668         }
48669 @@ -2637,7 +2613,7 @@ static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
48671  err_out_bufs:
48672         while (i > 0)
48673 -               kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[--i]);
48674 +               qeth_free_out_buf(q->bufs[--i]);
48675         qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
48676  err_qdio_bufs:
48677         kfree(q);
48678 @@ -3024,7 +3000,8 @@ static int qeth_init_qdio_queues(struct qeth_card *card)
48679         }
48681         card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs);
48682 -       rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs);
48683 +       rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs,
48684 +                    NULL);
48685         if (rc) {
48686                 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
48687                 return rc;
48688 @@ -3516,7 +3493,7 @@ static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
48689                 }
48691                 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
48692 -                            queue->next_buf_to_init, count);
48693 +                            queue->next_buf_to_init, count, NULL);
48694                 if (rc) {
48695                         QETH_CARD_TEXT(card, 2, "qinberr");
48696                 }
48697 @@ -3625,6 +3602,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
48698         struct qeth_qdio_out_buffer *buf = queue->bufs[index];
48699         unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
48700         struct qeth_card *card = queue->card;
48701 +       struct qaob *aob = NULL;
48702         int rc;
48703         int i;
48705 @@ -3637,16 +3615,24 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
48706                                 SBAL_EFLAGS_LAST_ENTRY;
48707                 queue->coalesced_frames += buf->frames;
48709 -               if (queue->bufstates)
48710 -                       queue->bufstates[bidx].user = buf;
48712                 if (IS_IQD(card)) {
48713                         skb_queue_walk(&buf->skb_list, skb)
48714                                 skb_tx_timestamp(skb);
48715                 }
48716         }
48718 -       if (!IS_IQD(card)) {
48719 +       if (IS_IQD(card)) {
48720 +               if (card->options.cq == QETH_CQ_ENABLED &&
48721 +                   !qeth_iqd_is_mcast_queue(card, queue) &&
48722 +                   count == 1) {
48723 +                       if (!buf->aob)
48724 +                               buf->aob = qdio_allocate_aob();
48725 +                       if (buf->aob) {
48726 +                               aob = buf->aob;
48727 +                               aob->user1 = (u64) buf;
48728 +                       }
48729 +               }
48730 +       } else {
48731                 if (!queue->do_pack) {
48732                         if ((atomic_read(&queue->used_buffers) >=
48733                                 (QETH_HIGH_WATERMARK_PACK -
48734 @@ -3677,8 +3663,8 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
48735         }
48737         QETH_TXQ_STAT_INC(queue, doorbell);
48738 -       rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
48739 -                    queue->queue_no, index, count);
48740 +       rc = do_QDIO(CARD_DDEV(card), qdio_flags, queue->queue_no, index, count,
48741 +                    aob);
48743         switch (rc) {
48744         case 0:
48745 @@ -3814,8 +3800,7 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
48746                 qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
48747         }
48748         rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
48749 -                   card->qdio.c_q->next_buf_to_init,
48750 -                   count);
48751 +                    cq->next_buf_to_init, count, NULL);
48752         if (rc) {
48753                 dev_warn(&card->gdev->dev,
48754                         "QDIO reported an error, rc=%i\n", rc);
48755 @@ -5270,7 +5255,6 @@ static int qeth_qdio_establish(struct qeth_card *card)
48756         init_data.int_parm               = (unsigned long) card;
48757         init_data.input_sbal_addr_array  = in_sbal_ptrs;
48758         init_data.output_sbal_addr_array = out_sbal_ptrs;
48759 -       init_data.output_sbal_state_array = card->qdio.out_bufstates;
48760         init_data.scan_threshold         = IS_IQD(card) ? 0 : 32;
48762         if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
48763 @@ -6069,7 +6053,15 @@ static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
48764         bool error = !!qdio_error;
48766         if (qdio_error == QDIO_ERROR_SLSB_PENDING) {
48767 -               WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);
48768 +               struct qaob *aob = buffer->aob;
48770 +               if (!aob) {
48771 +                       netdev_WARN_ONCE(card->dev,
48772 +                                        "Pending TX buffer %#x without QAOB on TX queue %u\n",
48773 +                                        bidx, queue->queue_no);
48774 +                       qeth_schedule_recovery(card);
48775 +                       return;
48776 +               }
48778                 QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
48780 @@ -6125,6 +6117,8 @@ static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
48781                 default:
48782                         WARN_ON_ONCE(1);
48783                 }
48785 +               memset(aob, 0, sizeof(*aob));
48786         } else if (card->options.cq == QETH_CQ_ENABLED) {
48787                 qeth_notify_skbs(queue, buffer,
48788                                  qeth_compute_cq_notification(sflags, 0));
48789 diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
48790 index 23ab16d65f2a..049596cbfb5d 100644
48791 --- a/drivers/s390/scsi/zfcp_qdio.c
48792 +++ b/drivers/s390/scsi/zfcp_qdio.c
48793 @@ -128,7 +128,7 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
48794         /*
48795          * put SBALs back to response queue
48796          */
48797 -       if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count))
48798 +       if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count, NULL))
48799                 zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
48802 @@ -298,7 +298,7 @@ int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
48803         atomic_sub(sbal_number, &qdio->req_q_free);
48805         retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
48806 -                        q_req->sbal_first, sbal_number);
48807 +                        q_req->sbal_first, sbal_number, NULL);
48809         if (unlikely(retval)) {
48810                 /* Failed to submit the IO, roll back our modifications. */
48811 @@ -463,7 +463,8 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
48812                 sbale->addr = 0;
48813         }
48815 -       if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
48816 +       if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q,
48817 +                   NULL))
48818                 goto failed_qdio;
48820         /* set index of first available SBALS / number of available SBALS */
48821 diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
48822 index ea436a14087f..5eff3368143d 100644
48823 --- a/drivers/scsi/device_handler/scsi_dh_alua.c
48824 +++ b/drivers/scsi/device_handler/scsi_dh_alua.c
48825 @@ -573,10 +573,11 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
48826                  * even though it shouldn't according to T10.
48827                  * The retry without rtpg_ext_hdr_req set
48828                  * handles this.
48829 +                * Note:  some arrays return a sense key of ILLEGAL_REQUEST
48830 +                * with ASC 00h if they don't support the extended header.
48831                  */
48832                 if (!(pg->flags & ALUA_RTPG_EXT_HDR_UNSUPP) &&
48833 -                   sense_hdr.sense_key == ILLEGAL_REQUEST &&
48834 -                   sense_hdr.asc == 0x24 && sense_hdr.ascq == 0) {
48835 +                   sense_hdr.sense_key == ILLEGAL_REQUEST) {
48836                         pg->flags |= ALUA_RTPG_EXT_HDR_UNSUPP;
48837                         goto retry;
48838                 }
48839 diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
48840 index 36744968378f..09e49e21deb6 100644
48841 --- a/drivers/scsi/fnic/fnic_scsi.c
48842 +++ b/drivers/scsi/fnic/fnic_scsi.c
48843 @@ -217,7 +217,7 @@ int fnic_fw_reset_handler(struct fnic *fnic)
48845         /* wait for io cmpl */
48846         while (atomic_read(&fnic->in_flight))
48847 -               schedule_timeout(msecs_to_jiffies(1));
48848 +               schedule_msec_hrtimeout((1));
48850         spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
48852 @@ -2277,7 +2277,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
48853                 }
48854         }
48856 -       schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
48857 +       schedule_msec_hrtimeout((2 * fnic->config.ed_tov));
48859         /* walk again to check, if IOs are still pending in fw */
48860         if (fnic_is_abts_pending(fnic, lr_sc))
48861 diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
48862 index 7451377c4cb6..3e359ac752fd 100644
48863 --- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
48864 +++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
48865 @@ -1646,7 +1646,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
48866                 idx = i * HISI_SAS_PHY_INT_NR;
48867                 for (j = 0; j < HISI_SAS_PHY_INT_NR; j++, idx++) {
48868                         irq = platform_get_irq(pdev, idx);
48869 -                       if (!irq) {
48870 +                       if (irq < 0) {
48871                                 dev_err(dev, "irq init: fail map phy interrupt %d\n",
48872                                         idx);
48873                                 return -ENOENT;
48874 @@ -1665,7 +1665,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
48875         idx = hisi_hba->n_phy * HISI_SAS_PHY_INT_NR;
48876         for (i = 0; i < hisi_hba->queue_count; i++, idx++) {
48877                 irq = platform_get_irq(pdev, idx);
48878 -               if (!irq) {
48879 +               if (irq < 0) {
48880                         dev_err(dev, "irq init: could not map cq interrupt %d\n",
48881                                 idx);
48882                         return -ENOENT;
48883 @@ -1683,7 +1683,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
48884         idx = (hisi_hba->n_phy * HISI_SAS_PHY_INT_NR) + hisi_hba->queue_count;
48885         for (i = 0; i < HISI_SAS_FATAL_INT_NR; i++, idx++) {
48886                 irq = platform_get_irq(pdev, idx);
48887 -               if (!irq) {
48888 +               if (irq < 0) {
48889                         dev_err(dev, "irq init: could not map fatal interrupt %d\n",
48890                                 idx);
48891                         return -ENOENT;
48892 diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
48893 index 61831f2fdb30..d6675a25719d 100644
48894 --- a/drivers/scsi/ibmvscsi/ibmvfc.c
48895 +++ b/drivers/scsi/ibmvscsi/ibmvfc.c
48896 @@ -603,8 +603,17 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
48897                 if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
48898                         vhost->action = action;
48899                 break;
48900 +       case IBMVFC_HOST_ACTION_REENABLE:
48901 +       case IBMVFC_HOST_ACTION_RESET:
48902 +               vhost->action = action;
48903 +               break;
48904         case IBMVFC_HOST_ACTION_INIT:
48905         case IBMVFC_HOST_ACTION_TGT_DEL:
48906 +       case IBMVFC_HOST_ACTION_LOGO:
48907 +       case IBMVFC_HOST_ACTION_QUERY_TGTS:
48908 +       case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
48909 +       case IBMVFC_HOST_ACTION_NONE:
48910 +       default:
48911                 switch (vhost->action) {
48912                 case IBMVFC_HOST_ACTION_RESET:
48913                 case IBMVFC_HOST_ACTION_REENABLE:
48914 @@ -614,15 +623,6 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
48915                         break;
48916                 }
48917                 break;
48918 -       case IBMVFC_HOST_ACTION_LOGO:
48919 -       case IBMVFC_HOST_ACTION_QUERY_TGTS:
48920 -       case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
48921 -       case IBMVFC_HOST_ACTION_NONE:
48922 -       case IBMVFC_HOST_ACTION_RESET:
48923 -       case IBMVFC_HOST_ACTION_REENABLE:
48924 -       default:
48925 -               vhost->action = action;
48926 -               break;
48927         }
48930 @@ -5373,30 +5373,49 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
48931         case IBMVFC_HOST_ACTION_INIT_WAIT:
48932                 break;
48933         case IBMVFC_HOST_ACTION_RESET:
48934 -               vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
48935                 list_splice_init(&vhost->purge, &purge);
48936                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
48937                 ibmvfc_complete_purge(&purge);
48938                 rc = ibmvfc_reset_crq(vhost);
48940                 spin_lock_irqsave(vhost->host->host_lock, flags);
48941 -               if (rc == H_CLOSED)
48942 +               if (!rc || rc == H_CLOSED)
48943                         vio_enable_interrupts(to_vio_dev(vhost->dev));
48944 -               if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
48945 -                   (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
48946 -                       ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
48947 -                       dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
48948 +               if (vhost->action == IBMVFC_HOST_ACTION_RESET) {
48949 +                       /*
48950 +                        * The only action we could have changed to would have
48951 +                        * been reenable, in which case, we skip the rest of
48952 +                        * this path and wait until we've done the re-enable
48953 +                        * before sending the crq init.
48954 +                        */
48955 +                       vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
48957 +                       if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
48958 +                           (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
48959 +                               ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
48960 +                               dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
48961 +                       }
48962                 }
48963                 break;
48964         case IBMVFC_HOST_ACTION_REENABLE:
48965 -               vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
48966                 list_splice_init(&vhost->purge, &purge);
48967                 spin_unlock_irqrestore(vhost->host->host_lock, flags);
48968                 ibmvfc_complete_purge(&purge);
48969                 rc = ibmvfc_reenable_crq_queue(vhost);
48971                 spin_lock_irqsave(vhost->host->host_lock, flags);
48972 -               if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
48973 -                       ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
48974 -                       dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
48975 +               if (vhost->action == IBMVFC_HOST_ACTION_REENABLE) {
48976 +                       /*
48977 +                        * The only action we could have changed to would have
48978 +                        * been reset, in which case, we skip the rest of this
48979 +                        * path and wait until we've done the reset before
48980 +                        * sending the crq init.
48981 +                        */
48982 +                       vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
48983 +                       if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
48984 +                               ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
48985 +                               dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
48986 +                       }
48987                 }
48988                 break;
48989         case IBMVFC_HOST_ACTION_LOGO:
48990 diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
48991 index f0ed6863cc70..60a88a95a8e2 100644
48992 --- a/drivers/scsi/jazz_esp.c
48993 +++ b/drivers/scsi/jazz_esp.c
48994 @@ -143,7 +143,9 @@ static int esp_jazz_probe(struct platform_device *dev)
48995         if (!esp->command_block)
48996                 goto fail_unmap_regs;
48998 -       host->irq = platform_get_irq(dev, 0);
48999 +       host->irq = err = platform_get_irq(dev, 0);
49000 +       if (err < 0)
49001 +               goto fail_unmap_command_block;
49002         err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
49003         if (err < 0)
49004                 goto fail_unmap_command_block;
49005 diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
49006 index 22826544da7e..9989669beec3 100644
49007 --- a/drivers/scsi/libfc/fc_lport.c
49008 +++ b/drivers/scsi/libfc/fc_lport.c
49009 @@ -1731,7 +1731,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
49011         if (mfs < FC_SP_MIN_MAX_PAYLOAD || mfs > FC_SP_MAX_MAX_PAYLOAD) {
49012                 FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
49013 -                            "lport->mfs:%hu\n", mfs, lport->mfs);
49014 +                            "lport->mfs:%u\n", mfs, lport->mfs);
49015                 fc_lport_error(lport, fp);
49016                 goto out;
49017         }
49018 diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
49019 index bdd9a29f4201..0496a60735ef 100644
49020 --- a/drivers/scsi/lpfc/lpfc_attr.c
49021 +++ b/drivers/scsi/lpfc/lpfc_attr.c
49022 @@ -1687,8 +1687,7 @@ lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out)
49023                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
49024                                 "0071 Set trunk mode failed with status: %d",
49025                                 rc);
49026 -       if (rc != MBX_TIMEOUT)
49027 -               mempool_free(mbox, phba->mbox_mem_pool);
49028 +       mempool_free(mbox, phba->mbox_mem_pool);
49030         return 0;
49032 @@ -6793,15 +6792,19 @@ lpfc_get_stats(struct Scsi_Host *shost)
49033         pmboxq->ctx_buf = NULL;
49034         pmboxq->vport = vport;
49036 -       if (vport->fc_flag & FC_OFFLINE_MODE)
49037 +       if (vport->fc_flag & FC_OFFLINE_MODE) {
49038                 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
49039 -       else
49040 -               rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
49042 -       if (rc != MBX_SUCCESS) {
49043 -               if (rc != MBX_TIMEOUT)
49044 +               if (rc != MBX_SUCCESS) {
49045                         mempool_free(pmboxq, phba->mbox_mem_pool);
49046 -               return NULL;
49047 +                       return NULL;
49048 +               }
49049 +       } else {
49050 +               rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
49051 +               if (rc != MBX_SUCCESS) {
49052 +                       if (rc != MBX_TIMEOUT)
49053 +                               mempool_free(pmboxq, phba->mbox_mem_pool);
49054 +                       return NULL;
49055 +               }
49056         }
49058         memset(hs, 0, sizeof (struct fc_host_statistics));
49059 @@ -6825,15 +6828,19 @@ lpfc_get_stats(struct Scsi_Host *shost)
49060         pmboxq->ctx_buf = NULL;
49061         pmboxq->vport = vport;
49063 -       if (vport->fc_flag & FC_OFFLINE_MODE)
49064 +       if (vport->fc_flag & FC_OFFLINE_MODE) {
49065                 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
49066 -       else
49067 -               rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
49069 -       if (rc != MBX_SUCCESS) {
49070 -               if (rc != MBX_TIMEOUT)
49071 +               if (rc != MBX_SUCCESS) {
49072                         mempool_free(pmboxq, phba->mbox_mem_pool);
49073 -               return NULL;
49074 +                       return NULL;
49075 +               }
49076 +       } else {
49077 +               rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
49078 +               if (rc != MBX_SUCCESS) {
49079 +                       if (rc != MBX_TIMEOUT)
49080 +                               mempool_free(pmboxq, phba->mbox_mem_pool);
49081 +                       return NULL;
49082 +               }
49083         }
49085         hs->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
49086 @@ -6906,15 +6913,19 @@ lpfc_reset_stats(struct Scsi_Host *shost)
49087         pmboxq->vport = vport;
49089         if ((vport->fc_flag & FC_OFFLINE_MODE) ||
49090 -               (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
49091 +               (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
49092                 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
49093 -       else
49094 -               rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
49096 -       if (rc != MBX_SUCCESS) {
49097 -               if (rc != MBX_TIMEOUT)
49098 +               if (rc != MBX_SUCCESS) {
49099                         mempool_free(pmboxq, phba->mbox_mem_pool);
49100 -               return;
49101 +                       return;
49102 +               }
49103 +       } else {
49104 +               rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
49105 +               if (rc != MBX_SUCCESS) {
49106 +                       if (rc != MBX_TIMEOUT)
49107 +                               mempool_free(pmboxq, phba->mbox_mem_pool);
49108 +                       return;
49109 +               }
49110         }
49112         memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
49113 @@ -6924,15 +6935,19 @@ lpfc_reset_stats(struct Scsi_Host *shost)
49114         pmboxq->vport = vport;
49116         if ((vport->fc_flag & FC_OFFLINE_MODE) ||
49117 -           (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
49118 +           (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
49119                 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
49120 -       else
49121 +               if (rc != MBX_SUCCESS) {
49122 +                       mempool_free(pmboxq, phba->mbox_mem_pool);
49123 +                       return;
49124 +               }
49125 +       } else {
49126                 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
49128 -       if (rc != MBX_SUCCESS) {
49129 -               if (rc != MBX_TIMEOUT)
49130 -                       mempool_free( pmboxq, phba->mbox_mem_pool);
49131 -               return;
49132 +               if (rc != MBX_SUCCESS) {
49133 +                       if (rc != MBX_TIMEOUT)
49134 +                               mempool_free(pmboxq, phba->mbox_mem_pool);
49135 +                       return;
49136 +               }
49137         }
49139         lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
49140 diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
49141 index a0aad4896a45..763b1eeb0ca8 100644
49142 --- a/drivers/scsi/lpfc/lpfc_crtn.h
49143 +++ b/drivers/scsi/lpfc/lpfc_crtn.h
49144 @@ -55,9 +55,6 @@ void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
49145  void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
49146  void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
49147  void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
49148 -void lpfc_supported_pages(struct lpfcMboxq *);
49149 -void lpfc_pc_sli4_params(struct lpfcMboxq *);
49150 -int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
49151  int lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *, struct lpfcMboxq *,
49152                            uint16_t, uint16_t, bool);
49153  int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *);
49154 @@ -351,8 +348,8 @@ int lpfc_sli_hbq_size(void);
49155  int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *,
49156                                struct lpfc_iocbq *, void *);
49157  int lpfc_sli_sum_iocb(struct lpfc_vport *, uint16_t, uint64_t, lpfc_ctx_cmd);
49158 -int lpfc_sli_abort_iocb(struct lpfc_vport *, struct lpfc_sli_ring *, uint16_t,
49159 -                       uint64_t, lpfc_ctx_cmd);
49160 +int lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
49161 +                       lpfc_ctx_cmd abort_cmd);
49162  int
49163  lpfc_sli_abort_taskmgmt(struct lpfc_vport *, struct lpfc_sli_ring *,
49164                         uint16_t, uint64_t, lpfc_ctx_cmd);
49165 diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
49166 index f0a758138ae8..3dd22da3153f 100644
49167 --- a/drivers/scsi/lpfc/lpfc_els.c
49168 +++ b/drivers/scsi/lpfc/lpfc_els.c
49169 @@ -1,7 +1,7 @@
49170  /*******************************************************************
49171   * This file is part of the Emulex Linux Device Driver for         *
49172   * Fibre Channel Host Bus Adapters.                                *
49173 - * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
49174 + * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
49175   * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
49176   * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
49177   * EMULEX and SLI are trademarks of Emulex.                        *
49178 @@ -1600,7 +1600,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
49179         struct lpfc_nodelist *new_ndlp;
49180         struct serv_parm *sp;
49181         uint8_t  name[sizeof(struct lpfc_name)];
49182 -       uint32_t rc, keepDID = 0, keep_nlp_flag = 0;
49183 +       uint32_t keepDID = 0, keep_nlp_flag = 0;
49184         uint32_t keep_new_nlp_flag = 0;
49185         uint16_t keep_nlp_state;
49186         u32 keep_nlp_fc4_type = 0;
49187 @@ -1622,7 +1622,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
49188         new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
49190         /* return immediately if the WWPN matches ndlp */
49191 -       if (new_ndlp == ndlp)
49192 +       if (!new_ndlp || (new_ndlp == ndlp))
49193                 return ndlp;
49195         if (phba->sli_rev == LPFC_SLI_REV4) {
49196 @@ -1641,30 +1641,11 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
49197                          (new_ndlp ? new_ndlp->nlp_flag : 0),
49198                          (new_ndlp ? new_ndlp->nlp_fc4_type : 0));
49200 -       if (!new_ndlp) {
49201 -               rc = memcmp(&ndlp->nlp_portname, name,
49202 -                           sizeof(struct lpfc_name));
49203 -               if (!rc) {
49204 -                       if (active_rrqs_xri_bitmap)
49205 -                               mempool_free(active_rrqs_xri_bitmap,
49206 -                                            phba->active_rrq_pool);
49207 -                       return ndlp;
49208 -               }
49209 -               new_ndlp = lpfc_nlp_init(vport, ndlp->nlp_DID);
49210 -               if (!new_ndlp) {
49211 -                       if (active_rrqs_xri_bitmap)
49212 -                               mempool_free(active_rrqs_xri_bitmap,
49213 -                                            phba->active_rrq_pool);
49214 -                       return ndlp;
49215 -               }
49216 -       } else {
49217 -               keepDID = new_ndlp->nlp_DID;
49218 -               if (phba->sli_rev == LPFC_SLI_REV4 &&
49219 -                   active_rrqs_xri_bitmap)
49220 -                       memcpy(active_rrqs_xri_bitmap,
49221 -                              new_ndlp->active_rrqs_xri_bitmap,
49222 -                              phba->cfg_rrq_xri_bitmap_sz);
49223 -       }
49224 +       keepDID = new_ndlp->nlp_DID;
49226 +       if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap)
49227 +               memcpy(active_rrqs_xri_bitmap, new_ndlp->active_rrqs_xri_bitmap,
49228 +                      phba->cfg_rrq_xri_bitmap_sz);
49230         /* At this point in this routine, we know new_ndlp will be
49231          * returned. however, any previous GID_FTs that were done
49232 @@ -2063,13 +2044,12 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
49233   * This routine issues a Port Login (PLOGI) command to a remote N_Port
49234   * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
49235   * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
49236 - * This routine constructs the proper feilds of the PLOGI IOCB and invokes
49237 + * This routine constructs the proper fields of the PLOGI IOCB and invokes
49238   * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
49239   *
49240 - * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
49241 - * will be incremented by 1 for holding the ndlp and the reference to ndlp
49242 - * will be stored into the context1 field of the IOCB for the completion
49243 - * callback function to the PLOGI ELS command.
49244 + * Note that the ndlp reference count will be incremented by 1 for holding
49245 + * the ndlp and the reference to ndlp will be stored into the context1 field
49246 + * of the IOCB for the completion callback function to the PLOGI ELS command.
49247   *
49248   * Return code
49249   *   0 - Successfully issued a plogi for @vport
49250 @@ -2087,29 +2067,28 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
49251         int ret;
49253         ndlp = lpfc_findnode_did(vport, did);
49254 +       if (!ndlp)
49255 +               return 1;
49257 -       if (ndlp) {
49258 -               /* Defer the processing of the issue PLOGI until after the
49259 -                * outstanding UNREG_RPI mbox command completes, unless we
49260 -                * are going offline. This logic does not apply for Fabric DIDs
49261 -                */
49262 -               if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
49263 -                   ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
49264 -                   !(vport->fc_flag & FC_OFFLINE_MODE)) {
49265 -                       lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
49266 -                                        "4110 Issue PLOGI x%x deferred "
49267 -                                        "on NPort x%x rpi x%x Data: x%px\n",
49268 -                                        ndlp->nlp_defer_did, ndlp->nlp_DID,
49269 -                                        ndlp->nlp_rpi, ndlp);
49271 -                       /* We can only defer 1st PLOGI */
49272 -                       if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING)
49273 -                               ndlp->nlp_defer_did = did;
49274 -                       return 0;
49275 -               }
49276 +       /* Defer the processing of the issue PLOGI until after the
49277 +        * outstanding UNREG_RPI mbox command completes, unless we
49278 +        * are going offline. This logic does not apply for Fabric DIDs
49279 +        */
49280 +       if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
49281 +           ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
49282 +           !(vport->fc_flag & FC_OFFLINE_MODE)) {
49283 +               lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
49284 +                                "4110 Issue PLOGI x%x deferred "
49285 +                                "on NPort x%x rpi x%x Data: x%px\n",
49286 +                                ndlp->nlp_defer_did, ndlp->nlp_DID,
49287 +                                ndlp->nlp_rpi, ndlp);
49289 +               /* We can only defer 1st PLOGI */
49290 +               if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING)
49291 +                       ndlp->nlp_defer_did = did;
49292 +               return 0;
49293         }
49295 -       /* If ndlp is not NULL, we will bump the reference count on it */
49296         cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
49297         elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
49298                                      ELS_CMD_PLOGI);
49299 @@ -3829,7 +3808,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
49300                 did = irsp->un.elsreq64.remoteID;
49301                 ndlp = lpfc_findnode_did(vport, did);
49302                 if (!ndlp && (cmd != ELS_CMD_PLOGI))
49303 -                       return 1;
49304 +                       return 0;
49305         }
49307         lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
49308 @@ -4473,10 +4452,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
49309   * nlp_flag bitmap in the ndlp data structure, if the mbox command reference
49310   * field in the command IOCB is not NULL, the referred mailbox command will
49311   * be send out, and then invokes the lpfc_els_free_iocb() routine to release
49312 - * the IOCB. Under error conditions, such as when a LS_RJT is returned or a
49313 - * link down event occurred during the discovery, the lpfc_nlp_not_used()
49314 - * routine shall be invoked trying to release the ndlp if no other threads
49315 - * are currently referring it.
49316 + * the IOCB.
49317   **/
49318  static void
49319  lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
49320 @@ -4486,10 +4462,8 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
49321         struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
49322         struct Scsi_Host  *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
49323         IOCB_t  *irsp;
49324 -       uint8_t *pcmd;
49325         LPFC_MBOXQ_t *mbox = NULL;
49326         struct lpfc_dmabuf *mp = NULL;
49327 -       uint32_t ls_rjt = 0;
49329         irsp = &rspiocb->iocb;
49331 @@ -4501,18 +4475,6 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
49332         if (cmdiocb->context_un.mbox)
49333                 mbox = cmdiocb->context_un.mbox;
49335 -       /* First determine if this is a LS_RJT cmpl. Note, this callback
49336 -        * function can have cmdiocb->contest1 (ndlp) field set to NULL.
49337 -        */
49338 -       pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
49339 -       if (ndlp && (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
49340 -               /* A LS_RJT associated with Default RPI cleanup has its own
49341 -                * separate code path.
49342 -                */
49343 -               if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
49344 -                       ls_rjt = 1;
49345 -       }
49347         /* Check to see if link went down during discovery */
49348         if (!ndlp || lpfc_els_chk_latt(vport)) {
49349                 if (mbox) {
49350 @@ -4523,15 +4485,6 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
49351                         }
49352                         mempool_free(mbox, phba->mbox_mem_pool);
49353                 }
49354 -               if (ndlp && (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
49355 -                       if (lpfc_nlp_not_used(ndlp)) {
49356 -                               ndlp = NULL;
49357 -                               /* Indicate the node has already released,
49358 -                                * should not reference to it from within
49359 -                                * the routine lpfc_els_free_iocb.
49360 -                                */
49361 -                               cmdiocb->context1 = NULL;
49362 -                       }
49363                 goto out;
49364         }
49366 @@ -4609,29 +4562,6 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
49367                                 "Data: x%x x%x x%x\n",
49368                                 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
49369                                 ndlp->nlp_rpi);
49371 -                       if (lpfc_nlp_not_used(ndlp)) {
49372 -                               ndlp = NULL;
49373 -                               /* Indicate node has already been released,
49374 -                                * should not reference to it from within
49375 -                                * the routine lpfc_els_free_iocb.
49376 -                                */
49377 -                               cmdiocb->context1 = NULL;
49378 -                       }
49379 -               } else {
49380 -                       /* Do not drop node for lpfc_els_abort'ed ELS cmds */
49381 -                       if (!lpfc_error_lost_link(irsp) &&
49382 -                           ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
49383 -                               if (lpfc_nlp_not_used(ndlp)) {
49384 -                                       ndlp = NULL;
49385 -                                       /* Indicate node has already been
49386 -                                        * released, should not reference
49387 -                                        * to it from within the routine
49388 -                                        * lpfc_els_free_iocb.
49389 -                                        */
49390 -                                       cmdiocb->context1 = NULL;
49391 -                               }
49392 -                       }
49393                 }
49394                 mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
49395                 if (mp) {
49396 @@ -4647,19 +4577,6 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
49397                         ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
49398                 ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI;
49399                 spin_unlock_irq(&ndlp->lock);
49401 -               /* If the node is not being used by another discovery thread,
49402 -                * and we are sending a reject, we are done with it.
49403 -                * Release driver reference count here and free associated
49404 -                * resources.
49405 -                */
49406 -               if (ls_rjt)
49407 -                       if (lpfc_nlp_not_used(ndlp))
49408 -                               /* Indicate node has already been released,
49409 -                                * should not reference to it from within
49410 -                                * the routine lpfc_els_free_iocb.
49411 -                                */
49412 -                               cmdiocb->context1 = NULL;
49413         }
49415         /* Release the originating I/O reference. */
49416 diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
49417 index 48ca4a612f80..c5176f406386 100644
49418 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c
49419 +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
49420 @@ -140,11 +140,8 @@ lpfc_terminate_rport_io(struct fc_rport *rport)
49421                               "rport terminate: sid:x%x did:x%x flg:x%x",
49422                               ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
49424 -       if (ndlp->nlp_sid != NLP_NO_SID) {
49425 -               lpfc_sli_abort_iocb(vport,
49426 -                                   &vport->phba->sli.sli3_ring[LPFC_FCP_RING],
49427 -                                   ndlp->nlp_sid, 0, LPFC_CTX_TGT);
49428 -       }
49429 +       if (ndlp->nlp_sid != NLP_NO_SID)
49430 +               lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
49433  /*
49434 @@ -299,8 +296,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
49436         if (ndlp->nlp_sid != NLP_NO_SID) {
49437                 warn_on = 1;
49438 -               lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
49439 -                                   ndlp->nlp_sid, 0, LPFC_CTX_TGT);
49440 +               lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
49441         }
49443         if (warn_on) {
49444 diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
49445 index 541b9aef6bfe..f5bc2c32a817 100644
49446 --- a/drivers/scsi/lpfc/lpfc_hw4.h
49447 +++ b/drivers/scsi/lpfc/lpfc_hw4.h
49448 @@ -124,6 +124,7 @@ struct lpfc_sli_intf {
49449  /* Define SLI4 Alignment requirements. */
49450  #define LPFC_ALIGN_16_BYTE     16
49451  #define LPFC_ALIGN_64_BYTE     64
49452 +#define SLI4_PAGE_SIZE         4096
49454  /* Define SLI4 specific definitions. */
49455  #define LPFC_MQ_CQE_BYTE_OFFSET        256
49456 @@ -2976,62 +2977,6 @@ struct lpfc_mbx_request_features {
49457  #define lpfc_mbx_rq_ftr_rsp_mrqp_WORD          word3
49458  };
49460 -struct lpfc_mbx_supp_pages {
49461 -       uint32_t word1;
49462 -#define qs_SHIFT                               0
49463 -#define qs_MASK                                        0x00000001
49464 -#define qs_WORD                                        word1
49465 -#define wr_SHIFT                               1
49466 -#define wr_MASK                                0x00000001
49467 -#define wr_WORD                                        word1
49468 -#define pf_SHIFT                               8
49469 -#define pf_MASK                                        0x000000ff
49470 -#define pf_WORD                                        word1
49471 -#define cpn_SHIFT                              16
49472 -#define cpn_MASK                               0x000000ff
49473 -#define cpn_WORD                               word1
49474 -       uint32_t word2;
49475 -#define list_offset_SHIFT                      0
49476 -#define list_offset_MASK                       0x000000ff
49477 -#define list_offset_WORD                       word2
49478 -#define next_offset_SHIFT                      8
49479 -#define next_offset_MASK                       0x000000ff
49480 -#define next_offset_WORD                       word2
49481 -#define elem_cnt_SHIFT                         16
49482 -#define elem_cnt_MASK                          0x000000ff
49483 -#define elem_cnt_WORD                          word2
49484 -       uint32_t word3;
49485 -#define pn_0_SHIFT                             24
49486 -#define pn_0_MASK                              0x000000ff
49487 -#define pn_0_WORD                              word3
49488 -#define pn_1_SHIFT                             16
49489 -#define pn_1_MASK                              0x000000ff
49490 -#define pn_1_WORD                              word3
49491 -#define pn_2_SHIFT                             8
49492 -#define pn_2_MASK                              0x000000ff
49493 -#define pn_2_WORD                              word3
49494 -#define pn_3_SHIFT                             0
49495 -#define pn_3_MASK                              0x000000ff
49496 -#define pn_3_WORD                              word3
49497 -       uint32_t word4;
49498 -#define pn_4_SHIFT                             24
49499 -#define pn_4_MASK                              0x000000ff
49500 -#define pn_4_WORD                              word4
49501 -#define pn_5_SHIFT                             16
49502 -#define pn_5_MASK                              0x000000ff
49503 -#define pn_5_WORD                              word4
49504 -#define pn_6_SHIFT                             8
49505 -#define pn_6_MASK                              0x000000ff
49506 -#define pn_6_WORD                              word4
49507 -#define pn_7_SHIFT                             0
49508 -#define pn_7_MASK                              0x000000ff
49509 -#define pn_7_WORD                              word4
49510 -       uint32_t rsvd[27];
49511 -#define LPFC_SUPP_PAGES                        0
49512 -#define LPFC_BLOCK_GUARD_PROFILES      1
49513 -#define LPFC_SLI4_PARAMETERS           2
49516  struct lpfc_mbx_memory_dump_type3 {
49517         uint32_t word1;
49518  #define lpfc_mbx_memory_dump_type3_type_SHIFT    0
49519 @@ -3248,121 +3193,6 @@ struct user_eeprom {
49520         uint8_t reserved191[57];
49521  };
49523 -struct lpfc_mbx_pc_sli4_params {
49524 -       uint32_t word1;
49525 -#define qs_SHIFT                               0
49526 -#define qs_MASK                                        0x00000001
49527 -#define qs_WORD                                        word1
49528 -#define wr_SHIFT                               1
49529 -#define wr_MASK                                        0x00000001
49530 -#define wr_WORD                                        word1
49531 -#define pf_SHIFT                               8
49532 -#define pf_MASK                                        0x000000ff
49533 -#define pf_WORD                                        word1
49534 -#define cpn_SHIFT                              16
49535 -#define cpn_MASK                               0x000000ff
49536 -#define cpn_WORD                               word1
49537 -       uint32_t word2;
49538 -#define if_type_SHIFT                          0
49539 -#define if_type_MASK                           0x00000007
49540 -#define if_type_WORD                           word2
49541 -#define sli_rev_SHIFT                          4
49542 -#define sli_rev_MASK                           0x0000000f
49543 -#define sli_rev_WORD                           word2
49544 -#define sli_family_SHIFT                       8
49545 -#define sli_family_MASK                                0x000000ff
49546 -#define sli_family_WORD                                word2
49547 -#define featurelevel_1_SHIFT                   16
49548 -#define featurelevel_1_MASK                    0x000000ff
49549 -#define featurelevel_1_WORD                    word2
49550 -#define featurelevel_2_SHIFT                   24
49551 -#define featurelevel_2_MASK                    0x0000001f
49552 -#define featurelevel_2_WORD                    word2
49553 -       uint32_t word3;
49554 -#define fcoe_SHIFT                             0
49555 -#define fcoe_MASK                              0x00000001
49556 -#define fcoe_WORD                              word3
49557 -#define fc_SHIFT                               1
49558 -#define fc_MASK                                        0x00000001
49559 -#define fc_WORD                                        word3
49560 -#define nic_SHIFT                              2
49561 -#define nic_MASK                               0x00000001
49562 -#define nic_WORD                               word3
49563 -#define iscsi_SHIFT                            3
49564 -#define iscsi_MASK                             0x00000001
49565 -#define iscsi_WORD                             word3
49566 -#define rdma_SHIFT                             4
49567 -#define rdma_MASK                              0x00000001
49568 -#define rdma_WORD                              word3
49569 -       uint32_t sge_supp_len;
49570 -#define SLI4_PAGE_SIZE 4096
49571 -       uint32_t word5;
49572 -#define if_page_sz_SHIFT                       0
49573 -#define if_page_sz_MASK                                0x0000ffff
49574 -#define if_page_sz_WORD                                word5
49575 -#define loopbk_scope_SHIFT                     24
49576 -#define loopbk_scope_MASK                      0x0000000f
49577 -#define loopbk_scope_WORD                      word5
49578 -#define rq_db_window_SHIFT                     28
49579 -#define rq_db_window_MASK                      0x0000000f
49580 -#define rq_db_window_WORD                      word5
49581 -       uint32_t word6;
49582 -#define eq_pages_SHIFT                         0
49583 -#define eq_pages_MASK                          0x0000000f
49584 -#define eq_pages_WORD                          word6
49585 -#define eqe_size_SHIFT                         8
49586 -#define eqe_size_MASK                          0x000000ff
49587 -#define eqe_size_WORD                          word6
49588 -       uint32_t word7;
49589 -#define cq_pages_SHIFT                         0
49590 -#define cq_pages_MASK                          0x0000000f
49591 -#define cq_pages_WORD                          word7
49592 -#define cqe_size_SHIFT                         8
49593 -#define cqe_size_MASK                          0x000000ff
49594 -#define cqe_size_WORD                          word7
49595 -       uint32_t word8;
49596 -#define mq_pages_SHIFT                         0
49597 -#define mq_pages_MASK                          0x0000000f
49598 -#define mq_pages_WORD                          word8
49599 -#define mqe_size_SHIFT                         8
49600 -#define mqe_size_MASK                          0x000000ff
49601 -#define mqe_size_WORD                          word8
49602 -#define mq_elem_cnt_SHIFT                      16
49603 -#define mq_elem_cnt_MASK                       0x000000ff
49604 -#define mq_elem_cnt_WORD                       word8
49605 -       uint32_t word9;
49606 -#define wq_pages_SHIFT                         0
49607 -#define wq_pages_MASK                          0x0000ffff
49608 -#define wq_pages_WORD                          word9
49609 -#define wqe_size_SHIFT                         8
49610 -#define wqe_size_MASK                          0x000000ff
49611 -#define wqe_size_WORD                          word9
49612 -       uint32_t word10;
49613 -#define rq_pages_SHIFT                         0
49614 -#define rq_pages_MASK                          0x0000ffff
49615 -#define rq_pages_WORD                          word10
49616 -#define rqe_size_SHIFT                         8
49617 -#define rqe_size_MASK                          0x000000ff
49618 -#define rqe_size_WORD                          word10
49619 -       uint32_t word11;
49620 -#define hdr_pages_SHIFT                                0
49621 -#define hdr_pages_MASK                         0x0000000f
49622 -#define hdr_pages_WORD                         word11
49623 -#define hdr_size_SHIFT                         8
49624 -#define hdr_size_MASK                          0x0000000f
49625 -#define hdr_size_WORD                          word11
49626 -#define hdr_pp_align_SHIFT                     16
49627 -#define hdr_pp_align_MASK                      0x0000ffff
49628 -#define hdr_pp_align_WORD                      word11
49629 -       uint32_t word12;
49630 -#define sgl_pages_SHIFT                                0
49631 -#define sgl_pages_MASK                         0x0000000f
49632 -#define sgl_pages_WORD                         word12
49633 -#define sgl_pp_align_SHIFT                     16
49634 -#define sgl_pp_align_MASK                      0x0000ffff
49635 -#define sgl_pp_align_WORD                      word12
49636 -       uint32_t rsvd_13_63[51];
49638  #define SLI4_PAGE_ALIGN(addr) (((addr)+((SLI4_PAGE_SIZE)-1)) \
49639                                &(~((SLI4_PAGE_SIZE)-1)))
49641 @@ -3994,8 +3824,6 @@ struct lpfc_mqe {
49642                 struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
49643                 struct lpfc_mbx_query_fw_config query_fw_cfg;
49644                 struct lpfc_mbx_set_beacon_config beacon_config;
49645 -               struct lpfc_mbx_supp_pages supp_pages;
49646 -               struct lpfc_mbx_pc_sli4_params sli4_params;
49647                 struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
49648                 struct lpfc_mbx_set_link_diag_state link_diag_state;
49649                 struct lpfc_mbx_set_link_diag_loopback link_diag_loopback;
49650 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
49651 index 71f340dd4fbd..a67051ba3f12 100644
49652 --- a/drivers/scsi/lpfc/lpfc_init.c
49653 +++ b/drivers/scsi/lpfc/lpfc_init.c
49654 @@ -6573,8 +6573,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
49655         LPFC_MBOXQ_t *mboxq;
49656         MAILBOX_t *mb;
49657         int rc, i, max_buf_size;
49658 -       uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
49659 -       struct lpfc_mqe *mqe;
49660         int longs;
49661         int extra;
49662         uint64_t wwn;
49663 @@ -6808,32 +6806,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
49665         lpfc_nvme_mod_param_dep(phba);
49667 -       /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
49668 -       lpfc_supported_pages(mboxq);
49669 -       rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
49670 -       if (!rc) {
49671 -               mqe = &mboxq->u.mqe;
49672 -               memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
49673 -                      LPFC_MAX_SUPPORTED_PAGES);
49674 -               for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
49675 -                       switch (pn_page[i]) {
49676 -                       case LPFC_SLI4_PARAMETERS:
49677 -                               phba->sli4_hba.pc_sli4_params.supported = 1;
49678 -                               break;
49679 -                       default:
49680 -                               break;
49681 -                       }
49682 -               }
49683 -               /* Read the port's SLI4 Parameters capabilities if supported. */
49684 -               if (phba->sli4_hba.pc_sli4_params.supported)
49685 -                       rc = lpfc_pc_sli4_params_get(phba, mboxq);
49686 -               if (rc) {
49687 -                       mempool_free(mboxq, phba->mbox_mem_pool);
49688 -                       rc = -EIO;
49689 -                       goto out_free_bsmbx;
49690 -               }
49691 -       }
49693         /*
49694          * Get sli4 parameters that override parameters from Port capabilities.
49695          * If this call fails, it isn't critical unless the SLI4 parameters come
49696 @@ -9660,8 +9632,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
49697                                 "3250 QUERY_FW_CFG mailbox failed with status "
49698                                 "x%x add_status x%x, mbx status x%x\n",
49699                                 shdr_status, shdr_add_status, rc);
49700 -               if (rc != MBX_TIMEOUT)
49701 -                       mempool_free(mboxq, phba->mbox_mem_pool);
49702 +               mempool_free(mboxq, phba->mbox_mem_pool);
49703                 rc = -ENXIO;
49704                 goto out_error;
49705         }
49706 @@ -9677,8 +9648,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
49707                         "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
49708                         phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
49710 -       if (rc != MBX_TIMEOUT)
49711 -               mempool_free(mboxq, phba->mbox_mem_pool);
49712 +       mempool_free(mboxq, phba->mbox_mem_pool);
49714         /*
49715          * Set up HBA Event Queues (EQs)
49716 @@ -10276,8 +10246,7 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
49717                 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
49718                 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
49719                                          &shdr->response);
49720 -               if (rc != MBX_TIMEOUT)
49721 -                       mempool_free(mboxq, phba->mbox_mem_pool);
49722 +               mempool_free(mboxq, phba->mbox_mem_pool);
49723                 if (shdr_status || shdr_add_status || rc) {
49724                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
49725                                         "0495 SLI_FUNCTION_RESET mailbox "
49726 @@ -12075,78 +12044,6 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
49727                 phba->pport->work_port_events = 0;
49730 - /**
49731 - * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
49732 - * @phba: Pointer to HBA context object.
49733 - * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
49734 - *
49735 - * This function is called in the SLI4 code path to read the port's
49736 - * sli4 capabilities.
49737 - *
49738 - * This function may be be called from any context that can block-wait
49739 - * for the completion.  The expectation is that this routine is called
49740 - * typically from probe_one or from the online routine.
49741 - **/
49742 -int
49743 -lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
49745 -       int rc;
49746 -       struct lpfc_mqe *mqe;
49747 -       struct lpfc_pc_sli4_params *sli4_params;
49748 -       uint32_t mbox_tmo;
49750 -       rc = 0;
49751 -       mqe = &mboxq->u.mqe;
49753 -       /* Read the port's SLI4 Parameters port capabilities */
49754 -       lpfc_pc_sli4_params(mboxq);
49755 -       if (!phba->sli4_hba.intr_enable)
49756 -               rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
49757 -       else {
49758 -               mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
49759 -               rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
49760 -       }
49762 -       if (unlikely(rc))
49763 -               return 1;
49765 -       sli4_params = &phba->sli4_hba.pc_sli4_params;
49766 -       sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
49767 -       sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
49768 -       sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
49769 -       sli4_params->featurelevel_1 = bf_get(featurelevel_1,
49770 -                                            &mqe->un.sli4_params);
49771 -       sli4_params->featurelevel_2 = bf_get(featurelevel_2,
49772 -                                            &mqe->un.sli4_params);
49773 -       sli4_params->proto_types = mqe->un.sli4_params.word3;
49774 -       sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
49775 -       sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
49776 -       sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
49777 -       sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
49778 -       sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
49779 -       sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
49780 -       sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
49781 -       sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
49782 -       sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
49783 -       sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
49784 -       sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
49785 -       sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
49786 -       sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
49787 -       sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
49788 -       sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
49789 -       sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
49790 -       sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
49791 -       sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
49792 -       sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
49793 -       sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
49795 -       /* Make sure that sge_supp_len can be handled by the driver */
49796 -       if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
49797 -               sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
49799 -       return rc;
49802  /**
49803   * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
49804   * @phba: Pointer to HBA context object.
49805 @@ -12205,7 +12102,8 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
49806         else
49807                 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
49808         sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
49809 -       sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
49810 +       sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
49811 +                                          mbx_sli4_parameters);
49812         sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
49813         sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
49814         sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
49815 diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
49816 index c03a7f12dd65..72dd22ad5dcc 100644
49817 --- a/drivers/scsi/lpfc/lpfc_mbox.c
49818 +++ b/drivers/scsi/lpfc/lpfc_mbox.c
49819 @@ -2624,39 +2624,3 @@ lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
49820         resume_rpi->event_tag = ndlp->phba->fc_eventTag;
49823 -/**
49824 - * lpfc_supported_pages - Initialize the PORT_CAPABILITIES supported pages
49825 - *                        mailbox command.
49826 - * @mbox: pointer to lpfc mbox command to initialize.
49827 - *
49828 - * The PORT_CAPABILITIES supported pages mailbox command is issued to
49829 - * retrieve the particular feature pages supported by the port.
49830 - **/
49831 -void
49832 -lpfc_supported_pages(struct lpfcMboxq *mbox)
49834 -       struct lpfc_mbx_supp_pages *supp_pages;
49836 -       memset(mbox, 0, sizeof(*mbox));
49837 -       supp_pages = &mbox->u.mqe.un.supp_pages;
49838 -       bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
49839 -       bf_set(cpn, supp_pages, LPFC_SUPP_PAGES);
49842 -/**
49843 - * lpfc_pc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params mbox cmd.
49844 - * @mbox: pointer to lpfc mbox command to initialize.
49845 - *
49846 - * The PORT_CAPABILITIES SLI4 parameters mailbox command is issued to
49847 - * retrieve the particular SLI4 features supported by the port.
49848 - **/
49849 -void
49850 -lpfc_pc_sli4_params(struct lpfcMboxq *mbox)
49852 -       struct lpfc_mbx_pc_sli4_params *sli4_params;
49854 -       memset(mbox, 0, sizeof(*mbox));
49855 -       sli4_params = &mbox->u.mqe.un.sli4_params;
49856 -       bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
49857 -       bf_set(cpn, sli4_params, LPFC_SLI4_PARAMETERS);
49859 diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
49860 index 135d8e8a42ba..9f05f5e329c6 100644
49861 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c
49862 +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
49863 @@ -279,106 +279,43 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
49864         lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
49867 -/* lpfc_defer_pt2pt_acc - Complete SLI3 pt2pt processing on link up
49868 +/* lpfc_defer_plogi_acc - Issue PLOGI ACC after reg_login completes
49869   * @phba: pointer to lpfc hba data structure.
49870 - * @link_mbox: pointer to CONFIG_LINK mailbox object
49871 + * @login_mbox: pointer to REG_RPI mailbox object
49872   *
49873 - * This routine is only called if we are SLI3, direct connect pt2pt
49874 - * mode and the remote NPort issues the PLOGI after link up.
49875 + * The ACC for a rcv'ed PLOGI is deferred until AFTER the REG_RPI completes
49876   */
49877  static void
49878 -lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox)
49879 +lpfc_defer_plogi_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *login_mbox)
49881 -       LPFC_MBOXQ_t *login_mbox;
49882 -       MAILBOX_t *mb = &link_mbox->u.mb;
49883         struct lpfc_iocbq *save_iocb;
49884         struct lpfc_nodelist *ndlp;
49885 +       MAILBOX_t *mb = &login_mbox->u.mb;
49887         int rc;
49889 -       ndlp = link_mbox->ctx_ndlp;
49890 -       login_mbox = link_mbox->context3;
49891 +       ndlp = login_mbox->ctx_ndlp;
49892         save_iocb = login_mbox->context3;
49893 -       link_mbox->context3 = NULL;
49894 -       login_mbox->context3 = NULL;
49896 -       /* Check for CONFIG_LINK error */
49897 -       if (mb->mbxStatus) {
49898 -               lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
49899 -                               "4575 CONFIG_LINK fails pt2pt discovery: %x\n",
49900 -                               mb->mbxStatus);
49901 -               mempool_free(login_mbox, phba->mbox_mem_pool);
49902 -               mempool_free(link_mbox, phba->mbox_mem_pool);
49903 -               kfree(save_iocb);
49904 -               return;
49905 -       }
49907 -       /* Now that CONFIG_LINK completed, and our SID is configured,
49908 -        * we can now proceed with sending the PLOGI ACC.
49909 -        */
49910 -       rc = lpfc_els_rsp_acc(link_mbox->vport, ELS_CMD_PLOGI,
49911 -                             save_iocb, ndlp, login_mbox);
49912 -       if (rc) {
49913 -               lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
49914 -                               "4576 PLOGI ACC fails pt2pt discovery: %x\n",
49915 -                               rc);
49916 -               mempool_free(login_mbox, phba->mbox_mem_pool);
49917 +       if (mb->mbxStatus == MBX_SUCCESS) {
49918 +               /* Now that REG_RPI completed successfully,
49919 +                * we can now proceed with sending the PLOGI ACC.
49920 +                */
49921 +               rc = lpfc_els_rsp_acc(login_mbox->vport, ELS_CMD_PLOGI,
49922 +                                     save_iocb, ndlp, NULL);
49923 +               if (rc) {
49924 +                       lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
49925 +                                       "4576 PLOGI ACC fails pt2pt discovery: "
49926 +                                       "DID %x Data: %x\n", ndlp->nlp_DID, rc);
49927 +               }
49928         }
49930 -       mempool_free(link_mbox, phba->mbox_mem_pool);
49931 +       /* Now process the REG_RPI cmpl */
49932 +       lpfc_mbx_cmpl_reg_login(phba, login_mbox);
49933 +       ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
49934         kfree(save_iocb);
49937 -/**
49938 - * lpfc_defer_tgt_acc - Progress SLI4 target rcv PLOGI handler
49939 - * @phba: Pointer to HBA context object.
49940 - * @pmb: Pointer to mailbox object.
49941 - *
49942 - * This function provides the unreg rpi mailbox completion handler for a tgt.
49943 - * The routine frees the memory resources associated with the completed
49944 - * mailbox command and transmits the ELS ACC.
49945 - *
49946 - * This routine is only called if we are SLI4, acting in target
49947 - * mode and the remote NPort issues the PLOGI after link up.
49948 - **/
49949 -static void
49950 -lpfc_defer_acc_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
49952 -       struct lpfc_vport *vport = pmb->vport;
49953 -       struct lpfc_nodelist *ndlp = pmb->ctx_ndlp;
49954 -       LPFC_MBOXQ_t *mbox = pmb->context3;
49955 -       struct lpfc_iocbq *piocb = NULL;
49956 -       int rc;
49958 -       if (mbox) {
49959 -               pmb->context3 = NULL;
49960 -               piocb = mbox->context3;
49961 -               mbox->context3 = NULL;
49962 -       }
49964 -       /*
49965 -        * Complete the unreg rpi mbx request, and update flags.
49966 -        * This will also restart any deferred events.
49967 -        */
49968 -       lpfc_sli4_unreg_rpi_cmpl_clr(phba, pmb);
49970 -       if (!piocb) {
49971 -               lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
49972 -                                "4578 PLOGI ACC fail\n");
49973 -               if (mbox)
49974 -                       mempool_free(mbox, phba->mbox_mem_pool);
49975 -               return;
49976 -       }
49978 -       rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, piocb, ndlp, mbox);
49979 -       if (rc) {
49980 -               lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
49981 -                                "4579 PLOGI ACC fail %x\n", rc);
49982 -               if (mbox)
49983 -                       mempool_free(mbox, phba->mbox_mem_pool);
49984 -       }
49985 -       kfree(piocb);
49988  static int
49989  lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
49990                struct lpfc_iocbq *cmdiocb)
49991 @@ -395,8 +332,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
49992         struct lpfc_iocbq *save_iocb;
49993         struct ls_rjt stat;
49994         uint32_t vid, flag;
49995 -       u16 rpi;
49996 -       int rc, defer_acc;
49997 +       int rc;
49999         memset(&stat, 0, sizeof (struct ls_rjt));
50000         pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
50001 @@ -445,7 +381,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
50002         else
50003                 ndlp->nlp_fcp_info |= CLASS3;
50005 -       defer_acc = 0;
50006         ndlp->nlp_class_sup = 0;
50007         if (sp->cls1.classValid)
50008                 ndlp->nlp_class_sup |= FC_COS_CLASS1;
50009 @@ -539,27 +474,26 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
50011                 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
50013 -               /* Issue config_link / reg_vfi to account for updated TOV's */
50015 +               /* Issue CONFIG_LINK for SLI3 or REG_VFI for SLI4,
50016 +                * to account for updated TOV's / parameters
50017 +                */
50018                 if (phba->sli_rev == LPFC_SLI_REV4)
50019                         lpfc_issue_reg_vfi(vport);
50020                 else {
50021 -                       defer_acc = 1;
50022                         link_mbox = mempool_alloc(phba->mbox_mem_pool,
50023                                                   GFP_KERNEL);
50024                         if (!link_mbox)
50025                                 goto out;
50026                         lpfc_config_link(phba, link_mbox);
50027 -                       link_mbox->mbox_cmpl = lpfc_defer_pt2pt_acc;
50028 +                       link_mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
50029                         link_mbox->vport = vport;
50030                         link_mbox->ctx_ndlp = ndlp;
50032 -                       save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
50033 -                       if (!save_iocb)
50034 +                       rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
50035 +                       if (rc == MBX_NOT_FINISHED) {
50036 +                               mempool_free(link_mbox, phba->mbox_mem_pool);
50037                                 goto out;
50038 -                       /* Save info from cmd IOCB used in rsp */
50039 -                       memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb,
50040 -                              sizeof(struct lpfc_iocbq));
50041 +                       }
50042                 }
50044                 lpfc_can_disctmo(vport);
50045 @@ -578,59 +512,28 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
50046         if (!login_mbox)
50047                 goto out;
50049 -       /* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
50050 -       if (phba->nvmet_support && !defer_acc) {
50051 -               link_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
50052 -               if (!link_mbox)
50053 -                       goto out;
50055 -               /* As unique identifiers such as iotag would be overwritten
50056 -                * with those from the cmdiocb, allocate separate temporary
50057 -                * storage for the copy.
50058 -                */
50059 -               save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
50060 -               if (!save_iocb)
50061 -                       goto out;
50063 -               /* Unreg RPI is required for SLI4. */
50064 -               rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
50065 -               lpfc_unreg_login(phba, vport->vpi, rpi, link_mbox);
50066 -               link_mbox->vport = vport;
50067 -               link_mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
50068 -               if (!link_mbox->ctx_ndlp)
50069 -                       goto out;
50071 -               link_mbox->mbox_cmpl = lpfc_defer_acc_rsp;
50073 -               if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
50074 -                   (!(vport->fc_flag & FC_OFFLINE_MODE)))
50075 -                       ndlp->nlp_flag |= NLP_UNREG_INP;
50076 +       save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL);
50077 +       if (!save_iocb)
50078 +               goto out;
50080 -               /* Save info from cmd IOCB used in rsp */
50081 -               memcpy(save_iocb, cmdiocb, sizeof(*save_iocb));
50082 +       /* Save info from cmd IOCB to be used in rsp after all mbox completes */
50083 +       memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb,
50084 +              sizeof(struct lpfc_iocbq));
50086 -               /* Delay sending ACC till unreg RPI completes. */
50087 -               defer_acc = 1;
50088 -       } else if (phba->sli_rev == LPFC_SLI_REV4)
50089 +       /* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
50090 +       if (phba->sli_rev == LPFC_SLI_REV4)
50091                 lpfc_unreg_rpi(vport, ndlp);
50093 +       /* Issue REG_LOGIN first, before ACCing the PLOGI, thus we will
50094 +        * always be deferring the ACC.
50095 +        */
50096         rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
50097                             (uint8_t *)sp, login_mbox, ndlp->nlp_rpi);
50098         if (rc)
50099                 goto out;
50101 -       /* ACC PLOGI rsp command needs to execute first,
50102 -        * queue this login_mbox command to be processed later.
50103 -        */
50104         login_mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
50105 -       /*
50106 -        * login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox
50107 -        * command issued in lpfc_cmpl_els_acc().
50108 -        */
50109         login_mbox->vport = vport;
50110 -       spin_lock_irq(&ndlp->lock);
50111 -       ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
50112 -       spin_unlock_irq(&ndlp->lock);
50114         /*
50115          * If there is an outstanding PLOGI issued, abort it before
50116 @@ -660,7 +563,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
50117                  * to register, then unregister the RPI.
50118                  */
50119                 spin_lock_irq(&ndlp->lock);
50120 -               ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
50121 +               ndlp->nlp_flag |= (NLP_RM_DFLT_RPI | NLP_ACC_REGLOGIN |
50122 +                                  NLP_RCV_PLOGI);
50123                 spin_unlock_irq(&ndlp->lock);
50124                 stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
50125                 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
50126 @@ -670,42 +574,39 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
50127                         mempool_free(login_mbox, phba->mbox_mem_pool);
50128                 return 1;
50129         }
50130 -       if (defer_acc) {
50131 -               /* So the order here should be:
50132 -                * SLI3 pt2pt
50133 -                *   Issue CONFIG_LINK mbox
50134 -                *   CONFIG_LINK cmpl
50135 -                * SLI4 tgt
50136 -                *   Issue UNREG RPI mbx
50137 -                *   UNREG RPI cmpl
50138 -                * Issue PLOGI ACC
50139 -                * PLOGI ACC cmpl
50140 -                * Issue REG_LOGIN mbox
50141 -                */
50143 -               /* Save the REG_LOGIN mbox for and rcv IOCB copy later */
50144 -               link_mbox->context3 = login_mbox;
50145 -               login_mbox->context3 = save_iocb;
50146 +       /* So the order here should be:
50147 +        * SLI3 pt2pt
50148 +        *   Issue CONFIG_LINK mbox
50149 +        *   CONFIG_LINK cmpl
50150 +        * SLI4 pt2pt
50151 +        *   Issue REG_VFI mbox
50152 +        *   REG_VFI cmpl
50153 +        * SLI4
50154 +        *   Issue UNREG RPI mbx
50155 +        *   UNREG RPI cmpl
50156 +        * Issue REG_RPI mbox
50157 +        * REG RPI cmpl
50158 +        * Issue PLOGI ACC
50159 +        * PLOGI ACC cmpl
50160 +        */
50161 +       login_mbox->mbox_cmpl = lpfc_defer_plogi_acc;
50162 +       login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
50163 +       login_mbox->context3 = save_iocb; /* For PLOGI ACC */
50165 -               /* Start the ball rolling by issuing CONFIG_LINK here */
50166 -               rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
50167 -               if (rc == MBX_NOT_FINISHED)
50168 -                       goto out;
50169 -               return 1;
50170 -       }
50171 +       spin_lock_irq(&ndlp->lock);
50172 +       ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
50173 +       spin_unlock_irq(&ndlp->lock);
50175 +       /* Start the ball rolling by issuing REG_LOGIN here */
50176 +       rc = lpfc_sli_issue_mbox(phba, login_mbox, MBX_NOWAIT);
50177 +       if (rc == MBX_NOT_FINISHED)
50178 +               goto out;
50179 +       lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
50181 -       rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, login_mbox);
50182 -       if (rc)
50183 -               mempool_free(login_mbox, phba->mbox_mem_pool);
50184         return 1;
50185  out:
50186 -       if (defer_acc)
50187 -               lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
50188 -                               "4577 discovery failure: %p %p %p\n",
50189 -                               save_iocb, link_mbox, login_mbox);
50190         kfree(save_iocb);
50191 -       if (link_mbox)
50192 -               mempool_free(link_mbox, phba->mbox_mem_pool);
50193         if (login_mbox)
50194                 mempool_free(login_mbox, phba->mbox_mem_pool);
50196 @@ -913,9 +814,14 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
50197                 }
50198         } else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
50199                 ((ndlp->nlp_type & NLP_FCP_TARGET) ||
50200 -               !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
50201 +               (ndlp->nlp_type & NLP_NVME_TARGET) ||
50202 +               (vport->fc_flag & FC_PT2PT))) ||
50203                 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
50204 -               /* Only try to re-login if this is NOT a Fabric Node */
50205 +               /* Only try to re-login if this is NOT a Fabric Node
50206 +                * AND the remote NPORT is a FCP/NVME Target or we
50207 +                * are in pt2pt mode. NLP_STE_ADISC_ISSUE is a special
50208 +                * case for LOGO as a response to ADISC behavior.
50209 +                */
50210                 mod_timer(&ndlp->nlp_delayfunc,
50211                           jiffies + msecs_to_jiffies(1000 * 1));
50212                 spin_lock_irq(&ndlp->lock);
50213 @@ -1985,8 +1891,6 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
50214                 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
50216                 lpfc_issue_els_logo(vport, ndlp, 0);
50217 -               ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
50218 -               lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
50219                 return ndlp->nlp_state;
50220         }
50222 @@ -2633,12 +2537,10 @@ static uint32_t
50223  lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
50224                           void *arg, uint32_t evt)
50226 -       struct lpfc_hba  *phba = vport->phba;
50227         struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
50229         /* flush the target */
50230 -       lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
50231 -                           ndlp->nlp_sid, 0, LPFC_CTX_TGT);
50232 +       lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
50234         /* Treat like rcv logo */
50235         lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
50236 diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
50237 index bb2a4a0d1295..a3fd959f7431 100644
50238 --- a/drivers/scsi/lpfc/lpfc_nvmet.c
50239 +++ b/drivers/scsi/lpfc/lpfc_nvmet.c
50240 @@ -3304,7 +3304,6 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
50241         bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
50243         /* Word 10 */
50244 -       bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
50245         bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
50246         bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
50247                LPFC_WQE_LENLOC_WORD12);
50248 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
50249 index a4d697373c71..fab9ea6fe965 100644
50250 --- a/drivers/scsi/lpfc/lpfc_scsi.c
50251 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
50252 @@ -5815,7 +5815,7 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
50253                                         tgt_id, lun_id, context);
50254         later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
50255         while (time_after(later, jiffies) && cnt) {
50256 -               schedule_timeout_uninterruptible(msecs_to_jiffies(20));
50257 +               schedule_msec_hrtimeout_uninterruptible((20));
50258                 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
50259         }
50260         if (cnt) {
50261 diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
50262 index fa1a714a78f0..920cf329268b 100644
50263 --- a/drivers/scsi/lpfc/lpfc_sli.c
50264 +++ b/drivers/scsi/lpfc/lpfc_sli.c
50265 @@ -5683,12 +5683,10 @@ lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
50266                         phba->sli4_hba.lnk_info.lnk_no,
50267                         phba->BIOSVersion);
50268  out_free_mboxq:
50269 -       if (rc != MBX_TIMEOUT) {
50270 -               if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
50271 -                       lpfc_sli4_mbox_cmd_free(phba, mboxq);
50272 -               else
50273 -                       mempool_free(mboxq, phba->mbox_mem_pool);
50274 -       }
50275 +       if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
50276 +               lpfc_sli4_mbox_cmd_free(phba, mboxq);
50277 +       else
50278 +               mempool_free(mboxq, phba->mbox_mem_pool);
50279         return rc;
50282 @@ -5789,12 +5787,10 @@ lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
50283         }
50285  out_free_mboxq:
50286 -       if (rc != MBX_TIMEOUT) {
50287 -               if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
50288 -                       lpfc_sli4_mbox_cmd_free(phba, mboxq);
50289 -               else
50290 -                       mempool_free(mboxq, phba->mbox_mem_pool);
50291 -       }
50292 +       if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
50293 +               lpfc_sli4_mbox_cmd_free(phba, mboxq);
50294 +       else
50295 +               mempool_free(mboxq, phba->mbox_mem_pool);
50296         return rc;
50299 @@ -11647,7 +11643,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
50300         icmd = &cmdiocb->iocb;
50301         if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
50302             icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
50303 -           (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
50304 +           cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED)
50305                 return IOCB_ABORTING;
50307         if (!pring) {
50308 @@ -11811,13 +11807,20 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
50309                            lpfc_ctx_cmd ctx_cmd)
50311         struct lpfc_io_buf *lpfc_cmd;
50312 +       IOCB_t *icmd = NULL;
50313         int rc = 1;
50315         if (!iocbq || iocbq->vport != vport)
50316                 return rc;
50318 -       if (!(iocbq->iocb_flag &  LPFC_IO_FCP) ||
50319 -           !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
50320 +       if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
50321 +           !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ) ||
50322 +             iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
50323 +               return rc;
50325 +       icmd = &iocbq->iocb;
50326 +       if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
50327 +           icmd->ulpCommand == CMD_CLOSE_XRI_CN)
50328                 return rc;
50330         lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
50331 @@ -11945,7 +11948,6 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
50332  /**
50333   * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
50334   * @vport: Pointer to virtual port.
50335 - * @pring: Pointer to driver SLI ring object.
50336   * @tgt_id: SCSI ID of the target.
50337   * @lun_id: LUN ID of the scsi device.
50338   * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
50339 @@ -11960,18 +11962,22 @@ lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
50340   * FCP iocbs associated with SCSI target specified by tgt_id parameter.
50341   * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
50342   * FCP iocbs associated with virtual port.
50343 + * The pring used for SLI3 is sli3_ring[LPFC_FCP_RING], for SLI4
50344 + * lpfc_sli4_calc_ring is used.
50345   * This function returns number of iocbs it failed to abort.
50346   * This function is called with no locks held.
50347   **/
50348  int
50349 -lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
50350 -                   uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
50351 +lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
50352 +                   lpfc_ctx_cmd abort_cmd)
50354         struct lpfc_hba *phba = vport->phba;
50355 +       struct lpfc_sli_ring *pring = NULL;
50356         struct lpfc_iocbq *iocbq;
50357         int errcnt = 0, ret_val = 0;
50358         unsigned long iflags;
50359         int i;
50360 +       void *fcp_cmpl = NULL;
50362         /* all I/Os are in process of being flushed */
50363         if (phba->hba_flag & HBA_IOQ_FLUSH)
50364 @@ -11985,8 +11991,15 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
50365                         continue;
50367                 spin_lock_irqsave(&phba->hbalock, iflags);
50368 +               if (phba->sli_rev == LPFC_SLI_REV3) {
50369 +                       pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
50370 +                       fcp_cmpl = lpfc_sli_abort_fcp_cmpl;
50371 +               } else if (phba->sli_rev == LPFC_SLI_REV4) {
50372 +                       pring = lpfc_sli4_calc_ring(phba, iocbq);
50373 +                       fcp_cmpl = lpfc_sli4_abort_fcp_cmpl;
50374 +               }
50375                 ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
50376 -                                                    lpfc_sli_abort_fcp_cmpl);
50377 +                                                    fcp_cmpl);
50378                 spin_unlock_irqrestore(&phba->hbalock, iflags);
50379                 if (ret_val != IOCB_SUCCESS)
50380                         errcnt++;
50381 @@ -17072,8 +17085,7 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
50382                                 "2509 RQ_DESTROY mailbox failed with "
50383                                 "status x%x add_status x%x, mbx status x%x\n",
50384                                 shdr_status, shdr_add_status, rc);
50385 -               if (rc != MBX_TIMEOUT)
50386 -                       mempool_free(mbox, hrq->phba->mbox_mem_pool);
50387 +               mempool_free(mbox, hrq->phba->mbox_mem_pool);
50388                 return -ENXIO;
50389         }
50390         bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
50391 @@ -17170,7 +17182,9 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
50392         shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
50393         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
50394         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
50395 -       if (rc != MBX_TIMEOUT)
50396 +       if (!phba->sli4_hba.intr_enable)
50397 +               mempool_free(mbox, phba->mbox_mem_pool);
50398 +       else if (rc != MBX_TIMEOUT)
50399                 mempool_free(mbox, phba->mbox_mem_pool);
50400         if (shdr_status || shdr_add_status || rc) {
50401                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
50402 @@ -17367,7 +17381,9 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
50403         shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
50404         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
50405         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
50406 -       if (rc != MBX_TIMEOUT)
50407 +       if (!phba->sli4_hba.intr_enable)
50408 +               lpfc_sli4_mbox_cmd_free(phba, mbox);
50409 +       else if (rc != MBX_TIMEOUT)
50410                 lpfc_sli4_mbox_cmd_free(phba, mbox);
50411         if (shdr_status || shdr_add_status || rc) {
50412                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
50413 @@ -17480,7 +17496,9 @@ lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
50414         shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
50415         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
50416         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
50417 -       if (rc != MBX_TIMEOUT)
50418 +       if (!phba->sli4_hba.intr_enable)
50419 +               lpfc_sli4_mbox_cmd_free(phba, mbox);
50420 +       else if (rc != MBX_TIMEOUT)
50421                 lpfc_sli4_mbox_cmd_free(phba, mbox);
50422         if (shdr_status || shdr_add_status || rc) {
50423                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
50424 @@ -18064,7 +18082,6 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
50425         if (cmd_iocbq) {
50426                 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
50427                 lpfc_nlp_put(ndlp);
50428 -               lpfc_nlp_not_used(ndlp);
50429                 lpfc_sli_release_iocbq(phba, cmd_iocbq);
50430         }
50432 @@ -18831,8 +18848,7 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
50433         shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
50434         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
50435         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
50436 -       if (rc != MBX_TIMEOUT)
50437 -               mempool_free(mboxq, phba->mbox_mem_pool);
50438 +       mempool_free(mboxq, phba->mbox_mem_pool);
50439         if (shdr_status || shdr_add_status || rc) {
50440                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
50441                                 "2514 POST_RPI_HDR mailbox failed with "
50442 @@ -20076,7 +20092,9 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
50443                         break;
50444                 }
50445         }
50446 -       if (rc != MBX_TIMEOUT)
50447 +       if (!phba->sli4_hba.intr_enable)
50448 +               mempool_free(mbox, phba->mbox_mem_pool);
50449 +       else if (rc != MBX_TIMEOUT)
50450                 mempool_free(mbox, phba->mbox_mem_pool);
50451         if (shdr_status || shdr_add_status || rc) {
50452                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
50453 diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
50454 index ac0eef975f17..b6beacfd0f62 100644
50455 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c
50456 +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
50457 @@ -7252,6 +7252,8 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
50459         ioc_info(ioc, "sending diag reset !!\n");
50461 +       pci_cfg_access_lock(ioc->pdev);
50463         drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
50465         count = 0;
50466 @@ -7342,10 +7344,12 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
50467                 goto out;
50468         }
50470 +       pci_cfg_access_unlock(ioc->pdev);
50471         ioc_info(ioc, "diag reset: SUCCESS\n");
50472         return 0;
50474   out:
50475 +       pci_cfg_access_unlock(ioc->pdev);
50476         ioc_err(ioc, "diag reset: FAILED\n");
50477         return -EFAULT;
50479 diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
50480 index 44f9a05db94e..2ec11be62a82 100644
50481 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
50482 +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
50483 @@ -2507,7 +2507,7 @@ _ctl_addnl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
50484                     __func__, karg.unique_id);
50485                 return -EPERM;
50486         }
50487 -       memset(&karg.buffer_rel_condition, 0, sizeof(struct htb_rel_query));
50488 +       memset(&karg.rel_query, 0, sizeof(karg.rel_query));
50489         if ((ioc->diag_buffer_status[buffer_type] &
50490             MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
50491                 ioc_info(ioc, "%s: buffer_type(0x%02x) is not registered\n",
50492 @@ -2520,8 +2520,7 @@ _ctl_addnl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
50493                     __func__, buffer_type);
50494                 return -EPERM;
50495         }
50496 -       memcpy(&karg.buffer_rel_condition, &ioc->htb_rel,
50497 -           sizeof(struct  htb_rel_query));
50498 +       memcpy(&karg.rel_query, &ioc->htb_rel, sizeof(karg.rel_query));
50499  out:
50500         if (copy_to_user(arg, &karg, sizeof(struct mpt3_addnl_diag_query))) {
50501                 ioc_err(ioc, "%s: unable to write mpt3_addnl_diag_query data @ %p\n",
50502 diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
50503 index d2ccdafb8df2..8f6ffb40261c 100644
50504 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h
50505 +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
50506 @@ -50,6 +50,8 @@
50507  #include <linux/miscdevice.h>
50508  #endif
50510 +#include "mpt3sas_base.h"
50512  #ifndef MPT2SAS_MINOR
50513  #define MPT2SAS_MINOR          (MPT_MINOR + 1)
50514  #endif
50515 @@ -436,19 +438,13 @@ struct mpt3_diag_read_buffer {
50516   * struct mpt3_addnl_diag_query - diagnostic buffer release reason
50517   * @hdr - generic header
50518   * @unique_id - unique id associated with this buffer.
50519 - * @buffer_rel_condition - Release condition ioctl/sysfs/reset
50520 - * @reserved1
50521 - * @trigger_type - Master/Event/scsi/MPI
50522 - * @trigger_info_dwords - Data Correspondig to trigger type
50523 + * @rel_query - release query.
50524   * @reserved2
50525   */
50526  struct mpt3_addnl_diag_query {
50527         struct mpt3_ioctl_header hdr;
50528         uint32_t unique_id;
50529 -       uint16_t buffer_rel_condition;
50530 -       uint16_t reserved1;
50531 -       uint32_t trigger_type;
50532 -       uint32_t trigger_info_dwords[2];
50533 +       struct htb_rel_query rel_query;
50534         uint32_t reserved2[2];
50535  };
50537 diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
50538 index 6aa6de729187..ae1973878cc7 100644
50539 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
50540 +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
50541 @@ -6483,6 +6483,9 @@ _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
50542                 if (!vphy)
50543                         return NULL;
50545 +               if (!port->vphys_mask)
50546 +                       INIT_LIST_HEAD(&port->vphys_list);
50548                 /*
50549                  * Enable bit corresponding to HBA phy number on its
50550                  * parent hba_port object's vphys_mask field.
50551 @@ -6490,7 +6493,6 @@ _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
50552                 port->vphys_mask |= (1 << phy_num);
50553                 vphy->phy_mask |= (1 << phy_num);
50555 -               INIT_LIST_HEAD(&port->vphys_list);
50556                 list_add_tail(&vphy->list, &port->vphys_list);
50558                 ioc_info(ioc,
50559 diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
50560 index 31e5455d280c..1b1a57f46989 100644
50561 --- a/drivers/scsi/pm8001/pm8001_hwi.c
50562 +++ b/drivers/scsi/pm8001/pm8001_hwi.c
50563 @@ -643,7 +643,7 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
50564   */
50565  static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
50567 -       u8 i = 0;
50568 +       u32 i = 0;
50569         u16 deviceid;
50570         pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
50571         /* 8081 controllers need BAR shift to access MPI space
50572 diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
50573 index 84315560e8e1..c6b0834e3806 100644
50574 --- a/drivers/scsi/pm8001/pm80xx_hwi.c
50575 +++ b/drivers/scsi/pm8001/pm80xx_hwi.c
50576 @@ -1502,9 +1502,9 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
50578         /* wait until Inbound DoorBell Clear Register toggled */
50579         if (IS_SPCV_12G(pm8001_ha->pdev)) {
50580 -               max_wait_count = 4 * 1000 * 1000;/* 4 sec */
50581 +               max_wait_count = 30 * 1000 * 1000; /* 30 sec */
50582         } else {
50583 -               max_wait_count = 2 * 1000 * 1000;/* 2 sec */
50584 +               max_wait_count = 15 * 1000 * 1000; /* 15 sec */
50585         }
50586         do {
50587                 udelay(1);
50588 diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
50589 index 63391c9be05d..3aa9869f6fae 100644
50590 --- a/drivers/scsi/qla2xxx/qla_attr.c
50591 +++ b/drivers/scsi/qla2xxx/qla_attr.c
50592 @@ -2864,6 +2864,8 @@ qla2x00_reset_host_stats(struct Scsi_Host *shost)
50593         vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
50595         if (IS_FWI2_CAPABLE(ha)) {
50596 +               int rval;
50598                 stats = dma_alloc_coherent(&ha->pdev->dev,
50599                     sizeof(*stats), &stats_dma, GFP_KERNEL);
50600                 if (!stats) {
50601 @@ -2873,7 +2875,11 @@ qla2x00_reset_host_stats(struct Scsi_Host *shost)
50602                 }
50604                 /* reset firmware statistics */
50605 -               qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
50606 +               rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
50607 +               if (rval != QLA_SUCCESS)
50608 +                       ql_log(ql_log_warn, vha, 0x70de,
50609 +                              "Resetting ISP statistics failed: rval = %d\n",
50610 +                              rval);
50612                 dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
50613                     stats, stats_dma);
50614 diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
50615 index bee8cf9f8123..aef2f7cc89d3 100644
50616 --- a/drivers/scsi/qla2xxx/qla_bsg.c
50617 +++ b/drivers/scsi/qla2xxx/qla_bsg.c
50618 @@ -25,10 +25,11 @@ void qla2x00_bsg_job_done(srb_t *sp, int res)
50619         struct bsg_job *bsg_job = sp->u.bsg_job;
50620         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
50622 +       sp->free(sp);
50624         bsg_reply->result = res;
50625         bsg_job_done(bsg_job, bsg_reply->result,
50626                        bsg_reply->reply_payload_rcv_len);
50627 -       sp->free(sp);
50630  void qla2x00_bsg_sp_free(srb_t *sp)
50631 @@ -2583,6 +2584,10 @@ qla2x00_get_host_stats(struct bsg_job *bsg_job)
50632         }
50634         data = kzalloc(response_len, GFP_KERNEL);
50635 +       if (!data) {
50636 +               kfree(req_data);
50637 +               return -ENOMEM;
50638 +       }
50640         ret = qla2xxx_get_ini_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
50641                                     data, response_len);
50642 diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
50643 index f01f07116bd3..8cb0574cfa91 100644
50644 --- a/drivers/scsi/qla2xxx/qla_init.c
50645 +++ b/drivers/scsi/qla2xxx/qla_init.c
50646 @@ -1194,6 +1194,9 @@ static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
50648         struct qla_work_evt *e;
50650 +       if (vha->host->active_mode == MODE_TARGET)
50651 +               return QLA_FUNCTION_FAILED;
50653         e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
50654         if (!e)
50655                 return QLA_FUNCTION_FAILED;
50656 diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
50657 index 5e188375c871..af4831c9edf9 100644
50658 --- a/drivers/scsi/qla2xxx/qla_isr.c
50659 +++ b/drivers/scsi/qla2xxx/qla_isr.c
50660 @@ -4005,11 +4005,11 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
50661         if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
50662                 /* user wants to control IRQ setting for target mode */
50663                 ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
50664 -                   min((u16)ha->msix_count, (u16)num_online_cpus()),
50665 +                   min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
50666                     PCI_IRQ_MSIX);
50667         } else
50668                 ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
50669 -                   min((u16)ha->msix_count, (u16)num_online_cpus()),
50670 +                   min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
50671                     PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
50672                     &desc);
50674 diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
50675 index 074392560f3d..0e07b98dfae8 100644
50676 --- a/drivers/scsi/qla2xxx/qla_os.c
50677 +++ b/drivers/scsi/qla2xxx/qla_os.c
50678 @@ -1013,8 +1013,6 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
50679         if (rval != QLA_SUCCESS) {
50680                 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
50681                     "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
50682 -               if (rval == QLA_INTERFACE_ERROR)
50683 -                       goto qc24_free_sp_fail_command;
50684                 goto qc24_host_busy_free_sp;
50685         }
50687 @@ -1026,11 +1024,6 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
50688  qc24_target_busy:
50689         return SCSI_MLQUEUE_TARGET_BUSY;
50691 -qc24_free_sp_fail_command:
50692 -       sp->free(sp);
50693 -       CMD_SP(cmd) = NULL;
50694 -       qla2xxx_rel_qpair_sp(sp->qpair, sp);
50696  qc24_fail_command:
50697         cmd->scsi_done(cmd);
50699 diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
50700 index a1dacb6e993e..c30f6047410f 100644
50701 --- a/drivers/scsi/smartpqi/smartpqi_init.c
50702 +++ b/drivers/scsi/smartpqi/smartpqi_init.c
50703 @@ -5488,6 +5488,8 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
50705                                 list_del(&io_request->request_list_entry);
50706                                 set_host_byte(scmd, DID_RESET);
50707 +                               pqi_free_io_request(io_request);
50708 +                               scsi_dma_unmap(scmd);
50709                                 pqi_scsi_done(scmd);
50710                         }
50712 @@ -5524,6 +5526,8 @@ static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info)
50714                                 list_del(&io_request->request_list_entry);
50715                                 set_host_byte(scmd, DID_RESET);
50716 +                               pqi_free_io_request(io_request);
50717 +                               scsi_dma_unmap(scmd);
50718                                 pqi_scsi_done(scmd);
50719                         }
50721 @@ -6598,6 +6602,7 @@ static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
50722         shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
50723         shost->unique_id = shost->irq;
50724         shost->nr_hw_queues = ctrl_info->num_queue_groups;
50725 +       shost->host_tagset = 1;
50726         shost->hostdata[0] = (unsigned long)ctrl_info;
50728         rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
50729 @@ -8216,6 +8221,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
50730                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50731                                0x152d, 0x8a37)
50732         },
50733 +       {
50734 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50735 +                              0x193d, 0x8460)
50736 +       },
50737         {
50738                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50739                                0x193d, 0x1104)
50740 @@ -8288,6 +8297,22 @@ static const struct pci_device_id pqi_pci_id_table[] = {
50741                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50742                                0x1bd4, 0x004f)
50743         },
50744 +       {
50745 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50746 +                              0x1bd4, 0x0051)
50747 +       },
50748 +       {
50749 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50750 +                              0x1bd4, 0x0052)
50751 +       },
50752 +       {
50753 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50754 +                              0x1bd4, 0x0053)
50755 +       },
50756 +       {
50757 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50758 +                              0x1bd4, 0x0054)
50759 +       },
50760         {
50761                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50762                                0x19e5, 0xd227)
50763 @@ -8448,6 +8473,122 @@ static const struct pci_device_id pqi_pci_id_table[] = {
50764                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50765                                PCI_VENDOR_ID_ADAPTEC2, 0x1380)
50766         },
50767 +       {
50768 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50769 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1400)
50770 +       },
50771 +       {
50772 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50773 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1402)
50774 +       },
50775 +       {
50776 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50777 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1410)
50778 +       },
50779 +       {
50780 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50781 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1411)
50782 +       },
50783 +       {
50784 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50785 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1412)
50786 +       },
50787 +       {
50788 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50789 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1420)
50790 +       },
50791 +       {
50792 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50793 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1430)
50794 +       },
50795 +       {
50796 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50797 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1440)
50798 +       },
50799 +       {
50800 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50801 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1441)
50802 +       },
50803 +       {
50804 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50805 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1450)
50806 +       },
50807 +       {
50808 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50809 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1452)
50810 +       },
50811 +       {
50812 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50813 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1460)
50814 +       },
50815 +       {
50816 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50817 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1461)
50818 +       },
50819 +       {
50820 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50821 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1462)
50822 +       },
50823 +       {
50824 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50825 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1470)
50826 +       },
50827 +       {
50828 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50829 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1471)
50830 +       },
50831 +       {
50832 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50833 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1472)
50834 +       },
50835 +       {
50836 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50837 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1480)
50838 +       },
50839 +       {
50840 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50841 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1490)
50842 +       },
50843 +       {
50844 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50845 +                              PCI_VENDOR_ID_ADAPTEC2, 0x1491)
50846 +       },
50847 +       {
50848 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50849 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14a0)
50850 +       },
50851 +       {
50852 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50853 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14a1)
50854 +       },
50855 +       {
50856 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50857 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14b0)
50858 +       },
50859 +       {
50860 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50861 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14b1)
50862 +       },
50863 +       {
50864 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50865 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14c0)
50866 +       },
50867 +       {
50868 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50869 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14c1)
50870 +       },
50871 +       {
50872 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50873 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
50874 +       },
50875 +       {
50876 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50877 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14e0)
50878 +       },
50879 +       {
50880 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50881 +                              PCI_VENDOR_ID_ADAPTEC2, 0x14f0)
50882 +       },
50883         {
50884                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50885                                PCI_VENDOR_ID_ADVANTECH, 0x8312)
50886 @@ -8512,6 +8653,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
50887                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50888                                PCI_VENDOR_ID_HP, 0x1001)
50889         },
50890 +       {
50891 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50892 +                              PCI_VENDOR_ID_HP, 0x1002)
50893 +       },
50894         {
50895                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50896                                PCI_VENDOR_ID_HP, 0x1100)
50897 @@ -8520,6 +8665,22 @@ static const struct pci_device_id pqi_pci_id_table[] = {
50898                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50899                                PCI_VENDOR_ID_HP, 0x1101)
50900         },
50901 +       {
50902 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50903 +                              0x1590, 0x0294)
50904 +       },
50905 +       {
50906 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50907 +                              0x1590, 0x02db)
50908 +       },
50909 +       {
50910 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50911 +                              0x1590, 0x02dc)
50912 +       },
50913 +       {
50914 +               PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50915 +                              0x1590, 0x032e)
50916 +       },
50917         {
50918                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
50919                                0x1d8d, 0x0800)
50920 diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
50921 index 9e2e196bc202..97c6f81b1d2a 100644
50922 --- a/drivers/scsi/sni_53c710.c
50923 +++ b/drivers/scsi/sni_53c710.c
50924 @@ -58,6 +58,7 @@ static int snirm710_probe(struct platform_device *dev)
50925         struct NCR_700_Host_Parameters *hostdata;
50926         struct Scsi_Host *host;
50927         struct  resource *res;
50928 +       int rc;
50930         res = platform_get_resource(dev, IORESOURCE_MEM, 0);
50931         if (!res)
50932 @@ -83,7 +84,9 @@ static int snirm710_probe(struct platform_device *dev)
50933                 goto out_kfree;
50934         host->this_id = 7;
50935         host->base = base;
50936 -       host->irq = platform_get_irq(dev, 0);
50937 +       host->irq = rc = platform_get_irq(dev, 0);
50938 +       if (rc < 0)
50939 +               goto out_put_host;
50940         if(request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "snirm710", host)) {
50941                 printk(KERN_ERR "snirm710: request_irq failed!\n");
50942                 goto out_put_host;
50943 diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
50944 index 6dd0ff188bb4..aedf0b78f622 100644
50945 --- a/drivers/scsi/snic/snic_scsi.c
50946 +++ b/drivers/scsi/snic/snic_scsi.c
50947 @@ -2349,7 +2349,7 @@ snic_reset(struct Scsi_Host *shost, struct scsi_cmnd *sc)
50949         /* Wait for all the IOs that are entered in Qcmd */
50950         while (atomic_read(&snic->ios_inflight))
50951 -               schedule_timeout(msecs_to_jiffies(1));
50952 +               schedule_msec_hrtimeout((1));
50954         ret = snic_issue_hba_reset(snic, sc);
50955         if (ret) {
50956 diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
50957 index 7de82f2c9757..d3489ac7ab28 100644
50958 --- a/drivers/scsi/sun3x_esp.c
50959 +++ b/drivers/scsi/sun3x_esp.c
50960 @@ -206,7 +206,9 @@ static int esp_sun3x_probe(struct platform_device *dev)
50961         if (!esp->command_block)
50962                 goto fail_unmap_regs_dma;
50964 -       host->irq = platform_get_irq(dev, 0);
50965 +       host->irq = err = platform_get_irq(dev, 0);
50966 +       if (err < 0)
50967 +               goto fail_unmap_command_block;
50968         err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED,
50969                           "SUN3X ESP", esp);
50970         if (err < 0)
50971 diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
50972 index 1a69949a4ea1..b56d9b4e5f03 100644
50973 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c
50974 +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
50975 @@ -377,7 +377,7 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
50977         irq = platform_get_irq(pdev, 0);
50978         if (irq < 0) {
50979 -               err = -ENODEV;
50980 +               err = irq;
50981                 goto out;
50982         }
50984 diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
50985 index d3d05e997c13..0c71a159d08f 100644
50986 --- a/drivers/scsi/ufs/ufshcd.c
50987 +++ b/drivers/scsi/ufs/ufshcd.c
50988 @@ -8599,7 +8599,7 @@ static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
50989         } else if (!ufshcd_is_ufs_dev_active(hba)) {
50990                 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
50991                 vcc_off = true;
50992 -               if (!ufshcd_is_link_active(hba)) {
50993 +               if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) {
50994                         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
50995                         ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
50996                 }
50997 @@ -8621,7 +8621,7 @@ static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
50998             !hba->dev_info.is_lu_power_on_wp) {
50999                 ret = ufshcd_setup_vreg(hba, true);
51000         } else if (!ufshcd_is_ufs_dev_active(hba)) {
51001 -               if (!ret && !ufshcd_is_link_active(hba)) {
51002 +               if (!ufshcd_is_link_active(hba)) {
51003                         ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
51004                         if (ret)
51005                                 goto vcc_disable;
51006 @@ -8978,10 +8978,13 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
51007         if (!hba->is_powered)
51008                 return 0;
51010 +       cancel_delayed_work_sync(&hba->rpm_dev_flush_recheck_work);
51012         if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
51013              hba->curr_dev_pwr_mode) &&
51014             (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
51015              hba->uic_link_state) &&
51016 +            pm_runtime_suspended(hba->dev) &&
51017              !hba->dev_info.b_rpm_dev_flush_capable)
51018                 goto out;
51020 diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c
51021 index 20acac6342ef..5828f94b8a7d 100644
51022 --- a/drivers/soc/aspeed/aspeed-lpc-snoop.c
51023 +++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c
51024 @@ -95,8 +95,10 @@ static ssize_t snoop_file_read(struct file *file, char __user *buffer,
51025                         return -EINTR;
51026         }
51027         ret = kfifo_to_user(&chan->fifo, buffer, count, &copied);
51028 +       if (ret)
51029 +               return ret;
51031 -       return ret ? ret : copied;
51032 +       return copied;
51035  static __poll_t snoop_file_poll(struct file *file,
51036 diff --git a/drivers/soc/mediatek/mt8173-pm-domains.h b/drivers/soc/mediatek/mt8173-pm-domains.h
51037 index 3e8ee5dabb43..654c717e5467 100644
51038 --- a/drivers/soc/mediatek/mt8173-pm-domains.h
51039 +++ b/drivers/soc/mediatek/mt8173-pm-domains.h
51040 @@ -12,24 +12,28 @@
51042  static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
51043         [MT8173_POWER_DOMAIN_VDEC] = {
51044 +               .name = "vdec",
51045                 .sta_mask = PWR_STATUS_VDEC,
51046                 .ctl_offs = SPM_VDE_PWR_CON,
51047                 .sram_pdn_bits = GENMASK(11, 8),
51048                 .sram_pdn_ack_bits = GENMASK(12, 12),
51049         },
51050         [MT8173_POWER_DOMAIN_VENC] = {
51051 +               .name = "venc",
51052                 .sta_mask = PWR_STATUS_VENC,
51053                 .ctl_offs = SPM_VEN_PWR_CON,
51054                 .sram_pdn_bits = GENMASK(11, 8),
51055                 .sram_pdn_ack_bits = GENMASK(15, 12),
51056         },
51057         [MT8173_POWER_DOMAIN_ISP] = {
51058 +               .name = "isp",
51059                 .sta_mask = PWR_STATUS_ISP,
51060                 .ctl_offs = SPM_ISP_PWR_CON,
51061                 .sram_pdn_bits = GENMASK(11, 8),
51062                 .sram_pdn_ack_bits = GENMASK(13, 12),
51063         },
51064         [MT8173_POWER_DOMAIN_MM] = {
51065 +               .name = "mm",
51066                 .sta_mask = PWR_STATUS_DISP,
51067                 .ctl_offs = SPM_DIS_PWR_CON,
51068                 .sram_pdn_bits = GENMASK(11, 8),
51069 @@ -40,18 +44,21 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
51070                 },
51071         },
51072         [MT8173_POWER_DOMAIN_VENC_LT] = {
51073 +               .name = "venc_lt",
51074                 .sta_mask = PWR_STATUS_VENC_LT,
51075                 .ctl_offs = SPM_VEN2_PWR_CON,
51076                 .sram_pdn_bits = GENMASK(11, 8),
51077                 .sram_pdn_ack_bits = GENMASK(15, 12),
51078         },
51079         [MT8173_POWER_DOMAIN_AUDIO] = {
51080 +               .name = "audio",
51081                 .sta_mask = PWR_STATUS_AUDIO,
51082                 .ctl_offs = SPM_AUDIO_PWR_CON,
51083                 .sram_pdn_bits = GENMASK(11, 8),
51084                 .sram_pdn_ack_bits = GENMASK(15, 12),
51085         },
51086         [MT8173_POWER_DOMAIN_USB] = {
51087 +               .name = "usb",
51088                 .sta_mask = PWR_STATUS_USB,
51089                 .ctl_offs = SPM_USB_PWR_CON,
51090                 .sram_pdn_bits = GENMASK(11, 8),
51091 @@ -59,18 +66,21 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
51092                 .caps = MTK_SCPD_ACTIVE_WAKEUP,
51093         },
51094         [MT8173_POWER_DOMAIN_MFG_ASYNC] = {
51095 +               .name = "mfg_async",
51096                 .sta_mask = PWR_STATUS_MFG_ASYNC,
51097                 .ctl_offs = SPM_MFG_ASYNC_PWR_CON,
51098                 .sram_pdn_bits = GENMASK(11, 8),
51099                 .sram_pdn_ack_bits = 0,
51100         },
51101         [MT8173_POWER_DOMAIN_MFG_2D] = {
51102 +               .name = "mfg_2d",
51103                 .sta_mask = PWR_STATUS_MFG_2D,
51104                 .ctl_offs = SPM_MFG_2D_PWR_CON,
51105                 .sram_pdn_bits = GENMASK(11, 8),
51106                 .sram_pdn_ack_bits = GENMASK(13, 12),
51107         },
51108         [MT8173_POWER_DOMAIN_MFG] = {
51109 +               .name = "mfg",
51110                 .sta_mask = PWR_STATUS_MFG,
51111                 .ctl_offs = SPM_MFG_PWR_CON,
51112                 .sram_pdn_bits = GENMASK(13, 8),
51113 diff --git a/drivers/soc/mediatek/mt8183-pm-domains.h b/drivers/soc/mediatek/mt8183-pm-domains.h
51114 index aa5230e6c12f..98a9940d05fb 100644
51115 --- a/drivers/soc/mediatek/mt8183-pm-domains.h
51116 +++ b/drivers/soc/mediatek/mt8183-pm-domains.h
51117 @@ -12,12 +12,14 @@
51119  static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
51120         [MT8183_POWER_DOMAIN_AUDIO] = {
51121 +               .name = "audio",
51122                 .sta_mask = PWR_STATUS_AUDIO,
51123                 .ctl_offs = 0x0314,
51124                 .sram_pdn_bits = GENMASK(11, 8),
51125                 .sram_pdn_ack_bits = GENMASK(15, 12),
51126         },
51127         [MT8183_POWER_DOMAIN_CONN] = {
51128 +               .name = "conn",
51129                 .sta_mask = PWR_STATUS_CONN,
51130                 .ctl_offs = 0x032c,
51131                 .sram_pdn_bits = 0,
51132 @@ -28,12 +30,14 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
51133                 },
51134         },
51135         [MT8183_POWER_DOMAIN_MFG_ASYNC] = {
51136 +               .name = "mfg_async",
51137                 .sta_mask = PWR_STATUS_MFG_ASYNC,
51138                 .ctl_offs = 0x0334,
51139                 .sram_pdn_bits = 0,
51140                 .sram_pdn_ack_bits = 0,
51141         },
51142         [MT8183_POWER_DOMAIN_MFG] = {
51143 +               .name = "mfg",
51144                 .sta_mask = PWR_STATUS_MFG,
51145                 .ctl_offs = 0x0338,
51146                 .sram_pdn_bits = GENMASK(8, 8),
51147 @@ -41,18 +45,21 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
51148                 .caps = MTK_SCPD_DOMAIN_SUPPLY,
51149         },
51150         [MT8183_POWER_DOMAIN_MFG_CORE0] = {
51151 +               .name = "mfg_core0",
51152                 .sta_mask = BIT(7),
51153                 .ctl_offs = 0x034c,
51154                 .sram_pdn_bits = GENMASK(8, 8),
51155                 .sram_pdn_ack_bits = GENMASK(12, 12),
51156         },
51157         [MT8183_POWER_DOMAIN_MFG_CORE1] = {
51158 +               .name = "mfg_core1",
51159                 .sta_mask = BIT(20),
51160                 .ctl_offs = 0x0310,
51161                 .sram_pdn_bits = GENMASK(8, 8),
51162                 .sram_pdn_ack_bits = GENMASK(12, 12),
51163         },
51164         [MT8183_POWER_DOMAIN_MFG_2D] = {
51165 +               .name = "mfg_2d",
51166                 .sta_mask = PWR_STATUS_MFG_2D,
51167                 .ctl_offs = 0x0348,
51168                 .sram_pdn_bits = GENMASK(8, 8),
51169 @@ -65,6 +72,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
51170                 },
51171         },
51172         [MT8183_POWER_DOMAIN_DISP] = {
51173 +               .name = "disp",
51174                 .sta_mask = PWR_STATUS_DISP,
51175                 .ctl_offs = 0x030c,
51176                 .sram_pdn_bits = GENMASK(8, 8),
51177 @@ -83,6 +91,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
51178                 },
51179         },
51180         [MT8183_POWER_DOMAIN_CAM] = {
51181 +               .name = "cam",
51182                 .sta_mask = BIT(25),
51183                 .ctl_offs = 0x0344,
51184                 .sram_pdn_bits = GENMASK(9, 8),
51185 @@ -105,6 +114,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
51186                 },
51187         },
51188         [MT8183_POWER_DOMAIN_ISP] = {
51189 +               .name = "isp",
51190                 .sta_mask = PWR_STATUS_ISP,
51191                 .ctl_offs = 0x0308,
51192                 .sram_pdn_bits = GENMASK(9, 8),
51193 @@ -127,6 +137,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
51194                 },
51195         },
51196         [MT8183_POWER_DOMAIN_VDEC] = {
51197 +               .name = "vdec",
51198                 .sta_mask = BIT(31),
51199                 .ctl_offs = 0x0300,
51200                 .sram_pdn_bits = GENMASK(8, 8),
51201 @@ -139,6 +150,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
51202                 },
51203         },
51204         [MT8183_POWER_DOMAIN_VENC] = {
51205 +               .name = "venc",
51206                 .sta_mask = PWR_STATUS_VENC,
51207                 .ctl_offs = 0x0304,
51208                 .sram_pdn_bits = GENMASK(11, 8),
51209 @@ -151,6 +163,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
51210                 },
51211         },
51212         [MT8183_POWER_DOMAIN_VPU_TOP] = {
51213 +               .name = "vpu_top",
51214                 .sta_mask = BIT(26),
51215                 .ctl_offs = 0x0324,
51216                 .sram_pdn_bits = GENMASK(8, 8),
51217 @@ -177,6 +190,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
51218                 },
51219         },
51220         [MT8183_POWER_DOMAIN_VPU_CORE0] = {
51221 +               .name = "vpu_core0",
51222                 .sta_mask = BIT(27),
51223                 .ctl_offs = 0x33c,
51224                 .sram_pdn_bits = GENMASK(11, 8),
51225 @@ -194,6 +208,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
51226                 .caps = MTK_SCPD_SRAM_ISO,
51227         },
51228         [MT8183_POWER_DOMAIN_VPU_CORE1] = {
51229 +               .name = "vpu_core1",
51230                 .sta_mask = BIT(28),
51231                 .ctl_offs = 0x0340,
51232                 .sram_pdn_bits = GENMASK(11, 8),
51233 diff --git a/drivers/soc/mediatek/mt8192-pm-domains.h b/drivers/soc/mediatek/mt8192-pm-domains.h
51234 index 0fdf6dc6231f..543dda70de01 100644
51235 --- a/drivers/soc/mediatek/mt8192-pm-domains.h
51236 +++ b/drivers/soc/mediatek/mt8192-pm-domains.h
51237 @@ -12,6 +12,7 @@
51239  static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
51240         [MT8192_POWER_DOMAIN_AUDIO] = {
51241 +               .name = "audio",
51242                 .sta_mask = BIT(21),
51243                 .ctl_offs = 0x0354,
51244                 .sram_pdn_bits = GENMASK(8, 8),
51245 @@ -24,6 +25,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
51246                 },
51247         },
51248         [MT8192_POWER_DOMAIN_CONN] = {
51249 +               .name = "conn",
51250                 .sta_mask = PWR_STATUS_CONN,
51251                 .ctl_offs = 0x0304,
51252                 .sram_pdn_bits = 0,
51253 @@ -45,12 +47,14 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
51254                 .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
51255         },
51256         [MT8192_POWER_DOMAIN_MFG0] = {
51257 +               .name = "mfg0",
51258                 .sta_mask = BIT(2),
51259                 .ctl_offs = 0x0308,
51260                 .sram_pdn_bits = GENMASK(8, 8),
51261                 .sram_pdn_ack_bits = GENMASK(12, 12),
51262         },
51263         [MT8192_POWER_DOMAIN_MFG1] = {
51264 +               .name = "mfg1",
51265                 .sta_mask = BIT(3),
51266                 .ctl_offs = 0x030c,
51267                 .sram_pdn_bits = GENMASK(8, 8),
51268 @@ -75,36 +79,42 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
51269                 },
51270         },
51271         [MT8192_POWER_DOMAIN_MFG2] = {
51272 +               .name = "mfg2",
51273                 .sta_mask = BIT(4),
51274                 .ctl_offs = 0x0310,
51275                 .sram_pdn_bits = GENMASK(8, 8),
51276                 .sram_pdn_ack_bits = GENMASK(12, 12),
51277         },
51278         [MT8192_POWER_DOMAIN_MFG3] = {
51279 +               .name = "mfg3",
51280                 .sta_mask = BIT(5),
51281                 .ctl_offs = 0x0314,
51282                 .sram_pdn_bits = GENMASK(8, 8),
51283                 .sram_pdn_ack_bits = GENMASK(12, 12),
51284         },
51285         [MT8192_POWER_DOMAIN_MFG4] = {
51286 +               .name = "mfg4",
51287                 .sta_mask = BIT(6),
51288                 .ctl_offs = 0x0318,
51289                 .sram_pdn_bits = GENMASK(8, 8),
51290                 .sram_pdn_ack_bits = GENMASK(12, 12),
51291         },
51292         [MT8192_POWER_DOMAIN_MFG5] = {
51293 +               .name = "mfg5",
51294                 .sta_mask = BIT(7),
51295                 .ctl_offs = 0x031c,
51296                 .sram_pdn_bits = GENMASK(8, 8),
51297                 .sram_pdn_ack_bits = GENMASK(12, 12),
51298         },
51299         [MT8192_POWER_DOMAIN_MFG6] = {
51300 +               .name = "mfg6",
51301                 .sta_mask = BIT(8),
51302                 .ctl_offs = 0x0320,
51303                 .sram_pdn_bits = GENMASK(8, 8),
51304                 .sram_pdn_ack_bits = GENMASK(12, 12),
51305         },
51306         [MT8192_POWER_DOMAIN_DISP] = {
51307 +               .name = "disp",
51308                 .sta_mask = BIT(20),
51309                 .ctl_offs = 0x0350,
51310                 .sram_pdn_bits = GENMASK(8, 8),
51311 @@ -133,6 +143,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
51312                 },
51313         },
51314         [MT8192_POWER_DOMAIN_IPE] = {
51315 +               .name = "ipe",
51316                 .sta_mask = BIT(14),
51317                 .ctl_offs = 0x0338,
51318                 .sram_pdn_bits = GENMASK(8, 8),
51319 @@ -149,6 +160,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
51320                 },
51321         },
51322         [MT8192_POWER_DOMAIN_ISP] = {
51323 +               .name = "isp",
51324                 .sta_mask = BIT(12),
51325                 .ctl_offs = 0x0330,
51326                 .sram_pdn_bits = GENMASK(8, 8),
51327 @@ -165,6 +177,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
51328                 },
51329         },
51330         [MT8192_POWER_DOMAIN_ISP2] = {
51331 +               .name = "isp2",
51332                 .sta_mask = BIT(13),
51333                 .ctl_offs = 0x0334,
51334                 .sram_pdn_bits = GENMASK(8, 8),
51335 @@ -181,6 +194,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
51336                 },
51337         },
51338         [MT8192_POWER_DOMAIN_MDP] = {
51339 +               .name = "mdp",
51340                 .sta_mask = BIT(19),
51341                 .ctl_offs = 0x034c,
51342                 .sram_pdn_bits = GENMASK(8, 8),
51343 @@ -197,6 +211,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
51344                 },
51345         },
51346         [MT8192_POWER_DOMAIN_VENC] = {
51347 +               .name = "venc",
51348                 .sta_mask = BIT(17),
51349                 .ctl_offs = 0x0344,
51350                 .sram_pdn_bits = GENMASK(8, 8),
51351 @@ -213,6 +228,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
51352                 },
51353         },
51354         [MT8192_POWER_DOMAIN_VDEC] = {
51355 +               .name = "vdec",
51356                 .sta_mask = BIT(15),
51357                 .ctl_offs = 0x033c,
51358                 .sram_pdn_bits = GENMASK(8, 8),
51359 @@ -229,12 +245,14 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
51360                 },
51361         },
51362         [MT8192_POWER_DOMAIN_VDEC2] = {
51363 +               .name = "vdec2",
51364                 .sta_mask = BIT(16),
51365                 .ctl_offs = 0x0340,
51366                 .sram_pdn_bits = GENMASK(8, 8),
51367                 .sram_pdn_ack_bits = GENMASK(12, 12),
51368         },
51369         [MT8192_POWER_DOMAIN_CAM] = {
51370 +               .name = "cam",
51371                 .sta_mask = BIT(23),
51372                 .ctl_offs = 0x035c,
51373                 .sram_pdn_bits = GENMASK(8, 8),
51374 @@ -263,18 +281,21 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
51375                 },
51376         },
51377         [MT8192_POWER_DOMAIN_CAM_RAWA] = {
51378 +               .name = "cam_rawa",
51379                 .sta_mask = BIT(24),
51380                 .ctl_offs = 0x0360,
51381                 .sram_pdn_bits = GENMASK(8, 8),
51382                 .sram_pdn_ack_bits = GENMASK(12, 12),
51383         },
51384         [MT8192_POWER_DOMAIN_CAM_RAWB] = {
51385 +               .name = "cam_rawb",
51386                 .sta_mask = BIT(25),
51387                 .ctl_offs = 0x0364,
51388                 .sram_pdn_bits = GENMASK(8, 8),
51389                 .sram_pdn_ack_bits = GENMASK(12, 12),
51390         },
51391         [MT8192_POWER_DOMAIN_CAM_RAWC] = {
51392 +               .name = "cam_rawc",
51393                 .sta_mask = BIT(26),
51394                 .ctl_offs = 0x0368,
51395                 .sram_pdn_bits = GENMASK(8, 8),
51396 diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c
51397 index b7f697666bdd..0af00efa0ef8 100644
51398 --- a/drivers/soc/mediatek/mtk-pm-domains.c
51399 +++ b/drivers/soc/mediatek/mtk-pm-domains.c
51400 @@ -438,7 +438,11 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no
51401                 goto err_unprepare_subsys_clocks;
51402         }
51404 -       pd->genpd.name = node->name;
51405 +       if (!pd->data->name)
51406 +               pd->genpd.name = node->name;
51407 +       else
51408 +               pd->genpd.name = pd->data->name;
51410         pd->genpd.power_off = scpsys_power_off;
51411         pd->genpd.power_on = scpsys_power_on;
51413 @@ -487,8 +491,9 @@ static int scpsys_add_subdomain(struct scpsys *scpsys, struct device_node *paren
51415                 child_pd = scpsys_add_one_domain(scpsys, child);
51416                 if (IS_ERR(child_pd)) {
51417 -                       dev_err_probe(scpsys->dev, PTR_ERR(child_pd),
51418 -                                     "%pOF: failed to get child domain id\n", child);
51419 +                       ret = PTR_ERR(child_pd);
51420 +                       dev_err_probe(scpsys->dev, ret, "%pOF: failed to get child domain id\n",
51421 +                                     child);
51422                         goto err_put_node;
51423                 }
51425 diff --git a/drivers/soc/mediatek/mtk-pm-domains.h b/drivers/soc/mediatek/mtk-pm-domains.h
51426 index 141dc76054e6..21a4e113bbec 100644
51427 --- a/drivers/soc/mediatek/mtk-pm-domains.h
51428 +++ b/drivers/soc/mediatek/mtk-pm-domains.h
51429 @@ -76,6 +76,7 @@ struct scpsys_bus_prot_data {
51431  /**
51432   * struct scpsys_domain_data - scp domain data for power on/off flow
51433 + * @name: The name of the power domain.
51434   * @sta_mask: The mask for power on/off status bit.
51435   * @ctl_offs: The offset for main power control register.
51436   * @sram_pdn_bits: The mask for sram power control bits.
51437 @@ -85,6 +86,7 @@ struct scpsys_bus_prot_data {
51438   * @bp_smi: bus protection for smi subsystem
51439   */
51440  struct scpsys_domain_data {
51441 +       const char *name;
51442         u32 sta_mask;
51443         int ctl_offs;
51444         u32 sram_pdn_bits;
51445 diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c
51446 index 24cd193dec55..eba7f76f9d61 100644
51447 --- a/drivers/soc/qcom/mdt_loader.c
51448 +++ b/drivers/soc/qcom/mdt_loader.c
51449 @@ -230,6 +230,14 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
51450                         break;
51451                 }
51453 +               if (phdr->p_filesz > phdr->p_memsz) {
51454 +                       dev_err(dev,
51455 +                               "refusing to load segment %d with p_filesz > p_memsz\n",
51456 +                               i);
51457 +                       ret = -EINVAL;
51458 +                       break;
51459 +               }
51461                 ptr = mem_region + offset;
51463                 if (phdr->p_filesz && phdr->p_offset < fw->size) {
51464 @@ -253,6 +261,15 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
51465                                 break;
51466                         }
51468 +                       if (seg_fw->size != phdr->p_filesz) {
51469 +                               dev_err(dev,
51470 +                                       "failed to load segment %d from truncated file %s\n",
51471 +                                       i, fw_name);
51472 +                               release_firmware(seg_fw);
51473 +                               ret = -EINVAL;
51474 +                               break;
51475 +                       }
51477                         release_firmware(seg_fw);
51478                 }
51480 diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
51481 index 209dcdca923f..915d5bc3d46e 100644
51482 --- a/drivers/soc/qcom/pdr_interface.c
51483 +++ b/drivers/soc/qcom/pdr_interface.c
51484 @@ -153,7 +153,7 @@ static int pdr_register_listener(struct pdr_handle *pdr,
51485         if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
51486                 pr_err("PDR: %s register listener failed: 0x%x\n",
51487                        pds->service_path, resp.resp.error);
51488 -               return ret;
51489 +               return -EREMOTEIO;
51490         }
51492         pds->state = resp.curr_state;
51493 diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
51494 index df9a5ca8c99c..0118bd986f90 100644
51495 --- a/drivers/soc/tegra/pmc.c
51496 +++ b/drivers/soc/tegra/pmc.c
51497 @@ -317,6 +317,8 @@ struct tegra_pmc_soc {
51498                                    bool invert);
51499         int (*irq_set_wake)(struct irq_data *data, unsigned int on);
51500         int (*irq_set_type)(struct irq_data *data, unsigned int type);
51501 +       int (*powergate_set)(struct tegra_pmc *pmc, unsigned int id,
51502 +                            bool new_state);
51504         const char * const *reset_sources;
51505         unsigned int num_reset_sources;
51506 @@ -517,6 +519,63 @@ static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name)
51507         return -ENODEV;
51510 +static int tegra20_powergate_set(struct tegra_pmc *pmc, unsigned int id,
51511 +                                bool new_state)
51513 +       unsigned int retries = 100;
51514 +       bool status;
51515 +       int ret;
51517 +       /*
51518 +        * As per TRM documentation, the toggle command will be dropped by PMC
51519 +        * if there is contention with a HW-initiated toggling (i.e. CPU core
51520 +        * power-gated), the command should be retried in that case.
51521 +        */
51522 +       do {
51523 +               tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
51525 +               /* wait for PMC to execute the command */
51526 +               ret = readx_poll_timeout(tegra_powergate_state, id, status,
51527 +                                        status == new_state, 1, 10);
51528 +       } while (ret == -ETIMEDOUT && retries--);
51530 +       return ret;
51533 +static inline bool tegra_powergate_toggle_ready(struct tegra_pmc *pmc)
51535 +       return !(tegra_pmc_readl(pmc, PWRGATE_TOGGLE) & PWRGATE_TOGGLE_START);
51538 +static int tegra114_powergate_set(struct tegra_pmc *pmc, unsigned int id,
51539 +                                 bool new_state)
51541 +       bool status;
51542 +       int err;
51544 +       /* wait while PMC power gating is contended */
51545 +       err = readx_poll_timeout(tegra_powergate_toggle_ready, pmc, status,
51546 +                                status == true, 1, 100);
51547 +       if (err)
51548 +               return err;
51550 +       tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
51552 +       /* wait for PMC to accept the command */
51553 +       err = readx_poll_timeout(tegra_powergate_toggle_ready, pmc, status,
51554 +                                status == true, 1, 100);
51555 +       if (err)
51556 +               return err;
51558 +       /* wait for PMC to execute the command */
51559 +       err = readx_poll_timeout(tegra_powergate_state, id, status,
51560 +                                status == new_state, 10, 100000);
51561 +       if (err)
51562 +               return err;
51564 +       return 0;
51567  /**
51568   * tegra_powergate_set() - set the state of a partition
51569   * @pmc: power management controller
51570 @@ -526,7 +585,6 @@ static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name)
51571  static int tegra_powergate_set(struct tegra_pmc *pmc, unsigned int id,
51572                                bool new_state)
51574 -       bool status;
51575         int err;
51577         if (id == TEGRA_POWERGATE_3D && pmc->soc->has_gpu_clamps)
51578 @@ -539,10 +597,7 @@ static int tegra_powergate_set(struct tegra_pmc *pmc, unsigned int id,
51579                 return 0;
51580         }
51582 -       tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
51584 -       err = readx_poll_timeout(tegra_powergate_state, id, status,
51585 -                                status == new_state, 10, 100000);
51586 +       err = pmc->soc->powergate_set(pmc, id, new_state);
51588         mutex_unlock(&pmc->powergates_lock);
51590 @@ -2699,6 +2754,7 @@ static const struct tegra_pmc_soc tegra20_pmc_soc = {
51591         .regs = &tegra20_pmc_regs,
51592         .init = tegra20_pmc_init,
51593         .setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
51594 +       .powergate_set = tegra20_powergate_set,
51595         .reset_sources = NULL,
51596         .num_reset_sources = 0,
51597         .reset_levels = NULL,
51598 @@ -2757,6 +2813,7 @@ static const struct tegra_pmc_soc tegra30_pmc_soc = {
51599         .regs = &tegra20_pmc_regs,
51600         .init = tegra20_pmc_init,
51601         .setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
51602 +       .powergate_set = tegra20_powergate_set,
51603         .reset_sources = tegra30_reset_sources,
51604         .num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
51605         .reset_levels = NULL,
51606 @@ -2811,6 +2868,7 @@ static const struct tegra_pmc_soc tegra114_pmc_soc = {
51607         .regs = &tegra20_pmc_regs,
51608         .init = tegra20_pmc_init,
51609         .setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
51610 +       .powergate_set = tegra114_powergate_set,
51611         .reset_sources = tegra30_reset_sources,
51612         .num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
51613         .reset_levels = NULL,
51614 @@ -2925,6 +2983,7 @@ static const struct tegra_pmc_soc tegra124_pmc_soc = {
51615         .regs = &tegra20_pmc_regs,
51616         .init = tegra20_pmc_init,
51617         .setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
51618 +       .powergate_set = tegra114_powergate_set,
51619         .reset_sources = tegra30_reset_sources,
51620         .num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
51621         .reset_levels = NULL,
51622 @@ -3048,6 +3107,7 @@ static const struct tegra_pmc_soc tegra210_pmc_soc = {
51623         .regs = &tegra20_pmc_regs,
51624         .init = tegra20_pmc_init,
51625         .setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
51626 +       .powergate_set = tegra114_powergate_set,
51627         .irq_set_wake = tegra210_pmc_irq_set_wake,
51628         .irq_set_type = tegra210_pmc_irq_set_type,
51629         .reset_sources = tegra210_reset_sources,
51630 diff --git a/drivers/soc/tegra/regulators-tegra30.c b/drivers/soc/tegra/regulators-tegra30.c
51631 index 7f21f31de09d..0e776b20f625 100644
51632 --- a/drivers/soc/tegra/regulators-tegra30.c
51633 +++ b/drivers/soc/tegra/regulators-tegra30.c
51634 @@ -178,7 +178,7 @@ static int tegra30_voltage_update(struct tegra_regulator_coupler *tegra,
51635          * survive the voltage drop if it's running on a higher frequency.
51636          */
51637         if (!cpu_min_uV_consumers)
51638 -               cpu_min_uV = cpu_uV;
51639 +               cpu_min_uV = max(cpu_uV, cpu_min_uV);
51641         /*
51642          * Bootloader shall set up voltages correctly, but if it
51643 diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
51644 index 46885429928a..4ec29338ce9a 100644
51645 --- a/drivers/soundwire/bus.c
51646 +++ b/drivers/soundwire/bus.c
51647 @@ -705,7 +705,7 @@ static int sdw_program_device_num(struct sdw_bus *bus)
51648         struct sdw_slave *slave, *_s;
51649         struct sdw_slave_id id;
51650         struct sdw_msg msg;
51651 -       bool found = false;
51652 +       bool found;
51653         int count = 0, ret;
51654         u64 addr;
51656 @@ -737,6 +737,7 @@ static int sdw_program_device_num(struct sdw_bus *bus)
51658                 sdw_extract_slave_id(bus, addr, &id);
51660 +               found = false;
51661                 /* Now compare with entries */
51662                 list_for_each_entry_safe(slave, _s, &bus->slaves, node) {
51663                         if (sdw_compare_devid(slave, id) == 0) {
51664 diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
51665 index d05442e646a3..57c59a33ce61 100644
51666 --- a/drivers/soundwire/cadence_master.c
51667 +++ b/drivers/soundwire/cadence_master.c
51668 @@ -1450,10 +1450,12 @@ int sdw_cdns_clock_stop(struct sdw_cdns *cdns, bool block_wake)
51669         }
51671         /* Prepare slaves for clock stop */
51672 -       ret = sdw_bus_prep_clk_stop(&cdns->bus);
51673 -       if (ret < 0) {
51674 -               dev_err(cdns->dev, "prepare clock stop failed %d", ret);
51675 -               return ret;
51676 +       if (slave_present) {
51677 +               ret = sdw_bus_prep_clk_stop(&cdns->bus);
51678 +               if (ret < 0 && ret != -ENODATA) {
51679 +                       dev_err(cdns->dev, "prepare clock stop failed %d\n", ret);
51680 +                       return ret;
51681 +               }
51682         }
51684         /*
51685 diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
51686 index 1099b5d1262b..a418c3c7001c 100644
51687 --- a/drivers/soundwire/stream.c
51688 +++ b/drivers/soundwire/stream.c
51689 @@ -1375,8 +1375,16 @@ int sdw_stream_add_slave(struct sdw_slave *slave,
51690         }
51692         ret = sdw_config_stream(&slave->dev, stream, stream_config, true);
51693 -       if (ret)
51694 +       if (ret) {
51695 +               /*
51696 +                * sdw_release_master_stream will release s_rt in slave_rt_list in
51697 +                * stream_error case, but s_rt is only added to slave_rt_list
51698 +                * when sdw_config_stream is successful, so free s_rt explicitly
51699 +                * when sdw_config_stream is failed.
51700 +                */
51701 +               kfree(s_rt);
51702                 goto stream_error;
51703 +       }
51705         list_add_tail(&s_rt->m_rt_node, &m_rt->slave_rt_list);
51707 diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
51708 index eb9a243e9526..98ace748cd98 100644
51709 --- a/drivers/spi/spi-ath79.c
51710 +++ b/drivers/spi/spi-ath79.c
51711 @@ -156,8 +156,7 @@ static int ath79_spi_probe(struct platform_device *pdev)
51713         master->use_gpio_descriptors = true;
51714         master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
51715 -       master->setup = spi_bitbang_setup;
51716 -       master->cleanup = spi_bitbang_cleanup;
51717 +       master->flags = SPI_MASTER_GPIO_SS;
51718         if (pdata) {
51719                 master->bus_num = pdata->bus_num;
51720                 master->num_chipselect = pdata->num_chipselect;
51721 diff --git a/drivers/spi/spi-dln2.c b/drivers/spi/spi-dln2.c
51722 index 75b33d7d14b0..9a4d942fafcf 100644
51723 --- a/drivers/spi/spi-dln2.c
51724 +++ b/drivers/spi/spi-dln2.c
51725 @@ -780,7 +780,7 @@ static int dln2_spi_probe(struct platform_device *pdev)
51727  static int dln2_spi_remove(struct platform_device *pdev)
51729 -       struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
51730 +       struct spi_master *master = platform_get_drvdata(pdev);
51731         struct dln2_spi *dln2 = spi_master_get_devdata(master);
51733         pm_runtime_disable(&pdev->dev);
51734 diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
51735 index a2886ee44e4c..5d98611dd999 100644
51736 --- a/drivers/spi/spi-fsl-lpspi.c
51737 +++ b/drivers/spi/spi-fsl-lpspi.c
51738 @@ -200,7 +200,7 @@ static int lpspi_prepare_xfer_hardware(struct spi_controller *controller)
51739                                 spi_controller_get_devdata(controller);
51740         int ret;
51742 -       ret = pm_runtime_get_sync(fsl_lpspi->dev);
51743 +       ret = pm_runtime_resume_and_get(fsl_lpspi->dev);
51744         if (ret < 0) {
51745                 dev_err(fsl_lpspi->dev, "failed to enable clock\n");
51746                 return ret;
51747 diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
51748 index e4a8d203f940..d0e5aa18b7ba 100644
51749 --- a/drivers/spi/spi-fsl-spi.c
51750 +++ b/drivers/spi/spi-fsl-spi.c
51751 @@ -707,6 +707,11 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
51752         struct resource mem;
51753         int irq, type;
51754         int ret;
51755 +       bool spisel_boot = false;
51756 +#if IS_ENABLED(CONFIG_FSL_SOC)
51757 +       struct mpc8xxx_spi_probe_info *pinfo = NULL;
51758 +#endif
51761         ret = of_mpc8xxx_spi_probe(ofdev);
51762         if (ret)
51763 @@ -715,9 +720,8 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
51764         type = fsl_spi_get_type(&ofdev->dev);
51765         if (type == TYPE_FSL) {
51766                 struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
51767 -               bool spisel_boot = false;
51768  #if IS_ENABLED(CONFIG_FSL_SOC)
51769 -               struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
51770 +               pinfo = to_of_pinfo(pdata);
51772                 spisel_boot = of_property_read_bool(np, "fsl,spisel_boot");
51773                 if (spisel_boot) {
51774 @@ -746,15 +750,24 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
51776         ret = of_address_to_resource(np, 0, &mem);
51777         if (ret)
51778 -               return ret;
51779 +               goto unmap_out;
51781         irq = platform_get_irq(ofdev, 0);
51782 -       if (irq < 0)
51783 -               return irq;
51784 +       if (irq < 0) {
51785 +               ret = irq;
51786 +               goto unmap_out;
51787 +       }
51789         master = fsl_spi_probe(dev, &mem, irq);
51791         return PTR_ERR_OR_ZERO(master);
51793 +unmap_out:
51794 +#if IS_ENABLED(CONFIG_FSL_SOC)
51795 +       if (spisel_boot)
51796 +               iounmap(pinfo->immr_spi_cs);
51797 +#endif
51798 +       return ret;
51801  static int of_fsl_spi_remove(struct platform_device *ofdev)
51802 diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
51803 index 36a4922a134a..ccd817ee4917 100644
51804 --- a/drivers/spi/spi-omap-100k.c
51805 +++ b/drivers/spi/spi-omap-100k.c
51806 @@ -424,7 +424,7 @@ static int omap1_spi100k_probe(struct platform_device *pdev)
51808  static int omap1_spi100k_remove(struct platform_device *pdev)
51810 -       struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
51811 +       struct spi_master *master = platform_get_drvdata(pdev);
51812         struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
51814         pm_runtime_disable(&pdev->dev);
51815 @@ -438,7 +438,7 @@ static int omap1_spi100k_remove(struct platform_device *pdev)
51816  #ifdef CONFIG_PM
51817  static int omap1_spi100k_runtime_suspend(struct device *dev)
51819 -       struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
51820 +       struct spi_master *master = dev_get_drvdata(dev);
51821         struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
51823         clk_disable_unprepare(spi100k->ick);
51824 @@ -449,7 +449,7 @@ static int omap1_spi100k_runtime_suspend(struct device *dev)
51826  static int omap1_spi100k_runtime_resume(struct device *dev)
51828 -       struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
51829 +       struct spi_master *master = dev_get_drvdata(dev);
51830         struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
51831         int ret;
51833 diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
51834 index 8dcb2e70735c..d39dec6d1c91 100644
51835 --- a/drivers/spi/spi-qup.c
51836 +++ b/drivers/spi/spi-qup.c
51837 @@ -1263,7 +1263,7 @@ static int spi_qup_remove(struct platform_device *pdev)
51838         struct spi_qup *controller = spi_master_get_devdata(master);
51839         int ret;
51841 -       ret = pm_runtime_get_sync(&pdev->dev);
51842 +       ret = pm_runtime_resume_and_get(&pdev->dev);
51843         if (ret < 0)
51844                 return ret;
51846 diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
51847 index 936ef54e0903..0d75080da648 100644
51848 --- a/drivers/spi/spi-rockchip.c
51849 +++ b/drivers/spi/spi-rockchip.c
51850 @@ -476,7 +476,7 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
51851         return 1;
51854 -static void rockchip_spi_config(struct rockchip_spi *rs,
51855 +static int rockchip_spi_config(struct rockchip_spi *rs,
51856                 struct spi_device *spi, struct spi_transfer *xfer,
51857                 bool use_dma, bool slave_mode)
51859 @@ -521,7 +521,9 @@ static void rockchip_spi_config(struct rockchip_spi *rs,
51860                  * ctlr->bits_per_word_mask, so this shouldn't
51861                  * happen
51862                  */
51863 -               unreachable();
51864 +               dev_err(rs->dev, "unknown bits per word: %d\n",
51865 +                       xfer->bits_per_word);
51866 +               return -EINVAL;
51867         }
51869         if (use_dma) {
51870 @@ -554,6 +556,8 @@ static void rockchip_spi_config(struct rockchip_spi *rs,
51871          */
51872         writel_relaxed(2 * DIV_ROUND_UP(rs->freq, 2 * xfer->speed_hz),
51873                         rs->regs + ROCKCHIP_SPI_BAUDR);
51875 +       return 0;
51878  static size_t rockchip_spi_max_transfer_size(struct spi_device *spi)
51879 @@ -577,6 +581,7 @@ static int rockchip_spi_transfer_one(
51880                 struct spi_transfer *xfer)
51882         struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
51883 +       int ret;
51884         bool use_dma;
51886         WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
51887 @@ -596,7 +601,9 @@ static int rockchip_spi_transfer_one(
51889         use_dma = ctlr->can_dma ? ctlr->can_dma(ctlr, spi, xfer) : false;
51891 -       rockchip_spi_config(rs, spi, xfer, use_dma, ctlr->slave);
51892 +       ret = rockchip_spi_config(rs, spi, xfer, use_dma, ctlr->slave);
51893 +       if (ret)
51894 +               return ret;
51896         if (use_dma)
51897                 return rockchip_spi_prepare_dma(rs, ctlr, xfer);
51898 diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
51899 index 947e6b9dc9f4..2786470a5201 100644
51900 --- a/drivers/spi/spi-stm32-qspi.c
51901 +++ b/drivers/spi/spi-stm32-qspi.c
51902 @@ -727,21 +727,31 @@ static int __maybe_unused stm32_qspi_suspend(struct device *dev)
51904         pinctrl_pm_select_sleep_state(dev);
51906 -       return 0;
51907 +       return pm_runtime_force_suspend(dev);
51910  static int __maybe_unused stm32_qspi_resume(struct device *dev)
51912         struct stm32_qspi *qspi = dev_get_drvdata(dev);
51913 +       int ret;
51915 +       ret = pm_runtime_force_resume(dev);
51916 +       if (ret < 0)
51917 +               return ret;
51919         pinctrl_pm_select_default_state(dev);
51920 -       clk_prepare_enable(qspi->clk);
51922 +       ret = pm_runtime_get_sync(dev);
51923 +       if (ret < 0) {
51924 +               pm_runtime_put_noidle(dev);
51925 +               return ret;
51926 +       }
51928         writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);
51929         writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR);
51931 -       pm_runtime_mark_last_busy(qspi->dev);
51932 -       pm_runtime_put_autosuspend(qspi->dev);
51933 +       pm_runtime_mark_last_busy(dev);
51934 +       pm_runtime_put_autosuspend(dev);
51936         return 0;
51938 diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
51939 index 25c076461011..7f0244a246e9 100644
51940 --- a/drivers/spi/spi-stm32.c
51941 +++ b/drivers/spi/spi-stm32.c
51942 @@ -1803,7 +1803,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
51943         struct reset_control *rst;
51944         int ret;
51946 -       master = spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
51947 +       master = devm_spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
51948         if (!master) {
51949                 dev_err(&pdev->dev, "spi master allocation failed\n");
51950                 return -ENOMEM;
51951 @@ -1821,18 +1821,16 @@ static int stm32_spi_probe(struct platform_device *pdev)
51953         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
51954         spi->base = devm_ioremap_resource(&pdev->dev, res);
51955 -       if (IS_ERR(spi->base)) {
51956 -               ret = PTR_ERR(spi->base);
51957 -               goto err_master_put;
51958 -       }
51959 +       if (IS_ERR(spi->base))
51960 +               return PTR_ERR(spi->base);
51962         spi->phys_addr = (dma_addr_t)res->start;
51964         spi->irq = platform_get_irq(pdev, 0);
51965 -       if (spi->irq <= 0) {
51966 -               ret = dev_err_probe(&pdev->dev, spi->irq, "failed to get irq\n");
51967 -               goto err_master_put;
51968 -       }
51969 +       if (spi->irq <= 0)
51970 +               return dev_err_probe(&pdev->dev, spi->irq,
51971 +                                    "failed to get irq\n");
51973         ret = devm_request_threaded_irq(&pdev->dev, spi->irq,
51974                                         spi->cfg->irq_handler_event,
51975                                         spi->cfg->irq_handler_thread,
51976 @@ -1840,20 +1838,20 @@ static int stm32_spi_probe(struct platform_device *pdev)
51977         if (ret) {
51978                 dev_err(&pdev->dev, "irq%d request failed: %d\n", spi->irq,
51979                         ret);
51980 -               goto err_master_put;
51981 +               return ret;
51982         }
51984         spi->clk = devm_clk_get(&pdev->dev, NULL);
51985         if (IS_ERR(spi->clk)) {
51986                 ret = PTR_ERR(spi->clk);
51987                 dev_err(&pdev->dev, "clk get failed: %d\n", ret);
51988 -               goto err_master_put;
51989 +               return ret;
51990         }
51992         ret = clk_prepare_enable(spi->clk);
51993         if (ret) {
51994                 dev_err(&pdev->dev, "clk enable failed: %d\n", ret);
51995 -               goto err_master_put;
51996 +               return ret;
51997         }
51998         spi->clk_rate = clk_get_rate(spi->clk);
51999         if (!spi->clk_rate) {
52000 @@ -1929,7 +1927,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
52001         pm_runtime_set_active(&pdev->dev);
52002         pm_runtime_enable(&pdev->dev);
52004 -       ret = devm_spi_register_master(&pdev->dev, master);
52005 +       ret = spi_register_master(master);
52006         if (ret) {
52007                 dev_err(&pdev->dev, "spi master registration failed: %d\n",
52008                         ret);
52009 @@ -1949,8 +1947,6 @@ static int stm32_spi_probe(struct platform_device *pdev)
52010                 dma_release_channel(spi->dma_rx);
52011  err_clk_disable:
52012         clk_disable_unprepare(spi->clk);
52013 -err_master_put:
52014 -       spi_master_put(master);
52016         return ret;
52018 @@ -1960,6 +1956,7 @@ static int stm32_spi_remove(struct platform_device *pdev)
52019         struct spi_master *master = platform_get_drvdata(pdev);
52020         struct stm32_spi *spi = spi_master_get_devdata(master);
52022 +       spi_unregister_master(master);
52023         spi->cfg->disable(spi);
52025         if (master->dma_tx)
52026 diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
52027 index 9417385c0921..e06aafe169e0 100644
52028 --- a/drivers/spi/spi-ti-qspi.c
52029 +++ b/drivers/spi/spi-ti-qspi.c
52030 @@ -733,6 +733,17 @@ static int ti_qspi_runtime_resume(struct device *dev)
52031         return 0;
52034 +static void ti_qspi_dma_cleanup(struct ti_qspi *qspi)
52036 +       if (qspi->rx_bb_addr)
52037 +               dma_free_coherent(qspi->dev, QSPI_DMA_BUFFER_SIZE,
52038 +                                 qspi->rx_bb_addr,
52039 +                                 qspi->rx_bb_dma_addr);
52041 +       if (qspi->rx_chan)
52042 +               dma_release_channel(qspi->rx_chan);
52045  static const struct of_device_id ti_qspi_match[] = {
52046         {.compatible = "ti,dra7xxx-qspi" },
52047         {.compatible = "ti,am4372-qspi" },
52048 @@ -886,6 +897,8 @@ static int ti_qspi_probe(struct platform_device *pdev)
52049         if (!ret)
52050                 return 0;
52052 +       ti_qspi_dma_cleanup(qspi);
52054         pm_runtime_disable(&pdev->dev);
52055  free_master:
52056         spi_master_put(master);
52057 @@ -904,12 +917,7 @@ static int ti_qspi_remove(struct platform_device *pdev)
52058         pm_runtime_put_sync(&pdev->dev);
52059         pm_runtime_disable(&pdev->dev);
52061 -       if (qspi->rx_bb_addr)
52062 -               dma_free_coherent(qspi->dev, QSPI_DMA_BUFFER_SIZE,
52063 -                                 qspi->rx_bb_addr,
52064 -                                 qspi->rx_bb_dma_addr);
52065 -       if (qspi->rx_chan)
52066 -               dma_release_channel(qspi->rx_chan);
52067 +       ti_qspi_dma_cleanup(qspi);
52069         return 0;
52071 diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
52072 index c8fa6ee18ae7..7162387b9f96 100644
52073 --- a/drivers/spi/spi-zynqmp-gqspi.c
52074 +++ b/drivers/spi/spi-zynqmp-gqspi.c
52075 @@ -157,6 +157,7 @@ enum mode_type {GQSPI_MODE_IO, GQSPI_MODE_DMA};
52076   * @data_completion:   completion structure
52077   */
52078  struct zynqmp_qspi {
52079 +       struct spi_controller *ctlr;
52080         void __iomem *regs;
52081         struct clk *refclk;
52082         struct clk *pclk;
52083 @@ -173,6 +174,7 @@ struct zynqmp_qspi {
52084         u32 genfifoentry;
52085         enum mode_type mode;
52086         struct completion data_completion;
52087 +       struct mutex op_lock;
52088  };
52090  /**
52091 @@ -486,24 +488,10 @@ static int zynqmp_qspi_setup_op(struct spi_device *qspi)
52093         struct spi_controller *ctlr = qspi->master;
52094         struct zynqmp_qspi *xqspi = spi_controller_get_devdata(ctlr);
52095 -       struct device *dev = &ctlr->dev;
52096 -       int ret;
52098         if (ctlr->busy)
52099                 return -EBUSY;
52101 -       ret = clk_enable(xqspi->refclk);
52102 -       if (ret) {
52103 -               dev_err(dev, "Cannot enable device clock.\n");
52104 -               return ret;
52105 -       }
52107 -       ret = clk_enable(xqspi->pclk);
52108 -       if (ret) {
52109 -               dev_err(dev, "Cannot enable APB clock.\n");
52110 -               clk_disable(xqspi->refclk);
52111 -               return ret;
52112 -       }
52113         zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK);
52115         return 0;
52116 @@ -520,7 +508,7 @@ static void zynqmp_qspi_filltxfifo(struct zynqmp_qspi *xqspi, int size)
52118         u32 count = 0, intermediate;
52120 -       while ((xqspi->bytes_to_transfer > 0) && (count < size)) {
52121 +       while ((xqspi->bytes_to_transfer > 0) && (count < size) && (xqspi->txbuf)) {
52122                 memcpy(&intermediate, xqspi->txbuf, 4);
52123                 zynqmp_gqspi_write(xqspi, GQSPI_TXD_OFST, intermediate);
52125 @@ -579,7 +567,7 @@ static void zynqmp_qspi_fillgenfifo(struct zynqmp_qspi *xqspi, u8 nbits,
52126                 genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
52127                 genfifoentry |= GQSPI_GENFIFO_TX;
52128                 transfer_len = xqspi->bytes_to_transfer;
52129 -       } else {
52130 +       } else if (xqspi->rxbuf) {
52131                 genfifoentry &= ~GQSPI_GENFIFO_TX;
52132                 genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
52133                 genfifoentry |= GQSPI_GENFIFO_RX;
52134 @@ -587,6 +575,11 @@ static void zynqmp_qspi_fillgenfifo(struct zynqmp_qspi *xqspi, u8 nbits,
52135                         transfer_len = xqspi->dma_rx_bytes;
52136                 else
52137                         transfer_len = xqspi->bytes_to_receive;
52138 +       } else {
52139 +               /* Sending dummy circles here */
52140 +               genfifoentry &= ~(GQSPI_GENFIFO_TX | GQSPI_GENFIFO_RX);
52141 +               genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
52142 +               transfer_len = xqspi->bytes_to_transfer;
52143         }
52144         genfifoentry |= zynqmp_qspi_selectspimode(xqspi, nbits);
52145         xqspi->genfifoentry = genfifoentry;
52146 @@ -738,7 +731,7 @@ static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id)
52147   * zynqmp_qspi_setuprxdma - This function sets up the RX DMA operation
52148   * @xqspi:     xqspi is a pointer to the GQSPI instance.
52149   */
52150 -static void zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
52151 +static int zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
52153         u32 rx_bytes, rx_rem, config_reg;
52154         dma_addr_t addr;
52155 @@ -752,7 +745,7 @@ static void zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
52156                 zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
52157                 xqspi->mode = GQSPI_MODE_IO;
52158                 xqspi->dma_rx_bytes = 0;
52159 -               return;
52160 +               return 0;
52161         }
52163         rx_rem = xqspi->bytes_to_receive % 4;
52164 @@ -760,8 +753,10 @@ static void zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
52166         addr = dma_map_single(xqspi->dev, (void *)xqspi->rxbuf,
52167                               rx_bytes, DMA_FROM_DEVICE);
52168 -       if (dma_mapping_error(xqspi->dev, addr))
52169 +       if (dma_mapping_error(xqspi->dev, addr)) {
52170                 dev_err(xqspi->dev, "ERR:rxdma:memory not mapped\n");
52171 +               return -ENOMEM;
52172 +       }
52174         xqspi->dma_rx_bytes = rx_bytes;
52175         xqspi->dma_addr = addr;
52176 @@ -782,6 +777,8 @@ static void zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
52178         /* Write the number of bytes to transfer */
52179         zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_SIZE_OFST, rx_bytes);
52181 +       return 0;
52184  /**
52185 @@ -818,11 +815,17 @@ static void zynqmp_qspi_write_op(struct zynqmp_qspi *xqspi, u8 tx_nbits,
52186   * @genfifoentry:      genfifoentry is pointer to the variable in which
52187   *                     GENFIFO mask is returned to calling function
52188   */
52189 -static void zynqmp_qspi_read_op(struct zynqmp_qspi *xqspi, u8 rx_nbits,
52190 +static int zynqmp_qspi_read_op(struct zynqmp_qspi *xqspi, u8 rx_nbits,
52191                                 u32 genfifoentry)
52193 +       int ret;
52195 +       ret = zynqmp_qspi_setuprxdma(xqspi);
52196 +       if (ret)
52197 +               return ret;
52198         zynqmp_qspi_fillgenfifo(xqspi, rx_nbits, genfifoentry);
52199 -       zynqmp_qspi_setuprxdma(xqspi);
52201 +       return 0;
52204  /**
52205 @@ -835,10 +838,13 @@ static void zynqmp_qspi_read_op(struct zynqmp_qspi *xqspi, u8 rx_nbits,
52206   */
52207  static int __maybe_unused zynqmp_qspi_suspend(struct device *dev)
52209 -       struct spi_controller *ctlr = dev_get_drvdata(dev);
52210 -       struct zynqmp_qspi *xqspi = spi_controller_get_devdata(ctlr);
52211 +       struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
52212 +       struct spi_controller *ctlr = xqspi->ctlr;
52213 +       int ret;
52215 -       spi_controller_suspend(ctlr);
52216 +       ret = spi_controller_suspend(ctlr);
52217 +       if (ret)
52218 +               return ret;
52220         zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0);
52222 @@ -856,27 +862,13 @@ static int __maybe_unused zynqmp_qspi_suspend(struct device *dev)
52223   */
52224  static int __maybe_unused zynqmp_qspi_resume(struct device *dev)
52226 -       struct spi_controller *ctlr = dev_get_drvdata(dev);
52227 -       struct zynqmp_qspi *xqspi = spi_controller_get_devdata(ctlr);
52228 -       int ret = 0;
52230 -       ret = clk_enable(xqspi->pclk);
52231 -       if (ret) {
52232 -               dev_err(dev, "Cannot enable APB clock.\n");
52233 -               return ret;
52234 -       }
52235 +       struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
52236 +       struct spi_controller *ctlr = xqspi->ctlr;
52238 -       ret = clk_enable(xqspi->refclk);
52239 -       if (ret) {
52240 -               dev_err(dev, "Cannot enable device clock.\n");
52241 -               clk_disable(xqspi->pclk);
52242 -               return ret;
52243 -       }
52244 +       zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK);
52246         spi_controller_resume(ctlr);
52248 -       clk_disable(xqspi->refclk);
52249 -       clk_disable(xqspi->pclk);
52250         return 0;
52253 @@ -890,10 +882,10 @@ static int __maybe_unused zynqmp_qspi_resume(struct device *dev)
52254   */
52255  static int __maybe_unused zynqmp_runtime_suspend(struct device *dev)
52257 -       struct zynqmp_qspi *xqspi = (struct zynqmp_qspi *)dev_get_drvdata(dev);
52258 +       struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
52260 -       clk_disable(xqspi->refclk);
52261 -       clk_disable(xqspi->pclk);
52262 +       clk_disable_unprepare(xqspi->refclk);
52263 +       clk_disable_unprepare(xqspi->pclk);
52265         return 0;
52267 @@ -908,19 +900,19 @@ static int __maybe_unused zynqmp_runtime_suspend(struct device *dev)
52268   */
52269  static int __maybe_unused zynqmp_runtime_resume(struct device *dev)
52271 -       struct zynqmp_qspi *xqspi = (struct zynqmp_qspi *)dev_get_drvdata(dev);
52272 +       struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
52273         int ret;
52275 -       ret = clk_enable(xqspi->pclk);
52276 +       ret = clk_prepare_enable(xqspi->pclk);
52277         if (ret) {
52278                 dev_err(dev, "Cannot enable APB clock.\n");
52279                 return ret;
52280         }
52282 -       ret = clk_enable(xqspi->refclk);
52283 +       ret = clk_prepare_enable(xqspi->refclk);
52284         if (ret) {
52285                 dev_err(dev, "Cannot enable device clock.\n");
52286 -               clk_disable(xqspi->pclk);
52287 +               clk_disable_unprepare(xqspi->pclk);
52288                 return ret;
52289         }
52291 @@ -944,25 +936,23 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
52292         struct zynqmp_qspi *xqspi = spi_controller_get_devdata
52293                                     (mem->spi->master);
52294         int err = 0, i;
52295 -       u8 *tmpbuf;
52296         u32 genfifoentry = 0;
52297 +       u16 opcode = op->cmd.opcode;
52298 +       u64 opaddr;
52300         dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n",
52301                 op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
52302                 op->dummy.buswidth, op->data.buswidth);
52304 +       mutex_lock(&xqspi->op_lock);
52305         zynqmp_qspi_config_op(xqspi, mem->spi);
52306         zynqmp_qspi_chipselect(mem->spi, false);
52307         genfifoentry |= xqspi->genfifocs;
52308         genfifoentry |= xqspi->genfifobus;
52310         if (op->cmd.opcode) {
52311 -               tmpbuf = kzalloc(op->cmd.nbytes, GFP_KERNEL | GFP_DMA);
52312 -               if (!tmpbuf)
52313 -                       return -ENOMEM;
52314 -               tmpbuf[0] = op->cmd.opcode;
52315                 reinit_completion(&xqspi->data_completion);
52316 -               xqspi->txbuf = tmpbuf;
52317 +               xqspi->txbuf = &opcode;
52318                 xqspi->rxbuf = NULL;
52319                 xqspi->bytes_to_transfer = op->cmd.nbytes;
52320                 xqspi->bytes_to_receive = 0;
52321 @@ -973,16 +963,15 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
52322                 zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
52323                                    GQSPI_IER_GENFIFOEMPTY_MASK |
52324                                    GQSPI_IER_TXNOT_FULL_MASK);
52325 -               if (!wait_for_completion_interruptible_timeout
52326 +               if (!wait_for_completion_timeout
52327                     (&xqspi->data_completion, msecs_to_jiffies(1000))) {
52328                         err = -ETIMEDOUT;
52329 -                       kfree(tmpbuf);
52330                         goto return_err;
52331                 }
52332 -               kfree(tmpbuf);
52333         }
52335         if (op->addr.nbytes) {
52336 +               xqspi->txbuf = &opaddr;
52337                 for (i = 0; i < op->addr.nbytes; i++) {
52338                         *(((u8 *)xqspi->txbuf) + i) = op->addr.val >>
52339                                         (8 * (op->addr.nbytes - i - 1));
52340 @@ -1001,7 +990,7 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
52341                                    GQSPI_IER_TXEMPTY_MASK |
52342                                    GQSPI_IER_GENFIFOEMPTY_MASK |
52343                                    GQSPI_IER_TXNOT_FULL_MASK);
52344 -               if (!wait_for_completion_interruptible_timeout
52345 +               if (!wait_for_completion_timeout
52346                     (&xqspi->data_completion, msecs_to_jiffies(1000))) {
52347                         err = -ETIMEDOUT;
52348                         goto return_err;
52349 @@ -1009,32 +998,23 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
52350         }
52352         if (op->dummy.nbytes) {
52353 -               tmpbuf = kzalloc(op->dummy.nbytes, GFP_KERNEL | GFP_DMA);
52354 -               if (!tmpbuf)
52355 -                       return -ENOMEM;
52356 -               memset(tmpbuf, 0xff, op->dummy.nbytes);
52357 -               reinit_completion(&xqspi->data_completion);
52358 -               xqspi->txbuf = tmpbuf;
52359 +               xqspi->txbuf = NULL;
52360                 xqspi->rxbuf = NULL;
52361 -               xqspi->bytes_to_transfer = op->dummy.nbytes;
52362 +               /*
52363 +                * xqspi->bytes_to_transfer here represents the dummy circles
52364 +                * which need to be sent.
52365 +                */
52366 +               xqspi->bytes_to_transfer = op->dummy.nbytes * 8 / op->dummy.buswidth;
52367                 xqspi->bytes_to_receive = 0;
52368 -               zynqmp_qspi_write_op(xqspi, op->dummy.buswidth,
52369 +               /*
52370 +                * Using op->data.buswidth instead of op->dummy.buswidth here because
52371 +                * we need to use it to configure the correct SPI mode.
52372 +                */
52373 +               zynqmp_qspi_write_op(xqspi, op->data.buswidth,
52374                                      genfifoentry);
52375                 zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
52376                                    zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) |
52377                                    GQSPI_CFG_START_GEN_FIFO_MASK);
52378 -               zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
52379 -                                  GQSPI_IER_TXEMPTY_MASK |
52380 -                                  GQSPI_IER_GENFIFOEMPTY_MASK |
52381 -                                  GQSPI_IER_TXNOT_FULL_MASK);
52382 -               if (!wait_for_completion_interruptible_timeout
52383 -                   (&xqspi->data_completion, msecs_to_jiffies(1000))) {
52384 -                       err = -ETIMEDOUT;
52385 -                       kfree(tmpbuf);
52386 -                       goto return_err;
52387 -               }
52389 -               kfree(tmpbuf);
52390         }
52392         if (op->data.nbytes) {
52393 @@ -1059,8 +1039,11 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
52394                         xqspi->rxbuf = (u8 *)op->data.buf.in;
52395                         xqspi->bytes_to_receive = op->data.nbytes;
52396                         xqspi->bytes_to_transfer = 0;
52397 -                       zynqmp_qspi_read_op(xqspi, op->data.buswidth,
52398 +                       err = zynqmp_qspi_read_op(xqspi, op->data.buswidth,
52399                                             genfifoentry);
52400 +                       if (err)
52401 +                               goto return_err;
52403                         zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
52404                                            zynqmp_gqspi_read
52405                                            (xqspi, GQSPI_CONFIG_OFST) |
52406 @@ -1076,7 +1059,7 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
52407                                                    GQSPI_IER_RXEMPTY_MASK);
52408                         }
52409                 }
52410 -               if (!wait_for_completion_interruptible_timeout
52411 +               if (!wait_for_completion_timeout
52412                     (&xqspi->data_completion, msecs_to_jiffies(1000)))
52413                         err = -ETIMEDOUT;
52414         }
52415 @@ -1084,6 +1067,7 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
52416  return_err:
52418         zynqmp_qspi_chipselect(mem->spi, true);
52419 +       mutex_unlock(&xqspi->op_lock);
52421         return err;
52423 @@ -1120,6 +1104,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
52425         xqspi = spi_controller_get_devdata(ctlr);
52426         xqspi->dev = dev;
52427 +       xqspi->ctlr = ctlr;
52428         platform_set_drvdata(pdev, xqspi);
52430         xqspi->regs = devm_platform_ioremap_resource(pdev, 0);
52431 @@ -1135,13 +1120,11 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
52432                 goto remove_master;
52433         }
52435 -       init_completion(&xqspi->data_completion);
52437         xqspi->refclk = devm_clk_get(&pdev->dev, "ref_clk");
52438         if (IS_ERR(xqspi->refclk)) {
52439                 dev_err(dev, "ref_clk clock not found.\n");
52440                 ret = PTR_ERR(xqspi->refclk);
52441 -               goto clk_dis_pclk;
52442 +               goto remove_master;
52443         }
52445         ret = clk_prepare_enable(xqspi->pclk);
52446 @@ -1156,15 +1139,24 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
52447                 goto clk_dis_pclk;
52448         }
52450 +       init_completion(&xqspi->data_completion);
52452 +       mutex_init(&xqspi->op_lock);
52454         pm_runtime_use_autosuspend(&pdev->dev);
52455         pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
52456         pm_runtime_set_active(&pdev->dev);
52457         pm_runtime_enable(&pdev->dev);
52459 +       ret = pm_runtime_get_sync(&pdev->dev);
52460 +       if (ret < 0) {
52461 +               dev_err(&pdev->dev, "Failed to pm_runtime_get_sync: %d\n", ret);
52462 +               goto clk_dis_all;
52463 +       }
52465         /* QSPI controller initializations */
52466         zynqmp_qspi_init_hw(xqspi);
52468 -       pm_runtime_mark_last_busy(&pdev->dev);
52469 -       pm_runtime_put_autosuspend(&pdev->dev);
52470         xqspi->irq = platform_get_irq(pdev, 0);
52471         if (xqspi->irq <= 0) {
52472                 ret = -ENXIO;
52473 @@ -1178,6 +1170,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
52474                 goto clk_dis_all;
52475         }
52477 +       dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
52478         ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
52479         ctlr->num_chipselect = GQSPI_DEFAULT_NUM_CS;
52480         ctlr->mem_ops = &zynqmp_qspi_mem_ops;
52481 @@ -1187,6 +1180,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
52482         ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD |
52483                             SPI_TX_DUAL | SPI_TX_QUAD;
52484         ctlr->dev.of_node = np;
52485 +       ctlr->auto_runtime_pm = true;
52487         ret = devm_spi_register_controller(&pdev->dev, ctlr);
52488         if (ret) {
52489 @@ -1194,9 +1188,13 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
52490                 goto clk_dis_all;
52491         }
52493 +       pm_runtime_mark_last_busy(&pdev->dev);
52494 +       pm_runtime_put_autosuspend(&pdev->dev);
52496         return 0;
52498  clk_dis_all:
52499 +       pm_runtime_put_sync(&pdev->dev);
52500         pm_runtime_set_suspended(&pdev->dev);
52501         pm_runtime_disable(&pdev->dev);
52502         clk_disable_unprepare(xqspi->refclk);
52503 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
52504 index b08efe88ccd6..8da4fe475b84 100644
52505 --- a/drivers/spi/spi.c
52506 +++ b/drivers/spi/spi.c
52507 @@ -795,7 +795,7 @@ int spi_register_board_info(struct spi_board_info const *info, unsigned n)
52509  /*-------------------------------------------------------------------------*/
52511 -static void spi_set_cs(struct spi_device *spi, bool enable)
52512 +static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
52514         bool enable1 = enable;
52516 @@ -803,7 +803,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
52517          * Avoid calling into the driver (or doing delays) if the chip select
52518          * isn't actually changing from the last time this was called.
52519          */
52520 -       if ((spi->controller->last_cs_enable == enable) &&
52521 +       if (!force && (spi->controller->last_cs_enable == enable) &&
52522             (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
52523                 return;
52525 @@ -1253,7 +1253,7 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
52526         struct spi_statistics *statm = &ctlr->statistics;
52527         struct spi_statistics *stats = &msg->spi->statistics;
52529 -       spi_set_cs(msg->spi, true);
52530 +       spi_set_cs(msg->spi, true, false);
52532         SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
52533         SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
52534 @@ -1321,9 +1321,9 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
52535                                          &msg->transfers)) {
52536                                 keep_cs = true;
52537                         } else {
52538 -                               spi_set_cs(msg->spi, false);
52539 +                               spi_set_cs(msg->spi, false, false);
52540                                 _spi_transfer_cs_change_delay(msg, xfer);
52541 -                               spi_set_cs(msg->spi, true);
52542 +                               spi_set_cs(msg->spi, true, false);
52543                         }
52544                 }
52546 @@ -1332,7 +1332,7 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
52548  out:
52549         if (ret != 0 || !keep_cs)
52550 -               spi_set_cs(msg->spi, false);
52551 +               spi_set_cs(msg->spi, false, false);
52553         if (msg->status == -EINPROGRESS)
52554                 msg->status = ret;
52555 @@ -2496,6 +2496,7 @@ struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
52557         ctlr = __spi_alloc_controller(dev, size, slave);
52558         if (ctlr) {
52559 +               ctlr->devm_allocated = true;
52560                 *ptr = ctlr;
52561                 devres_add(dev, ptr);
52562         } else {
52563 @@ -2842,11 +2843,6 @@ int devm_spi_register_controller(struct device *dev,
52565  EXPORT_SYMBOL_GPL(devm_spi_register_controller);
52567 -static int devm_spi_match_controller(struct device *dev, void *res, void *ctlr)
52569 -       return *(struct spi_controller **)res == ctlr;
52572  static int __unregister(struct device *dev, void *null)
52574         spi_unregister_device(to_spi_device(dev));
52575 @@ -2893,8 +2889,7 @@ void spi_unregister_controller(struct spi_controller *ctlr)
52576         /* Release the last reference on the controller if its driver
52577          * has not yet been converted to devm_spi_alloc_master/slave().
52578          */
52579 -       if (!devres_find(ctlr->dev.parent, devm_spi_release_controller,
52580 -                        devm_spi_match_controller, ctlr))
52581 +       if (!ctlr->devm_allocated)
52582                 put_device(&ctlr->dev);
52584         /* free bus id */
52585 @@ -3423,11 +3418,11 @@ int spi_setup(struct spi_device *spi)
52586                  */
52587                 status = 0;
52589 -               spi_set_cs(spi, false);
52590 +               spi_set_cs(spi, false, true);
52591                 pm_runtime_mark_last_busy(spi->controller->dev.parent);
52592                 pm_runtime_put_autosuspend(spi->controller->dev.parent);
52593         } else {
52594 -               spi_set_cs(spi, false);
52595 +               spi_set_cs(spi, false, true);
52596         }
52598         mutex_unlock(&spi->controller->io_mutex);
52599 diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
52600 index 70498adb1575..5c35653ed36d 100644
52601 --- a/drivers/staging/android/Kconfig
52602 +++ b/drivers/staging/android/Kconfig
52603 @@ -4,7 +4,7 @@ menu "Android"
52604  if ANDROID
52606  config ASHMEM
52607 -       bool "Enable the Anonymous Shared Memory Subsystem"
52608 +       tristate "Enable the Anonymous Shared Memory Subsystem"
52609         depends on SHMEM
52610         help
52611           The ashmem subsystem is a new shared memory allocator, similar to
52612 diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
52613 index e9a55a5e6529..3d794218dd4b 100644
52614 --- a/drivers/staging/android/Makefile
52615 +++ b/drivers/staging/android/Makefile
52616 @@ -1,4 +1,5 @@
52617  # SPDX-License-Identifier: GPL-2.0
52618  ccflags-y += -I$(src)                  # needed for trace events
52620 -obj-$(CONFIG_ASHMEM)                   += ashmem.o
52621 +ashmem_linux-y                         += ashmem.o
52622 +obj-$(CONFIG_ASHMEM)                   += ashmem_linux.o
52623 diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
52624 index d66a64e42273..e28d9a2ce7f1 100644
52625 --- a/drivers/staging/android/ashmem.c
52626 +++ b/drivers/staging/android/ashmem.c
52627 @@ -19,6 +19,7 @@
52628  #include <linux/security.h>
52629  #include <linux/mm.h>
52630  #include <linux/mman.h>
52631 +#include <linux/module.h>
52632  #include <linux/uaccess.h>
52633  #include <linux/personality.h>
52634  #include <linux/bitops.h>
52635 @@ -964,4 +965,18 @@ static int __init ashmem_init(void)
52636  out:
52637         return ret;
52639 -device_initcall(ashmem_init);
52641 +static void __exit ashmem_exit(void)
52643 +       misc_deregister(&ashmem_misc);
52644 +       unregister_shrinker(&ashmem_shrinker);
52645 +       kmem_cache_destroy(ashmem_range_cachep);
52646 +       kmem_cache_destroy(ashmem_area_cachep);
52649 +module_init(ashmem_init);
52650 +module_exit(ashmem_exit);
52652 +MODULE_AUTHOR("Google, Inc.");
52653 +MODULE_DESCRIPTION("Driver for Android shared memory device");
52654 +MODULE_LICENSE("GPL v2");
52655 diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
52656 index 4f80a4991f95..c164c8524909 100644
52657 --- a/drivers/staging/comedi/drivers/ni_mio_common.c
52658 +++ b/drivers/staging/comedi/drivers/ni_mio_common.c
52659 @@ -4747,7 +4747,7 @@ static int cs5529_wait_for_idle(struct comedi_device *dev)
52660                 if ((status & NI67XX_CAL_STATUS_BUSY) == 0)
52661                         break;
52662                 set_current_state(TASK_INTERRUPTIBLE);
52663 -               if (schedule_timeout(1))
52664 +               if (schedule_min_hrtimeout())
52665                         return -EIO;
52666         }
52667         if (i == timeout) {
52668 diff --git a/drivers/staging/comedi/drivers/tests/ni_routes_test.c b/drivers/staging/comedi/drivers/tests/ni_routes_test.c
52669 index 4061b3b5f8e9..68defeb53de4 100644
52670 --- a/drivers/staging/comedi/drivers/tests/ni_routes_test.c
52671 +++ b/drivers/staging/comedi/drivers/tests/ni_routes_test.c
52672 @@ -217,7 +217,8 @@ void test_ni_assign_device_routes(void)
52673         const u8 *table, *oldtable;
52675         init_pci_6070e();
52676 -       ni_assign_device_routes(ni_eseries, pci_6070e, &private.routing_tables);
52677 +       ni_assign_device_routes(ni_eseries, pci_6070e, NULL,
52678 +                               &private.routing_tables);
52679         devroutes = private.routing_tables.valid_routes;
52680         table = private.routing_tables.route_values;
52682 @@ -253,7 +254,8 @@ void test_ni_assign_device_routes(void)
52683         olddevroutes = devroutes;
52684         oldtable = table;
52685         init_pci_6220();
52686 -       ni_assign_device_routes(ni_mseries, pci_6220, &private.routing_tables);
52687 +       ni_assign_device_routes(ni_mseries, pci_6220, NULL,
52688 +                               &private.routing_tables);
52689         devroutes = private.routing_tables.valid_routes;
52690         table = private.routing_tables.route_values;
52692 diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
52693 index c368082aae1a..0f4655d7d520 100644
52694 --- a/drivers/staging/fwserial/fwserial.c
52695 +++ b/drivers/staging/fwserial/fwserial.c
52696 @@ -1218,13 +1218,12 @@ static int get_serial_info(struct tty_struct *tty,
52697         struct fwtty_port *port = tty->driver_data;
52699         mutex_lock(&port->port.mutex);
52700 -       ss->type =  PORT_UNKNOWN;
52701 -       ss->line =  port->port.tty->index;
52702 -       ss->flags = port->port.flags;
52703 -       ss->xmit_fifo_size = FWTTY_PORT_TXFIFO_LEN;
52704 +       ss->line = port->index;
52705         ss->baud_base = 400000000;
52706 -       ss->close_delay = port->port.close_delay;
52707 +       ss->close_delay = jiffies_to_msecs(port->port.close_delay) / 10;
52708 +       ss->closing_wait = 3000;
52709         mutex_unlock(&port->port.mutex);
52711         return 0;
52714 @@ -1232,20 +1231,20 @@ static int set_serial_info(struct tty_struct *tty,
52715                            struct serial_struct *ss)
52717         struct fwtty_port *port = tty->driver_data;
52718 +       unsigned int cdelay;
52720 -       if (ss->irq != 0 || ss->port != 0 || ss->custom_divisor != 0 ||
52721 -           ss->baud_base != 400000000)
52722 -               return -EPERM;
52723 +       cdelay = msecs_to_jiffies(ss->close_delay * 10);
52725         mutex_lock(&port->port.mutex);
52726         if (!capable(CAP_SYS_ADMIN)) {
52727 -               if (((ss->flags & ~ASYNC_USR_MASK) !=
52728 +               if (cdelay != port->port.close_delay ||
52729 +                   ((ss->flags & ~ASYNC_USR_MASK) !=
52730                      (port->port.flags & ~ASYNC_USR_MASK))) {
52731                         mutex_unlock(&port->port.mutex);
52732                         return -EPERM;
52733                 }
52734         }
52735 -       port->port.close_delay = ss->close_delay * HZ / 100;
52736 +       port->port.close_delay = cdelay;
52737         mutex_unlock(&port->port.mutex);
52739         return 0;
52740 diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
52741 index 607378bfebb7..a520f7f213db 100644
52742 --- a/drivers/staging/greybus/uart.c
52743 +++ b/drivers/staging/greybus/uart.c
52744 @@ -614,10 +614,12 @@ static int get_serial_info(struct tty_struct *tty,
52745         ss->line = gb_tty->minor;
52746         ss->xmit_fifo_size = 16;
52747         ss->baud_base = 9600;
52748 -       ss->close_delay = gb_tty->port.close_delay / 10;
52749 +       ss->close_delay = jiffies_to_msecs(gb_tty->port.close_delay) / 10;
52750         ss->closing_wait =
52751                 gb_tty->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
52752 -               ASYNC_CLOSING_WAIT_NONE : gb_tty->port.closing_wait / 10;
52753 +               ASYNC_CLOSING_WAIT_NONE :
52754 +               jiffies_to_msecs(gb_tty->port.closing_wait) / 10;
52756         return 0;
52759 @@ -629,17 +631,16 @@ static int set_serial_info(struct tty_struct *tty,
52760         unsigned int close_delay;
52761         int retval = 0;
52763 -       close_delay = ss->close_delay * 10;
52764 +       close_delay = msecs_to_jiffies(ss->close_delay * 10);
52765         closing_wait = ss->closing_wait == ASYNC_CLOSING_WAIT_NONE ?
52766 -                       ASYNC_CLOSING_WAIT_NONE : ss->closing_wait * 10;
52767 +                       ASYNC_CLOSING_WAIT_NONE :
52768 +                       msecs_to_jiffies(ss->closing_wait * 10);
52770         mutex_lock(&gb_tty->port.mutex);
52771         if (!capable(CAP_SYS_ADMIN)) {
52772                 if ((close_delay != gb_tty->port.close_delay) ||
52773                     (closing_wait != gb_tty->port.closing_wait))
52774                         retval = -EPERM;
52775 -               else
52776 -                       retval = -EOPNOTSUPP;
52777         } else {
52778                 gb_tty->port.close_delay = close_delay;
52779                 gb_tty->port.closing_wait = closing_wait;
52780 diff --git a/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
52781 index 7ca7378b1859..0ab67b2aec67 100644
52782 --- a/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
52783 +++ b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
52784 @@ -843,8 +843,10 @@ static int lm3554_probe(struct i2c_client *client)
52785                 return -ENOMEM;
52787         flash->pdata = lm3554_platform_data_func(client);
52788 -       if (IS_ERR(flash->pdata))
52789 -               return PTR_ERR(flash->pdata);
52790 +       if (IS_ERR(flash->pdata)) {
52791 +               err = PTR_ERR(flash->pdata);
52792 +               goto fail1;
52793 +       }
52795         v4l2_i2c_subdev_init(&flash->sd, client, &lm3554_ops);
52796         flash->sd.internal_ops = &lm3554_internal_ops;
52797 @@ -856,7 +858,7 @@ static int lm3554_probe(struct i2c_client *client)
52798                                    ARRAY_SIZE(lm3554_controls));
52799         if (ret) {
52800                 dev_err(&client->dev, "error initialize a ctrl_handler.\n");
52801 -               goto fail2;
52802 +               goto fail3;
52803         }
52805         for (i = 0; i < ARRAY_SIZE(lm3554_controls); i++)
52806 @@ -865,14 +867,14 @@ static int lm3554_probe(struct i2c_client *client)
52808         if (flash->ctrl_handler.error) {
52809                 dev_err(&client->dev, "ctrl_handler error.\n");
52810 -               goto fail2;
52811 +               goto fail3;
52812         }
52814         flash->sd.ctrl_handler = &flash->ctrl_handler;
52815         err = media_entity_pads_init(&flash->sd.entity, 0, NULL);
52816         if (err) {
52817                 dev_err(&client->dev, "error initialize a media entity.\n");
52818 -               goto fail1;
52819 +               goto fail2;
52820         }
52822         flash->sd.entity.function = MEDIA_ENT_F_FLASH;
52823 @@ -884,14 +886,15 @@ static int lm3554_probe(struct i2c_client *client)
52824         err = lm3554_gpio_init(client);
52825         if (err) {
52826                 dev_err(&client->dev, "gpio request/direction_output fail");
52827 -               goto fail2;
52828 +               goto fail3;
52829         }
52830         return atomisp_register_i2c_module(&flash->sd, NULL, LED_FLASH);
52831 -fail2:
52832 +fail3:
52833         media_entity_cleanup(&flash->sd.entity);
52834         v4l2_ctrl_handler_free(&flash->ctrl_handler);
52835 -fail1:
52836 +fail2:
52837         v4l2_device_unregister_subdev(&flash->sd);
52838 +fail1:
52839         kfree(flash);
52841         return err;
52842 diff --git a/drivers/staging/media/atomisp/pci/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp_fops.c
52843 index 453bb6913550..f1e6b2597853 100644
52844 --- a/drivers/staging/media/atomisp/pci/atomisp_fops.c
52845 +++ b/drivers/staging/media/atomisp/pci/atomisp_fops.c
52846 @@ -221,6 +221,9 @@ int atomisp_q_video_buffers_to_css(struct atomisp_sub_device *asd,
52847         unsigned long irqflags;
52848         int err = 0;
52850 +       if (WARN_ON(css_pipe_id >= IA_CSS_PIPE_ID_NUM))
52851 +               return -EINVAL;
52853         while (pipe->buffers_in_css < ATOMISP_CSS_Q_DEPTH) {
52854                 struct videobuf_buffer *vb;
52856 diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
52857 index 2ae50decfc8b..9da82855552d 100644
52858 --- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
52859 +++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
52860 @@ -948,10 +948,8 @@ int atomisp_alloc_css_stat_bufs(struct atomisp_sub_device *asd,
52861                 dev_dbg(isp->dev, "allocating %d dis buffers\n", count);
52862                 while (count--) {
52863                         dis_buf = kzalloc(sizeof(struct atomisp_dis_buf), GFP_KERNEL);
52864 -                       if (!dis_buf) {
52865 -                               kfree(s3a_buf);
52866 +                       if (!dis_buf)
52867                                 goto error;
52868 -                       }
52869                         if (atomisp_css_allocate_stat_buffers(
52870                                 asd, stream_id, NULL, dis_buf, NULL)) {
52871                                 kfree(dis_buf);
52872 diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
52873 index f13af2329f48..0168f9839c90 100644
52874 --- a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
52875 +++ b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
52876 @@ -857,16 +857,17 @@ static void free_private_pages(struct hmm_buffer_object *bo,
52877         kfree(bo->page_obj);
52880 -static void free_user_pages(struct hmm_buffer_object *bo)
52881 +static void free_user_pages(struct hmm_buffer_object *bo,
52882 +                           unsigned int page_nr)
52884         int i;
52886         hmm_mem_stat.usr_size -= bo->pgnr;
52888         if (bo->mem_type == HMM_BO_MEM_TYPE_PFN) {
52889 -               unpin_user_pages(bo->pages, bo->pgnr);
52890 +               unpin_user_pages(bo->pages, page_nr);
52891         } else {
52892 -               for (i = 0; i < bo->pgnr; i++)
52893 +               for (i = 0; i < page_nr; i++)
52894                         put_page(bo->pages[i]);
52895         }
52896         kfree(bo->pages);
52897 @@ -942,6 +943,8 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
52898                 dev_err(atomisp_dev,
52899                         "get_user_pages err: bo->pgnr = %d, pgnr actually pinned = %d.\n",
52900                         bo->pgnr, page_nr);
52901 +               if (page_nr < 0)
52902 +                       page_nr = 0;
52903                 goto out_of_mem;
52904         }
52906 @@ -954,7 +957,7 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
52908  out_of_mem:
52910 -       free_user_pages(bo);
52911 +       free_user_pages(bo, page_nr);
52913         return -ENOMEM;
52915 @@ -1037,7 +1040,7 @@ void hmm_bo_free_pages(struct hmm_buffer_object *bo)
52916         if (bo->type == HMM_BO_PRIVATE)
52917                 free_private_pages(bo, &dynamic_pool, &reserved_pool);
52918         else if (bo->type == HMM_BO_USER)
52919 -               free_user_pages(bo);
52920 +               free_user_pages(bo, bo->pgnr);
52921         else
52922                 dev_err(atomisp_dev, "invalid buffer type.\n");
52923         mutex_unlock(&bo->mutex);
52924 diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
52925 index e10ce103a5b4..94a0467d673b 100644
52926 --- a/drivers/staging/media/imx/imx-media-capture.c
52927 +++ b/drivers/staging/media/imx/imx-media-capture.c
52928 @@ -557,7 +557,7 @@ static int capture_validate_fmt(struct capture_priv *priv)
52929                 priv->vdev.fmt.fmt.pix.height != f.fmt.pix.height ||
52930                 priv->vdev.cc->cs != cc->cs ||
52931                 priv->vdev.compose.width != compose.width ||
52932 -               priv->vdev.compose.height != compose.height) ? -EINVAL : 0;
52933 +               priv->vdev.compose.height != compose.height) ? -EPIPE : 0;
52936  static int capture_start_streaming(struct vb2_queue *vq, unsigned int count)
52937 diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c
52938 index 60aa02eb7d2a..6d9c49b39531 100644
52939 --- a/drivers/staging/media/ipu3/ipu3-v4l2.c
52940 +++ b/drivers/staging/media/ipu3/ipu3-v4l2.c
52941 @@ -686,6 +686,7 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
52943         dev_dbg(dev, "IPU3 pipe %u pipe_id = %u", pipe, css_pipe->pipe_id);
52945 +       css_q = imgu_node_to_queue(node);
52946         for (i = 0; i < IPU3_CSS_QUEUES; i++) {
52947                 unsigned int inode = imgu_map_node(imgu, i);
52949 @@ -693,6 +694,18 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
52950                 if (inode == IMGU_NODE_STAT_3A || inode == IMGU_NODE_PARAMS)
52951                         continue;
52953 +               /* CSS expects some format on OUT queue */
52954 +               if (i != IPU3_CSS_QUEUE_OUT &&
52955 +                   !imgu_pipe->nodes[inode].enabled) {
52956 +                       fmts[i] = NULL;
52957 +                       continue;
52958 +               }
52960 +               if (i == css_q) {
52961 +                       fmts[i] = &f->fmt.pix_mp;
52962 +                       continue;
52963 +               }
52965                 if (try) {
52966                         fmts[i] = kmemdup(&imgu_pipe->nodes[inode].vdev_fmt.fmt.pix_mp,
52967                                           sizeof(struct v4l2_pix_format_mplane),
52968 @@ -705,10 +718,6 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
52969                         fmts[i] = &imgu_pipe->nodes[inode].vdev_fmt.fmt.pix_mp;
52970                 }
52972 -               /* CSS expects some format on OUT queue */
52973 -               if (i != IPU3_CSS_QUEUE_OUT &&
52974 -                   !imgu_pipe->nodes[inode].enabled)
52975 -                       fmts[i] = NULL;
52976         }
52978         if (!try) {
52979 @@ -725,16 +734,10 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
52980                 rects[IPU3_CSS_RECT_GDC]->height = pad_fmt.height;
52981         }
52983 -       /*
52984 -        * imgu doesn't set the node to the value given by user
52985 -        * before we return success from this function, so set it here.
52986 -        */
52987 -       css_q = imgu_node_to_queue(node);
52988         if (!fmts[css_q]) {
52989                 ret = -EINVAL;
52990                 goto out;
52991         }
52992 -       *fmts[css_q] = f->fmt.pix_mp;
52994         if (try)
52995                 ret = imgu_css_fmt_try(&imgu->css, fmts, rects, pipe);
52996 @@ -745,15 +748,18 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
52997         if (ret < 0)
52998                 goto out;
53000 -       if (try)
53001 -               f->fmt.pix_mp = *fmts[css_q];
53002 -       else
53003 -               f->fmt = imgu_pipe->nodes[node].vdev_fmt.fmt;
53004 +       /*
53005 +        * imgu doesn't set the node to the value given by user
53006 +        * before we return success from this function, so set it here.
53007 +        */
53008 +       if (!try)
53009 +               imgu_pipe->nodes[node].vdev_fmt.fmt.pix_mp = f->fmt.pix_mp;
53011  out:
53012         if (try) {
53013                 for (i = 0; i < IPU3_CSS_QUEUES; i++)
53014 -                       kfree(fmts[i]);
53015 +                       if (i != css_q)
53016 +                               kfree(fmts[i]);
53017         }
53019         return ret;
53020 diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
53021 index dae9073e7d3c..085397045b36 100644
53022 --- a/drivers/staging/media/omap4iss/iss.c
53023 +++ b/drivers/staging/media/omap4iss/iss.c
53024 @@ -1236,8 +1236,10 @@ static int iss_probe(struct platform_device *pdev)
53025         if (ret < 0)
53026                 goto error;
53028 -       if (!omap4iss_get(iss))
53029 +       if (!omap4iss_get(iss)) {
53030 +               ret = -EINVAL;
53031                 goto error;
53032 +       }
53034         ret = iss_reset(iss);
53035         if (ret < 0)
53036 diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c
53037 index d3eb81ee8dc2..d821661d30f3 100644
53038 --- a/drivers/staging/media/rkvdec/rkvdec.c
53039 +++ b/drivers/staging/media/rkvdec/rkvdec.c
53040 @@ -55,16 +55,13 @@ static const struct v4l2_ctrl_ops rkvdec_ctrl_ops = {
53042  static const struct rkvdec_ctrl_desc rkvdec_h264_ctrl_descs[] = {
53043         {
53044 -               .mandatory = true,
53045                 .cfg.id = V4L2_CID_STATELESS_H264_DECODE_PARAMS,
53046         },
53047         {
53048 -               .mandatory = true,
53049                 .cfg.id = V4L2_CID_STATELESS_H264_SPS,
53050                 .cfg.ops = &rkvdec_ctrl_ops,
53051         },
53052         {
53053 -               .mandatory = true,
53054                 .cfg.id = V4L2_CID_STATELESS_H264_PPS,
53055         },
53056         {
53057 @@ -585,25 +582,7 @@ static const struct vb2_ops rkvdec_queue_ops = {
53059  static int rkvdec_request_validate(struct media_request *req)
53061 -       struct media_request_object *obj;
53062 -       const struct rkvdec_ctrls *ctrls;
53063 -       struct v4l2_ctrl_handler *hdl;
53064 -       struct rkvdec_ctx *ctx = NULL;
53065 -       unsigned int count, i;
53066 -       int ret;
53068 -       list_for_each_entry(obj, &req->objects, list) {
53069 -               if (vb2_request_object_is_buffer(obj)) {
53070 -                       struct vb2_buffer *vb;
53072 -                       vb = container_of(obj, struct vb2_buffer, req_obj);
53073 -                       ctx = vb2_get_drv_priv(vb->vb2_queue);
53074 -                       break;
53075 -               }
53076 -       }
53078 -       if (!ctx)
53079 -               return -EINVAL;
53080 +       unsigned int count;
53082         count = vb2_request_buffer_cnt(req);
53083         if (!count)
53084 @@ -611,31 +590,6 @@ static int rkvdec_request_validate(struct media_request *req)
53085         else if (count > 1)
53086                 return -EINVAL;
53088 -       hdl = v4l2_ctrl_request_hdl_find(req, &ctx->ctrl_hdl);
53089 -       if (!hdl)
53090 -               return -ENOENT;
53092 -       ret = 0;
53093 -       ctrls = ctx->coded_fmt_desc->ctrls;
53094 -       for (i = 0; ctrls && i < ctrls->num_ctrls; i++) {
53095 -               u32 id = ctrls->ctrls[i].cfg.id;
53096 -               struct v4l2_ctrl *ctrl;
53098 -               if (!ctrls->ctrls[i].mandatory)
53099 -                       continue;
53101 -               ctrl = v4l2_ctrl_request_hdl_ctrl_find(hdl, id);
53102 -               if (!ctrl) {
53103 -                       ret = -ENOENT;
53104 -                       break;
53105 -               }
53106 -       }
53108 -       v4l2_ctrl_request_hdl_put(hdl);
53110 -       if (ret)
53111 -               return ret;
53113         return vb2_request_validate(req);
53116 @@ -1118,7 +1072,7 @@ static struct platform_driver rkvdec_driver = {
53117         .remove = rkvdec_remove,
53118         .driver = {
53119                    .name = "rkvdec",
53120 -                  .of_match_table = of_match_ptr(of_rkvdec_match),
53121 +                  .of_match_table = of_rkvdec_match,
53122                    .pm = &rkvdec_pm_ops,
53123         },
53124  };
53125 diff --git a/drivers/staging/media/rkvdec/rkvdec.h b/drivers/staging/media/rkvdec/rkvdec.h
53126 index 77a137cca88e..52ac3874c5e5 100644
53127 --- a/drivers/staging/media/rkvdec/rkvdec.h
53128 +++ b/drivers/staging/media/rkvdec/rkvdec.h
53129 @@ -25,7 +25,6 @@
53130  struct rkvdec_ctx;
53132  struct rkvdec_ctrl_desc {
53133 -       u32 mandatory : 1;
53134         struct v4l2_ctrl_config cfg;
53135  };
53137 diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
53138 index 7718c561823f..92ace87c1c7d 100644
53139 --- a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
53140 +++ b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
53141 @@ -443,16 +443,17 @@
53142  #define VE_DEC_H265_STATUS_STCD_BUSY           BIT(21)
53143  #define VE_DEC_H265_STATUS_WB_BUSY             BIT(20)
53144  #define VE_DEC_H265_STATUS_BS_DMA_BUSY         BIT(19)
53145 -#define VE_DEC_H265_STATUS_IQIT_BUSY           BIT(18)
53146 +#define VE_DEC_H265_STATUS_IT_BUSY             BIT(18)
53147  #define VE_DEC_H265_STATUS_INTER_BUSY          BIT(17)
53148  #define VE_DEC_H265_STATUS_MORE_DATA           BIT(16)
53149 -#define VE_DEC_H265_STATUS_VLD_BUSY            BIT(14)
53150 -#define VE_DEC_H265_STATUS_DEBLOCKING_BUSY     BIT(13)
53151 -#define VE_DEC_H265_STATUS_DEBLOCKING_DRAM_BUSY        BIT(12)
53152 -#define VE_DEC_H265_STATUS_INTRA_BUSY          BIT(11)
53153 -#define VE_DEC_H265_STATUS_SAO_BUSY            BIT(10)
53154 -#define VE_DEC_H265_STATUS_MVP_BUSY            BIT(9)
53155 -#define VE_DEC_H265_STATUS_SWDEC_BUSY          BIT(8)
53156 +#define VE_DEC_H265_STATUS_DBLK_BUSY           BIT(15)
53157 +#define VE_DEC_H265_STATUS_IREC_BUSY           BIT(14)
53158 +#define VE_DEC_H265_STATUS_INTRA_BUSY          BIT(13)
53159 +#define VE_DEC_H265_STATUS_MCRI_BUSY           BIT(12)
53160 +#define VE_DEC_H265_STATUS_IQIT_BUSY           BIT(11)
53161 +#define VE_DEC_H265_STATUS_MVP_BUSY            BIT(10)
53162 +#define VE_DEC_H265_STATUS_IS_BUSY             BIT(9)
53163 +#define VE_DEC_H265_STATUS_VLD_BUSY            BIT(8)
53164  #define VE_DEC_H265_STATUS_OVER_TIME           BIT(3)
53165  #define VE_DEC_H265_STATUS_VLD_DATA_REQ                BIT(2)
53166  #define VE_DEC_H265_STATUS_ERROR               BIT(1)
53167 diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c
53168 index 5516be3af898..c1d52190e1bd 100644
53169 --- a/drivers/staging/qlge/qlge_main.c
53170 +++ b/drivers/staging/qlge/qlge_main.c
53171 @@ -4550,7 +4550,7 @@ static int qlge_probe(struct pci_dev *pdev,
53172         struct net_device *ndev = NULL;
53173         struct devlink *devlink;
53174         static int cards_found;
53175 -       int err = 0;
53176 +       int err;
53178         devlink = devlink_alloc(&qlge_devlink_ops, sizeof(struct qlge_adapter));
53179         if (!devlink)
53180 @@ -4561,8 +4561,10 @@ static int qlge_probe(struct pci_dev *pdev,
53181         ndev = alloc_etherdev_mq(sizeof(struct qlge_netdev_priv),
53182                                  min(MAX_CPUS,
53183                                      netif_get_num_default_rss_queues()));
53184 -       if (!ndev)
53185 +       if (!ndev) {
53186 +               err = -ENOMEM;
53187                 goto devlink_free;
53188 +       }
53190         ndev_priv = netdev_priv(ndev);
53191         ndev_priv->qdev = qdev;
53192 diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
53193 index 9fc4adc83d77..b5a313649f44 100644
53194 --- a/drivers/staging/rtl8192u/r8192U_core.c
53195 +++ b/drivers/staging/rtl8192u/r8192U_core.c
53196 @@ -3210,7 +3210,7 @@ static void rtl819x_update_rxcounts(struct r8192_priv *priv, u32 *TotalRxBcnNum,
53197                              u32 *TotalRxDataNum)
53199         u16                     SlotIndex;
53200 -       u8                      i;
53201 +       u16                     i;
53203         *TotalRxBcnNum = 0;
53204         *TotalRxDataNum = 0;
53205 diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
53206 index 898add4d1fc8..0aa9dd467349 100644
53207 --- a/drivers/staging/rts5208/rtsx.c
53208 +++ b/drivers/staging/rts5208/rtsx.c
53209 @@ -477,7 +477,7 @@ static int rtsx_polling_thread(void *__dev)
53211         for (;;) {
53212                 set_current_state(TASK_INTERRUPTIBLE);
53213 -               schedule_timeout(msecs_to_jiffies(POLLING_INTERVAL));
53214 +               schedule_msec_hrtimeout((POLLING_INTERVAL));
53216                 /* lock the device pointers */
53217                 mutex_lock(&dev->dev_mutex);
53218 diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
53219 index 0433536930a9..d8726f28843f 100644
53220 --- a/drivers/staging/unisys/visornic/visornic_main.c
53221 +++ b/drivers/staging/unisys/visornic/visornic_main.c
53222 @@ -549,7 +549,7 @@ static int visornic_disable_with_timeout(struct net_device *netdev,
53223                 }
53224                 set_current_state(TASK_INTERRUPTIBLE);
53225                 spin_unlock_irqrestore(&devdata->priv_lock, flags);
53226 -               wait += schedule_timeout(msecs_to_jiffies(10));
53227 +               wait += schedule_msec_hrtimeout((10));
53228                 spin_lock_irqsave(&devdata->priv_lock, flags);
53229         }
53231 @@ -560,7 +560,7 @@ static int visornic_disable_with_timeout(struct net_device *netdev,
53232                 while (1) {
53233                         set_current_state(TASK_INTERRUPTIBLE);
53234                         spin_unlock_irqrestore(&devdata->priv_lock, flags);
53235 -                       schedule_timeout(msecs_to_jiffies(10));
53236 +                       schedule_msec_hrtimeout((10));
53237                         spin_lock_irqsave(&devdata->priv_lock, flags);
53238                         if (atomic_read(&devdata->usage))
53239                                 break;
53240 @@ -714,7 +714,7 @@ static int visornic_enable_with_timeout(struct net_device *netdev,
53241                 }
53242                 set_current_state(TASK_INTERRUPTIBLE);
53243                 spin_unlock_irqrestore(&devdata->priv_lock, flags);
53244 -               wait += schedule_timeout(msecs_to_jiffies(10));
53245 +               wait += schedule_msec_hrtimeout((10));
53246                 spin_lock_irqsave(&devdata->priv_lock, flags);
53247         }
53249 diff --git a/drivers/staging/wimax/i2400m/op-rfkill.c b/drivers/staging/wimax/i2400m/op-rfkill.c
53250 index fbddf2e18c14..44698a1aae87 100644
53251 --- a/drivers/staging/wimax/i2400m/op-rfkill.c
53252 +++ b/drivers/staging/wimax/i2400m/op-rfkill.c
53253 @@ -86,7 +86,7 @@ int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev,
53254         if (cmd == NULL)
53255                 goto error_alloc;
53256         cmd->hdr.type = cpu_to_le16(I2400M_MT_CMD_RF_CONTROL);
53257 -       cmd->hdr.length = sizeof(cmd->sw_rf);
53258 +       cmd->hdr.length = cpu_to_le16(sizeof(cmd->sw_rf));
53259         cmd->hdr.version = cpu_to_le16(I2400M_L3L4_VERSION);
53260         cmd->sw_rf.hdr.type = cpu_to_le16(I2400M_TLV_RF_OPERATION);
53261         cmd->sw_rf.hdr.length = cpu_to_le16(sizeof(cmd->sw_rf.status));
53262 diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
53263 index 9ee797b8cb7e..508b49b0eaf5 100644
53264 --- a/drivers/target/target_core_pscsi.c
53265 +++ b/drivers/target/target_core_pscsi.c
53266 @@ -620,8 +620,9 @@ static void pscsi_complete_cmd(struct se_cmd *cmd, u8 scsi_status,
53267                         unsigned char *buf;
53269                         buf = transport_kmap_data_sg(cmd);
53270 -                       if (!buf)
53271 +                       if (!buf) {
53272                                 ; /* XXX: TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE */
53273 +                       }
53275                         if (cdb[0] == MODE_SENSE_10) {
53276                                 if (!(buf[3] & 0x80))
53277 diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
53278 index bf73cd5f4b04..6809c970be03 100644
53279 --- a/drivers/target/target_core_user.c
53280 +++ b/drivers/target/target_core_user.c
53281 @@ -1377,7 +1377,7 @@ static int tcmu_run_tmr_queue(struct tcmu_dev *udev)
53282         return 1;
53285 -static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
53286 +static bool tcmu_handle_completions(struct tcmu_dev *udev)
53288         struct tcmu_mailbox *mb;
53289         struct tcmu_cmd *cmd;
53290 @@ -1420,7 +1420,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
53291                         pr_err("cmd_id %u not found, ring is broken\n",
53292                                entry->hdr.cmd_id);
53293                         set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
53294 -                       break;
53295 +                       return false;
53296                 }
53298                 tcmu_handle_completion(cmd, entry);
53299 diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
53300 index 319a1e701163..ddb8f9ecf307 100644
53301 --- a/drivers/tee/optee/core.c
53302 +++ b/drivers/tee/optee/core.c
53303 @@ -79,16 +79,6 @@ int optee_from_msg_param(struct tee_param *params, size_t num_params,
53304                                 return rc;
53305                         p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
53306                         p->u.memref.shm = shm;
53308 -                       /* Check that the memref is covered by the shm object */
53309 -                       if (p->u.memref.size) {
53310 -                               size_t o = p->u.memref.shm_offs +
53311 -                                          p->u.memref.size - 1;
53313 -                               rc = tee_shm_get_pa(shm, o, NULL);
53314 -                               if (rc)
53315 -                                       return rc;
53316 -                       }
53317                         break;
53318                 case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
53319                 case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
53320 diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c
53321 index 10af3341e5ea..6956581ed7a4 100644
53322 --- a/drivers/thermal/cpufreq_cooling.c
53323 +++ b/drivers/thermal/cpufreq_cooling.c
53324 @@ -125,7 +125,7 @@ static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev,
53326         int i;
53328 -       for (i = cpufreq_cdev->max_level; i >= 0; i--) {
53329 +       for (i = cpufreq_cdev->max_level; i > 0; i--) {
53330                 if (power >= cpufreq_cdev->em->table[i].power)
53331                         break;
53332         }
53333 diff --git a/drivers/thermal/gov_fair_share.c b/drivers/thermal/gov_fair_share.c
53334 index aaa07180ab48..645432ce6365 100644
53335 --- a/drivers/thermal/gov_fair_share.c
53336 +++ b/drivers/thermal/gov_fair_share.c
53337 @@ -82,6 +82,8 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
53338         int total_instance = 0;
53339         int cur_trip_level = get_trip_level(tz);
53341 +       mutex_lock(&tz->lock);
53343         list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
53344                 if (instance->trip != trip)
53345                         continue;
53346 @@ -110,6 +112,8 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
53347                 mutex_unlock(&instance->cdev->lock);
53348                 thermal_cdev_update(cdev);
53349         }
53351 +       mutex_unlock(&tz->lock);
53352         return 0;
53355 diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
53356 index d8ce3a687b80..3c4c0516e58a 100644
53357 --- a/drivers/thermal/qcom/tsens.c
53358 +++ b/drivers/thermal/qcom/tsens.c
53359 @@ -755,8 +755,10 @@ int __init init_common(struct tsens_priv *priv)
53360                 for (i = VER_MAJOR; i <= VER_STEP; i++) {
53361                         priv->rf[i] = devm_regmap_field_alloc(dev, priv->srot_map,
53362                                                               priv->fields[i]);
53363 -                       if (IS_ERR(priv->rf[i]))
53364 -                               return PTR_ERR(priv->rf[i]);
53365 +                       if (IS_ERR(priv->rf[i])) {
53366 +                               ret = PTR_ERR(priv->rf[i]);
53367 +                               goto err_put_device;
53368 +                       }
53369                 }
53370                 ret = regmap_field_read(priv->rf[VER_MINOR], &ver_minor);
53371                 if (ret)
53372 diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
53373 index 69ef12f852b7..5b76f9a1280d 100644
53374 --- a/drivers/thermal/thermal_of.c
53375 +++ b/drivers/thermal/thermal_of.c
53376 @@ -704,14 +704,17 @@ static int thermal_of_populate_bind_params(struct device_node *np,
53378         count = of_count_phandle_with_args(np, "cooling-device",
53379                                            "#cooling-cells");
53380 -       if (!count) {
53381 +       if (count <= 0) {
53382                 pr_err("Add a cooling_device property with at least one device\n");
53383 +               ret = -ENOENT;
53384                 goto end;
53385         }
53387         __tcbp = kcalloc(count, sizeof(*__tcbp), GFP_KERNEL);
53388 -       if (!__tcbp)
53389 +       if (!__tcbp) {
53390 +               ret = -ENOMEM;
53391                 goto end;
53392 +       }
53394         for (i = 0; i < count; i++) {
53395                 ret = of_parse_phandle_with_args(np, "cooling-device",
53396 diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
53397 index 18b78ea110ef..ecda5e18d23f 100644
53398 --- a/drivers/tty/amiserial.c
53399 +++ b/drivers/tty/amiserial.c
53400 @@ -970,6 +970,7 @@ static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss)
53401         if (!serial_isroot()) {
53402                 if ((ss->baud_base != state->baud_base) ||
53403                     (ss->close_delay != port->close_delay) ||
53404 +                   (ss->closing_wait != port->closing_wait) ||
53405                     (ss->xmit_fifo_size != state->xmit_fifo_size) ||
53406                     ((ss->flags & ~ASYNC_USR_MASK) !=
53407                      (port->flags & ~ASYNC_USR_MASK))) {
53408 diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
53409 index 9f13f7d49dd7..f9f14104bd2c 100644
53410 --- a/drivers/tty/moxa.c
53411 +++ b/drivers/tty/moxa.c
53412 @@ -2040,7 +2040,7 @@ static int moxa_get_serial_info(struct tty_struct *tty,
53413         ss->line = info->port.tty->index,
53414         ss->flags = info->port.flags,
53415         ss->baud_base = 921600,
53416 -       ss->close_delay = info->port.close_delay;
53417 +       ss->close_delay = jiffies_to_msecs(info->port.close_delay) / 10;
53418         mutex_unlock(&info->port.mutex);
53419         return 0;
53421 @@ -2050,6 +2050,7 @@ static int moxa_set_serial_info(struct tty_struct *tty,
53422                 struct serial_struct *ss)
53424         struct moxa_port *info = tty->driver_data;
53425 +       unsigned int close_delay;
53427         if (tty->index == MAX_PORTS)
53428                 return -EINVAL;
53429 @@ -2061,19 +2062,24 @@ static int moxa_set_serial_info(struct tty_struct *tty,
53430                         ss->baud_base != 921600)
53431                 return -EPERM;
53433 +       close_delay = msecs_to_jiffies(ss->close_delay * 10);
53435         mutex_lock(&info->port.mutex);
53436         if (!capable(CAP_SYS_ADMIN)) {
53437 -               if (((ss->flags & ~ASYNC_USR_MASK) !=
53438 +               if (close_delay != info->port.close_delay ||
53439 +                   ss->type != info->type ||
53440 +                   ((ss->flags & ~ASYNC_USR_MASK) !=
53441                      (info->port.flags & ~ASYNC_USR_MASK))) {
53442                         mutex_unlock(&info->port.mutex);
53443                         return -EPERM;
53444                 }
53445 -       }
53446 -       info->port.close_delay = ss->close_delay * HZ / 100;
53447 +       } else {
53448 +               info->port.close_delay = close_delay;
53450 -       MoxaSetFifo(info, ss->type == PORT_16550A);
53451 +               MoxaSetFifo(info, ss->type == PORT_16550A);
53453 -       info->type = ss->type;
53454 +               info->type = ss->type;
53455 +       }
53456         mutex_unlock(&info->port.mutex);
53457         return 0;
53459 diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
53460 index 4203b64bccdb..2d8e76263a25 100644
53461 --- a/drivers/tty/mxser.c
53462 +++ b/drivers/tty/mxser.c
53463 @@ -1208,19 +1208,26 @@ static int mxser_get_serial_info(struct tty_struct *tty,
53465         struct mxser_port *info = tty->driver_data;
53466         struct tty_port *port = &info->port;
53467 +       unsigned int closing_wait, close_delay;
53469         if (tty->index == MXSER_PORTS)
53470                 return -ENOTTY;
53472         mutex_lock(&port->mutex);
53474 +       close_delay = jiffies_to_msecs(info->port.close_delay) / 10;
53475 +       closing_wait = info->port.closing_wait;
53476 +       if (closing_wait != ASYNC_CLOSING_WAIT_NONE)
53477 +               closing_wait = jiffies_to_msecs(closing_wait) / 10;
53479         ss->type = info->type,
53480         ss->line = tty->index,
53481         ss->port = info->ioaddr,
53482         ss->irq = info->board->irq,
53483         ss->flags = info->port.flags,
53484         ss->baud_base = info->baud_base,
53485 -       ss->close_delay = info->port.close_delay,
53486 -       ss->closing_wait = info->port.closing_wait,
53487 +       ss->close_delay = close_delay;
53488 +       ss->closing_wait = closing_wait;
53489         ss->custom_divisor = info->custom_divisor,
53490         mutex_unlock(&port->mutex);
53491         return 0;
53492 @@ -1233,7 +1240,7 @@ static int mxser_set_serial_info(struct tty_struct *tty,
53493         struct tty_port *port = &info->port;
53494         speed_t baud;
53495         unsigned long sl_flags;
53496 -       unsigned int flags;
53497 +       unsigned int flags, close_delay, closing_wait;
53498         int retval = 0;
53500         if (tty->index == MXSER_PORTS)
53501 @@ -1255,9 +1262,15 @@ static int mxser_set_serial_info(struct tty_struct *tty,
53503         flags = port->flags & ASYNC_SPD_MASK;
53505 +       close_delay = msecs_to_jiffies(ss->close_delay * 10);
53506 +       closing_wait = ss->closing_wait;
53507 +       if (closing_wait != ASYNC_CLOSING_WAIT_NONE)
53508 +               closing_wait = msecs_to_jiffies(closing_wait * 10);
53510         if (!capable(CAP_SYS_ADMIN)) {
53511                 if ((ss->baud_base != info->baud_base) ||
53512 -                               (ss->close_delay != info->port.close_delay) ||
53513 +                               (close_delay != info->port.close_delay) ||
53514 +                               (closing_wait != info->port.closing_wait) ||
53515                                 ((ss->flags & ~ASYNC_USR_MASK) != (info->port.flags & ~ASYNC_USR_MASK))) {
53516                         mutex_unlock(&port->mutex);
53517                         return -EPERM;
53518 @@ -1271,8 +1284,8 @@ static int mxser_set_serial_info(struct tty_struct *tty,
53519                  */
53520                 port->flags = ((port->flags & ~ASYNC_FLAGS) |
53521                                 (ss->flags & ASYNC_FLAGS));
53522 -               port->close_delay = ss->close_delay * HZ / 100;
53523 -               port->closing_wait = ss->closing_wait * HZ / 100;
53524 +               port->close_delay = close_delay;
53525 +               port->closing_wait = closing_wait;
53526                 if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST &&
53527                                 (ss->baud_base != info->baud_base ||
53528                                 ss->custom_divisor !=
53529 @@ -1284,11 +1297,11 @@ static int mxser_set_serial_info(struct tty_struct *tty,
53530                         baud = ss->baud_base / ss->custom_divisor;
53531                         tty_encode_baud_rate(tty, baud, baud);
53532                 }
53533 -       }
53535 -       info->type = ss->type;
53536 +               info->type = ss->type;
53538 -       process_txrx_fifo(info);
53539 +               process_txrx_fifo(info);
53540 +       }
53542         if (tty_port_initialized(port)) {
53543                 if (flags != (port->flags & ASYNC_SPD_MASK)) {
53544 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
53545 index 51dafc06f541..2406653d38b7 100644
53546 --- a/drivers/tty/n_gsm.c
53547 +++ b/drivers/tty/n_gsm.c
53548 @@ -2384,8 +2384,18 @@ static int gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
53549                 /* Don't register device 0 - this is the control channel and not
53550                    a usable tty interface */
53551                 base = mux_num_to_base(gsm); /* Base for this MUX */
53552 -               for (i = 1; i < NUM_DLCI; i++)
53553 -                       tty_register_device(gsm_tty_driver, base + i, NULL);
53554 +               for (i = 1; i < NUM_DLCI; i++) {
53555 +                       struct device *dev;
53557 +                       dev = tty_register_device(gsm_tty_driver,
53558 +                                                       base + i, NULL);
53559 +                       if (IS_ERR(dev)) {
53560 +                               for (i--; i >= 1; i--)
53561 +                                       tty_unregister_device(gsm_tty_driver,
53562 +                                                               base + i);
53563 +                               return PTR_ERR(dev);
53564 +                       }
53565 +               }
53566         }
53567         return ret;
53569 diff --git a/drivers/tty/serial/liteuart.c b/drivers/tty/serial/liteuart.c
53570 index 64842f3539e1..0b06770642cb 100644
53571 --- a/drivers/tty/serial/liteuart.c
53572 +++ b/drivers/tty/serial/liteuart.c
53573 @@ -270,8 +270,8 @@ static int liteuart_probe(struct platform_device *pdev)
53575         /* get membase */
53576         port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
53577 -       if (!port->membase)
53578 -               return -ENXIO;
53579 +       if (IS_ERR(port->membase))
53580 +               return PTR_ERR(port->membase);
53582         /* values not from device tree */
53583         port->dev = &pdev->dev;
53584 diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
53585 index 76b94d0ff586..84e8158088cd 100644
53586 --- a/drivers/tty/serial/omap-serial.c
53587 +++ b/drivers/tty/serial/omap-serial.c
53588 @@ -159,6 +159,8 @@ struct uart_omap_port {
53589         u32                     calc_latency;
53590         struct work_struct      qos_work;
53591         bool                    is_suspending;
53593 +       unsigned int            rs485_tx_filter_count;
53594  };
53596  #define to_uart_omap_port(p) ((container_of((p), struct uart_omap_port, port)))
53597 @@ -302,7 +304,8 @@ static void serial_omap_stop_tx(struct uart_port *port)
53598                         serial_out(up, UART_OMAP_SCR, up->scr);
53599                         res = (port->rs485.flags & SER_RS485_RTS_AFTER_SEND) ?
53600                                 1 : 0;
53601 -                       if (gpiod_get_value(up->rts_gpiod) != res) {
53602 +                       if (up->rts_gpiod &&
53603 +                           gpiod_get_value(up->rts_gpiod) != res) {
53604                                 if (port->rs485.delay_rts_after_send > 0)
53605                                         mdelay(
53606                                         port->rs485.delay_rts_after_send);
53607 @@ -328,19 +331,6 @@ static void serial_omap_stop_tx(struct uart_port *port)
53608                 serial_out(up, UART_IER, up->ier);
53609         }
53611 -       if ((port->rs485.flags & SER_RS485_ENABLED) &&
53612 -           !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
53613 -               /*
53614 -                * Empty the RX FIFO, we are not interested in anything
53615 -                * received during the half-duplex transmission.
53616 -                */
53617 -               serial_out(up, UART_FCR, up->fcr | UART_FCR_CLEAR_RCVR);
53618 -               /* Re-enable RX interrupts */
53619 -               up->ier |= UART_IER_RLSI | UART_IER_RDI;
53620 -               up->port.read_status_mask |= UART_LSR_DR;
53621 -               serial_out(up, UART_IER, up->ier);
53622 -       }
53624         pm_runtime_mark_last_busy(up->dev);
53625         pm_runtime_put_autosuspend(up->dev);
53627 @@ -366,6 +356,10 @@ static void transmit_chars(struct uart_omap_port *up, unsigned int lsr)
53628                 serial_out(up, UART_TX, up->port.x_char);
53629                 up->port.icount.tx++;
53630                 up->port.x_char = 0;
53631 +               if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
53632 +                   !(up->port.rs485.flags & SER_RS485_RX_DURING_TX))
53633 +                       up->rs485_tx_filter_count++;
53635                 return;
53636         }
53637         if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
53638 @@ -377,6 +371,10 @@ static void transmit_chars(struct uart_omap_port *up, unsigned int lsr)
53639                 serial_out(up, UART_TX, xmit->buf[xmit->tail]);
53640                 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
53641                 up->port.icount.tx++;
53642 +               if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
53643 +                   !(up->port.rs485.flags & SER_RS485_RX_DURING_TX))
53644 +                       up->rs485_tx_filter_count++;
53646                 if (uart_circ_empty(xmit))
53647                         break;
53648         } while (--count > 0);
53649 @@ -411,7 +409,7 @@ static void serial_omap_start_tx(struct uart_port *port)
53651                 /* if rts not already enabled */
53652                 res = (port->rs485.flags & SER_RS485_RTS_ON_SEND) ? 1 : 0;
53653 -               if (gpiod_get_value(up->rts_gpiod) != res) {
53654 +               if (up->rts_gpiod && gpiod_get_value(up->rts_gpiod) != res) {
53655                         gpiod_set_value(up->rts_gpiod, res);
53656                         if (port->rs485.delay_rts_before_send > 0)
53657                                 mdelay(port->rs485.delay_rts_before_send);
53658 @@ -420,7 +418,7 @@ static void serial_omap_start_tx(struct uart_port *port)
53660         if ((port->rs485.flags & SER_RS485_ENABLED) &&
53661             !(port->rs485.flags & SER_RS485_RX_DURING_TX))
53662 -               serial_omap_stop_rx(port);
53663 +               up->rs485_tx_filter_count = 0;
53665         serial_omap_enable_ier_thri(up);
53666         pm_runtime_mark_last_busy(up->dev);
53667 @@ -491,8 +489,13 @@ static void serial_omap_rlsi(struct uart_omap_port *up, unsigned int lsr)
53668          * Read one data character out to avoid stalling the receiver according
53669          * to the table 23-246 of the omap4 TRM.
53670          */
53671 -       if (likely(lsr & UART_LSR_DR))
53672 +       if (likely(lsr & UART_LSR_DR)) {
53673                 serial_in(up, UART_RX);
53674 +               if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
53675 +                   !(up->port.rs485.flags & SER_RS485_RX_DURING_TX) &&
53676 +                   up->rs485_tx_filter_count)
53677 +                       up->rs485_tx_filter_count--;
53678 +       }
53680         up->port.icount.rx++;
53681         flag = TTY_NORMAL;
53682 @@ -543,6 +546,13 @@ static void serial_omap_rdi(struct uart_omap_port *up, unsigned int lsr)
53683                 return;
53685         ch = serial_in(up, UART_RX);
53686 +       if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
53687 +           !(up->port.rs485.flags & SER_RS485_RX_DURING_TX) &&
53688 +           up->rs485_tx_filter_count) {
53689 +               up->rs485_tx_filter_count--;
53690 +               return;
53691 +       }
53693         flag = TTY_NORMAL;
53694         up->port.icount.rx++;
53696 @@ -1407,18 +1417,13 @@ serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
53697         /* store new config */
53698         port->rs485 = *rs485;
53700 -       /*
53701 -        * Just as a precaution, only allow rs485
53702 -        * to be enabled if the gpio pin is valid
53703 -        */
53704         if (up->rts_gpiod) {
53705                 /* enable / disable rts */
53706                 val = (port->rs485.flags & SER_RS485_ENABLED) ?
53707                         SER_RS485_RTS_AFTER_SEND : SER_RS485_RTS_ON_SEND;
53708                 val = (port->rs485.flags & val) ? 1 : 0;
53709                 gpiod_set_value(up->rts_gpiod, val);
53710 -       } else
53711 -               port->rs485.flags &= ~SER_RS485_ENABLED;
53712 +       }
53714         /* Enable interrupts */
53715         up->ier = mode;
53716 diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
53717 index f86ec2d2635b..9adb8362578c 100644
53718 --- a/drivers/tty/serial/sc16is7xx.c
53719 +++ b/drivers/tty/serial/sc16is7xx.c
53720 @@ -1196,7 +1196,7 @@ static int sc16is7xx_probe(struct device *dev,
53721         ret = regmap_read(regmap,
53722                           SC16IS7XX_LSR_REG << SC16IS7XX_REG_SHIFT, &val);
53723         if (ret < 0)
53724 -               return ret;
53725 +               return -EPROBE_DEFER;
53727         /* Alloc port structure */
53728         s = devm_kzalloc(dev, struct_size(s, p, devtype->nr_uart), GFP_KERNEL);
53729 diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
53730 index ba31e97d3d96..43f02ed055d5 100644
53731 --- a/drivers/tty/serial/serial_core.c
53732 +++ b/drivers/tty/serial/serial_core.c
53733 @@ -1305,7 +1305,7 @@ static int uart_set_rs485_config(struct uart_port *port,
53734         unsigned long flags;
53736         if (!port->rs485_config)
53737 -               return -ENOIOCTLCMD;
53738 +               return -ENOTTY;
53740         if (copy_from_user(&rs485, rs485_user, sizeof(*rs485_user)))
53741                 return -EFAULT;
53742 @@ -1329,7 +1329,7 @@ static int uart_get_iso7816_config(struct uart_port *port,
53743         struct serial_iso7816 aux;
53745         if (!port->iso7816_config)
53746 -               return -ENOIOCTLCMD;
53747 +               return -ENOTTY;
53749         spin_lock_irqsave(&port->lock, flags);
53750         aux = port->iso7816;
53751 @@ -1349,7 +1349,7 @@ static int uart_set_iso7816_config(struct uart_port *port,
53752         unsigned long flags;
53754         if (!port->iso7816_config)
53755 -               return -ENOIOCTLCMD;
53756 +               return -ENOTTY;
53758         if (copy_from_user(&iso7816, iso7816_user, sizeof(*iso7816_user)))
53759                 return -EFAULT;
53760 diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
53761 index b3675cf25a69..99dfa884cbef 100644
53762 --- a/drivers/tty/serial/stm32-usart.c
53763 +++ b/drivers/tty/serial/stm32-usart.c
53764 @@ -214,12 +214,14 @@ static void stm32_usart_receive_chars(struct uart_port *port, bool threaded)
53765         struct tty_port *tport = &port->state->port;
53766         struct stm32_port *stm32_port = to_stm32_port(port);
53767         const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
53768 -       unsigned long c;
53769 +       unsigned long c, flags;
53770         u32 sr;
53771         char flag;
53773 -       if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
53774 -               pm_wakeup_event(tport->tty->dev, 0);
53775 +       if (threaded)
53776 +               spin_lock_irqsave(&port->lock, flags);
53777 +       else
53778 +               spin_lock(&port->lock);
53780         while (stm32_usart_pending_rx(port, &sr, &stm32_port->last_res,
53781                                       threaded)) {
53782 @@ -276,9 +278,12 @@ static void stm32_usart_receive_chars(struct uart_port *port, bool threaded)
53783                 uart_insert_char(port, sr, USART_SR_ORE, c, flag);
53784         }
53786 -       spin_unlock(&port->lock);
53787 +       if (threaded)
53788 +               spin_unlock_irqrestore(&port->lock, flags);
53789 +       else
53790 +               spin_unlock(&port->lock);
53792         tty_flip_buffer_push(tport);
53793 -       spin_lock(&port->lock);
53796  static void stm32_usart_tx_dma_complete(void *arg)
53797 @@ -286,12 +291,16 @@ static void stm32_usart_tx_dma_complete(void *arg)
53798         struct uart_port *port = arg;
53799         struct stm32_port *stm32port = to_stm32_port(port);
53800         const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
53801 +       unsigned long flags;
53803 +       dmaengine_terminate_async(stm32port->tx_ch);
53804         stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
53805         stm32port->tx_dma_busy = false;
53807         /* Let's see if we have pending data to send */
53808 +       spin_lock_irqsave(&port->lock, flags);
53809         stm32_usart_transmit_chars(port);
53810 +       spin_unlock_irqrestore(&port->lock, flags);
53813  static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
53814 @@ -455,29 +464,34 @@ static void stm32_usart_transmit_chars(struct uart_port *port)
53815  static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
53817         struct uart_port *port = ptr;
53818 +       struct tty_port *tport = &port->state->port;
53819         struct stm32_port *stm32_port = to_stm32_port(port);
53820         const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
53821         u32 sr;
53823 -       spin_lock(&port->lock);
53825         sr = readl_relaxed(port->membase + ofs->isr);
53827         if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
53828                 writel_relaxed(USART_ICR_RTOCF,
53829                                port->membase + ofs->icr);
53831 -       if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG)
53832 +       if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) {
53833 +               /* Clear wake up flag and disable wake up interrupt */
53834                 writel_relaxed(USART_ICR_WUCF,
53835                                port->membase + ofs->icr);
53836 +               stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
53837 +               if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
53838 +                       pm_wakeup_event(tport->tty->dev, 0);
53839 +       }
53841         if ((sr & USART_SR_RXNE) && !(stm32_port->rx_ch))
53842                 stm32_usart_receive_chars(port, false);
53844 -       if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch))
53845 +       if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) {
53846 +               spin_lock(&port->lock);
53847                 stm32_usart_transmit_chars(port);
53849 -       spin_unlock(&port->lock);
53850 +               spin_unlock(&port->lock);
53851 +       }
53853         if (stm32_port->rx_ch)
53854                 return IRQ_WAKE_THREAD;
53855 @@ -490,13 +504,9 @@ static irqreturn_t stm32_usart_threaded_interrupt(int irq, void *ptr)
53856         struct uart_port *port = ptr;
53857         struct stm32_port *stm32_port = to_stm32_port(port);
53859 -       spin_lock(&port->lock);
53861         if (stm32_port->rx_ch)
53862                 stm32_usart_receive_chars(port, true);
53864 -       spin_unlock(&port->lock);
53866         return IRQ_HANDLED;
53869 @@ -505,7 +515,10 @@ static unsigned int stm32_usart_tx_empty(struct uart_port *port)
53870         struct stm32_port *stm32_port = to_stm32_port(port);
53871         const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
53873 -       return readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE;
53874 +       if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC)
53875 +               return TIOCSER_TEMT;
53877 +       return 0;
53880  static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
53881 @@ -634,6 +647,7 @@ static int stm32_usart_startup(struct uart_port *port)
53883         struct stm32_port *stm32_port = to_stm32_port(port);
53884         const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
53885 +       const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
53886         const char *name = to_platform_device(port->dev)->name;
53887         u32 val;
53888         int ret;
53889 @@ -646,21 +660,10 @@ static int stm32_usart_startup(struct uart_port *port)
53891         /* RX FIFO Flush */
53892         if (ofs->rqr != UNDEF_REG)
53893 -               stm32_usart_set_bits(port, ofs->rqr, USART_RQR_RXFRQ);
53895 -       /* Tx and RX FIFO configuration */
53896 -       if (stm32_port->fifoen) {
53897 -               val = readl_relaxed(port->membase + ofs->cr3);
53898 -               val &= ~(USART_CR3_TXFTCFG_MASK | USART_CR3_RXFTCFG_MASK);
53899 -               val |= USART_CR3_TXFTCFG_HALF << USART_CR3_TXFTCFG_SHIFT;
53900 -               val |= USART_CR3_RXFTCFG_HALF << USART_CR3_RXFTCFG_SHIFT;
53901 -               writel_relaxed(val, port->membase + ofs->cr3);
53902 -       }
53903 +               writel_relaxed(USART_RQR_RXFRQ, port->membase + ofs->rqr);
53905 -       /* RX FIFO enabling */
53906 -       val = stm32_port->cr1_irq | USART_CR1_RE;
53907 -       if (stm32_port->fifoen)
53908 -               val |= USART_CR1_FIFOEN;
53909 +       /* RX enabling */
53910 +       val = stm32_port->cr1_irq | USART_CR1_RE | BIT(cfg->uart_enable_bit);
53911         stm32_usart_set_bits(port, ofs->cr1, val);
53913         return 0;
53914 @@ -691,6 +694,11 @@ static void stm32_usart_shutdown(struct uart_port *port)
53915         if (ret)
53916                 dev_err(port->dev, "Transmission is not complete\n");
53918 +       /* flush RX & TX FIFO */
53919 +       if (ofs->rqr != UNDEF_REG)
53920 +               writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
53921 +                              port->membase + ofs->rqr);
53923         stm32_usart_clr_bits(port, ofs->cr1, val);
53925         free_irq(port->irq, port);
53926 @@ -737,8 +745,9 @@ static void stm32_usart_set_termios(struct uart_port *port,
53927         unsigned int baud, bits;
53928         u32 usartdiv, mantissa, fraction, oversampling;
53929         tcflag_t cflag = termios->c_cflag;
53930 -       u32 cr1, cr2, cr3;
53931 +       u32 cr1, cr2, cr3, isr;
53932         unsigned long flags;
53933 +       int ret;
53935         if (!stm32_port->hw_flow_control)
53936                 cflag &= ~CRTSCTS;
53937 @@ -747,21 +756,36 @@ static void stm32_usart_set_termios(struct uart_port *port,
53939         spin_lock_irqsave(&port->lock, flags);
53941 +       ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
53942 +                                               isr,
53943 +                                               (isr & USART_SR_TC),
53944 +                                               10, 100000);
53946 +       /* Send the TC error message only when ISR_TC is not set. */
53947 +       if (ret)
53948 +               dev_err(port->dev, "Transmission is not complete\n");
53950         /* Stop serial port and reset value */
53951         writel_relaxed(0, port->membase + ofs->cr1);
53953         /* flush RX & TX FIFO */
53954         if (ofs->rqr != UNDEF_REG)
53955 -               stm32_usart_set_bits(port, ofs->rqr,
53956 -                                    USART_RQR_TXFRQ | USART_RQR_RXFRQ);
53957 +               writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
53958 +                              port->membase + ofs->rqr);
53960         cr1 = USART_CR1_TE | USART_CR1_RE;
53961         if (stm32_port->fifoen)
53962                 cr1 |= USART_CR1_FIFOEN;
53963         cr2 = 0;
53965 +       /* Tx and RX FIFO configuration */
53966         cr3 = readl_relaxed(port->membase + ofs->cr3);
53967 -       cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTCFG_MASK | USART_CR3_RXFTIE
53968 -               | USART_CR3_TXFTCFG_MASK;
53969 +       cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTIE;
53970 +       if (stm32_port->fifoen) {
53971 +               cr3 &= ~(USART_CR3_TXFTCFG_MASK | USART_CR3_RXFTCFG_MASK);
53972 +               cr3 |= USART_CR3_TXFTCFG_HALF << USART_CR3_TXFTCFG_SHIFT;
53973 +               cr3 |= USART_CR3_RXFTCFG_HALF << USART_CR3_RXFTCFG_SHIFT;
53974 +       }
53976         if (cflag & CSTOPB)
53977                 cr2 |= USART_CR2_STOP_2B;
53978 @@ -817,12 +841,6 @@ static void stm32_usart_set_termios(struct uart_port *port,
53979                 cr3 |= USART_CR3_CTSE | USART_CR3_RTSE;
53980         }
53982 -       /* Handle modem control interrupts */
53983 -       if (UART_ENABLE_MS(port, termios->c_cflag))
53984 -               stm32_usart_enable_ms(port);
53985 -       else
53986 -               stm32_usart_disable_ms(port);
53988         usartdiv = DIV_ROUND_CLOSEST(port->uartclk, baud);
53990         /*
53991 @@ -892,12 +910,24 @@ static void stm32_usart_set_termios(struct uart_port *port,
53992                 cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
53993         }
53995 +       /* Configure wake up from low power on start bit detection */
53996 +       if (stm32_port->wakeirq > 0) {
53997 +               cr3 &= ~USART_CR3_WUS_MASK;
53998 +               cr3 |= USART_CR3_WUS_START_BIT;
53999 +       }
54001         writel_relaxed(cr3, port->membase + ofs->cr3);
54002         writel_relaxed(cr2, port->membase + ofs->cr2);
54003         writel_relaxed(cr1, port->membase + ofs->cr1);
54005         stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
54006         spin_unlock_irqrestore(&port->lock, flags);
54008 +       /* Handle modem control interrupts */
54009 +       if (UART_ENABLE_MS(port, termios->c_cflag))
54010 +               stm32_usart_enable_ms(port);
54011 +       else
54012 +               stm32_usart_disable_ms(port);
54015  static const char *stm32_usart_type(struct uart_port *port)
54016 @@ -1252,10 +1282,6 @@ static int stm32_usart_serial_probe(struct platform_device *pdev)
54017                 device_set_wakeup_enable(&pdev->dev, false);
54018         }
54020 -       ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port);
54021 -       if (ret)
54022 -               goto err_wirq;
54024         ret = stm32_usart_of_dma_rx_probe(stm32port, pdev);
54025         if (ret)
54026                 dev_info(&pdev->dev, "interrupt mode used for rx (no dma)\n");
54027 @@ -1269,11 +1295,40 @@ static int stm32_usart_serial_probe(struct platform_device *pdev)
54028         pm_runtime_get_noresume(&pdev->dev);
54029         pm_runtime_set_active(&pdev->dev);
54030         pm_runtime_enable(&pdev->dev);
54032 +       ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port);
54033 +       if (ret)
54034 +               goto err_port;
54036         pm_runtime_put_sync(&pdev->dev);
54038         return 0;
54040 -err_wirq:
54041 +err_port:
54042 +       pm_runtime_disable(&pdev->dev);
54043 +       pm_runtime_set_suspended(&pdev->dev);
54044 +       pm_runtime_put_noidle(&pdev->dev);
54046 +       if (stm32port->rx_ch) {
54047 +               dmaengine_terminate_async(stm32port->rx_ch);
54048 +               dma_release_channel(stm32port->rx_ch);
54049 +       }
54051 +       if (stm32port->rx_dma_buf)
54052 +               dma_free_coherent(&pdev->dev,
54053 +                                 RX_BUF_L, stm32port->rx_buf,
54054 +                                 stm32port->rx_dma_buf);
54056 +       if (stm32port->tx_ch) {
54057 +               dmaengine_terminate_async(stm32port->tx_ch);
54058 +               dma_release_channel(stm32port->tx_ch);
54059 +       }
54061 +       if (stm32port->tx_dma_buf)
54062 +               dma_free_coherent(&pdev->dev,
54063 +                                 TX_BUF_L, stm32port->tx_buf,
54064 +                                 stm32port->tx_dma_buf);
54066         if (stm32port->wakeirq > 0)
54067                 dev_pm_clear_wake_irq(&pdev->dev);
54069 @@ -1295,11 +1350,20 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
54070         int err;
54072         pm_runtime_get_sync(&pdev->dev);
54073 +       err = uart_remove_one_port(&stm32_usart_driver, port);
54074 +       if (err)
54075 +               return(err);
54077 +       pm_runtime_disable(&pdev->dev);
54078 +       pm_runtime_set_suspended(&pdev->dev);
54079 +       pm_runtime_put_noidle(&pdev->dev);
54081         stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
54083 -       if (stm32_port->rx_ch)
54084 +       if (stm32_port->rx_ch) {
54085 +               dmaengine_terminate_async(stm32_port->rx_ch);
54086                 dma_release_channel(stm32_port->rx_ch);
54087 +       }
54089         if (stm32_port->rx_dma_buf)
54090                 dma_free_coherent(&pdev->dev,
54091 @@ -1308,8 +1372,10 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
54093         stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
54095 -       if (stm32_port->tx_ch)
54096 +       if (stm32_port->tx_ch) {
54097 +               dmaengine_terminate_async(stm32_port->tx_ch);
54098                 dma_release_channel(stm32_port->tx_ch);
54099 +       }
54101         if (stm32_port->tx_dma_buf)
54102                 dma_free_coherent(&pdev->dev,
54103 @@ -1323,12 +1389,7 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
54105         stm32_usart_deinit_port(stm32_port);
54107 -       err = uart_remove_one_port(&stm32_usart_driver, port);
54109 -       pm_runtime_disable(&pdev->dev);
54110 -       pm_runtime_put_noidle(&pdev->dev);
54112 -       return err;
54113 +       return 0;
54116  #ifdef CONFIG_SERIAL_STM32_CONSOLE
54117 @@ -1436,23 +1497,20 @@ static void __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
54119         struct stm32_port *stm32_port = to_stm32_port(port);
54120         const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
54121 -       const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
54122 -       u32 val;
54124         if (stm32_port->wakeirq <= 0)
54125                 return;
54127 +       /*
54128 +        * Enable low-power wake-up and wake-up irq if argument is set to
54129 +        * "enable", disable low-power wake-up and wake-up irq otherwise
54130 +        */
54131         if (enable) {
54132 -               stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
54133                 stm32_usart_set_bits(port, ofs->cr1, USART_CR1_UESM);
54134 -               val = readl_relaxed(port->membase + ofs->cr3);
54135 -               val &= ~USART_CR3_WUS_MASK;
54136 -               /* Enable Wake up interrupt from low power on start bit */
54137 -               val |= USART_CR3_WUS_START_BIT | USART_CR3_WUFIE;
54138 -               writel_relaxed(val, port->membase + ofs->cr3);
54139 -               stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
54140 +               stm32_usart_set_bits(port, ofs->cr3, USART_CR3_WUFIE);
54141         } else {
54142                 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_UESM);
54143 +               stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
54144         }
54147 diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h
54148 index cb4f327c46db..94b568aa46bb 100644
54149 --- a/drivers/tty/serial/stm32-usart.h
54150 +++ b/drivers/tty/serial/stm32-usart.h
54151 @@ -127,9 +127,6 @@ struct stm32_usart_info stm32h7_info = {
54152  /* Dummy bits */
54153  #define USART_SR_DUMMY_RX      BIT(16)
54155 -/* USART_ICR (F7) */
54156 -#define USART_CR_TC            BIT(6)
54158  /* USART_DR */
54159  #define USART_DR_MASK          GENMASK(8, 0)
54161 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
54162 index 391bada4cedb..adbcbfa11b29 100644
54163 --- a/drivers/tty/tty_io.c
54164 +++ b/drivers/tty/tty_io.c
54165 @@ -2530,14 +2530,14 @@ static int send_break(struct tty_struct *tty, unsigned int duration)
54166   *     @p: pointer to result
54167   *
54168   *     Obtain the modem status bits from the tty driver if the feature
54169 - *     is supported. Return -EINVAL if it is not available.
54170 + *     is supported. Return -ENOTTY if it is not available.
54171   *
54172   *     Locking: none (up to the driver)
54173   */
54175  static int tty_tiocmget(struct tty_struct *tty, int __user *p)
54177 -       int retval = -EINVAL;
54178 +       int retval = -ENOTTY;
54180         if (tty->ops->tiocmget) {
54181                 retval = tty->ops->tiocmget(tty);
54182 @@ -2555,7 +2555,7 @@ static int tty_tiocmget(struct tty_struct *tty, int __user *p)
54183   *     @p: pointer to desired bits
54184   *
54185   *     Set the modem status bits from the tty driver if the feature
54186 - *     is supported. Return -EINVAL if it is not available.
54187 + *     is supported. Return -ENOTTY if it is not available.
54188   *
54189   *     Locking: none (up to the driver)
54190   */
54191 @@ -2567,7 +2567,7 @@ static int tty_tiocmset(struct tty_struct *tty, unsigned int cmd,
54192         unsigned int set, clear, val;
54194         if (tty->ops->tiocmset == NULL)
54195 -               return -EINVAL;
54196 +               return -ENOTTY;
54198         retval = get_user(val, p);
54199         if (retval)
54200 @@ -2607,7 +2607,7 @@ int tty_get_icount(struct tty_struct *tty,
54201         if (tty->ops->get_icount)
54202                 return tty->ops->get_icount(tty, icount);
54203         else
54204 -               return -EINVAL;
54205 +               return -ENOTTY;
54207  EXPORT_SYMBOL_GPL(tty_get_icount);
54209 diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
54210 index 4de1c6ddb8ff..803da2d111c8 100644
54211 --- a/drivers/tty/tty_ioctl.c
54212 +++ b/drivers/tty/tty_ioctl.c
54213 @@ -774,8 +774,8 @@ int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
54214         case TCSETX:
54215         case TCSETXW:
54216         case TCSETXF:
54217 -               return -EINVAL;
54218 -#endif         
54219 +               return -ENOTTY;
54220 +#endif
54221         case TIOCGSOFTCAR:
54222                 copy_termios(real_tty, &kterm);
54223                 ret = put_user((kterm.c_cflag & CLOCAL) ? 1 : 0,
54224 diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
54225 index 284b07224c55..0cc360da5426 100644
54226 --- a/drivers/tty/vt/vt.c
54227 +++ b/drivers/tty/vt/vt.c
54228 @@ -1381,6 +1381,7 @@ struct vc_data *vc_deallocate(unsigned int currcons)
54229                 atomic_notifier_call_chain(&vt_notifier_list, VT_DEALLOCATE, &param);
54230                 vcs_remove_sysfs(currcons);
54231                 visual_deinit(vc);
54232 +               con_free_unimap(vc);
54233                 put_pid(vc->vt_pid);
54234                 vc_uniscr_set(vc, NULL);
54235                 kfree(vc->vc_screenbuf);
54236 diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
54237 index d7d4bdd57f46..56707b6b0f57 100644
54238 --- a/drivers/usb/cdns3/cdnsp-gadget.c
54239 +++ b/drivers/usb/cdns3/cdnsp-gadget.c
54240 @@ -727,7 +727,7 @@ int cdnsp_reset_device(struct cdnsp_device *pdev)
54241          * are in Disabled state.
54242          */
54243         for (i = 1; i < CDNSP_ENDPOINTS_NUM; ++i)
54244 -               pdev->eps[i].ep_state |= EP_STOPPED;
54245 +               pdev->eps[i].ep_state |= EP_STOPPED | EP_UNCONFIGURED;
54247         trace_cdnsp_handle_cmd_reset_dev(slot_ctx);
54249 @@ -942,6 +942,7 @@ static int cdnsp_gadget_ep_enable(struct usb_ep *ep,
54251         pep = to_cdnsp_ep(ep);
54252         pdev = pep->pdev;
54253 +       pep->ep_state &= ~EP_UNCONFIGURED;
54255         if (dev_WARN_ONCE(pdev->dev, pep->ep_state & EP_ENABLED,
54256                           "%s is already enabled\n", pep->name))
54257 @@ -1023,9 +1024,13 @@ static int cdnsp_gadget_ep_disable(struct usb_ep *ep)
54258                 goto finish;
54259         }
54261 -       cdnsp_cmd_stop_ep(pdev, pep);
54262         pep->ep_state |= EP_DIS_IN_RROGRESS;
54263 -       cdnsp_cmd_flush_ep(pdev, pep);
54265 +       /* Endpoint was unconfigured by Reset Device command. */
54266 +       if (!(pep->ep_state & EP_UNCONFIGURED)) {
54267 +               cdnsp_cmd_stop_ep(pdev, pep);
54268 +               cdnsp_cmd_flush_ep(pdev, pep);
54269 +       }
54271         /* Remove all queued USB requests. */
54272         while (!list_empty(&pep->pending_list)) {
54273 @@ -1043,10 +1048,12 @@ static int cdnsp_gadget_ep_disable(struct usb_ep *ep)
54275         cdnsp_endpoint_zero(pdev, pep);
54277 -       ret = cdnsp_update_eps_configuration(pdev, pep);
54278 +       if (!(pep->ep_state & EP_UNCONFIGURED))
54279 +               ret = cdnsp_update_eps_configuration(pdev, pep);
54281         cdnsp_free_endpoint_rings(pdev, pep);
54283 -       pep->ep_state &= ~EP_ENABLED;
54284 +       pep->ep_state &= ~(EP_ENABLED | EP_UNCONFIGURED);
54285         pep->ep_state |= EP_STOPPED;
54287  finish:
54288 diff --git a/drivers/usb/cdns3/cdnsp-gadget.h b/drivers/usb/cdns3/cdnsp-gadget.h
54289 index 6bbb26548c04..783ca8ffde00 100644
54290 --- a/drivers/usb/cdns3/cdnsp-gadget.h
54291 +++ b/drivers/usb/cdns3/cdnsp-gadget.h
54292 @@ -835,6 +835,7 @@ struct cdnsp_ep {
54293  #define EP_WEDGE               BIT(4)
54294  #define EP0_HALTED_STATUS      BIT(5)
54295  #define EP_HAS_STREAMS         BIT(6)
54296 +#define EP_UNCONFIGURED                BIT(7)
54298         bool skip;
54299  };
54300 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
54301 index 3fda1ec961d7..c103961c3fae 100644
54302 --- a/drivers/usb/class/cdc-acm.c
54303 +++ b/drivers/usb/class/cdc-acm.c
54304 @@ -929,8 +929,7 @@ static int get_serial_info(struct tty_struct *tty, struct serial_struct *ss)
54306         struct acm *acm = tty->driver_data;
54308 -       ss->xmit_fifo_size = acm->writesize;
54309 -       ss->baud_base = le32_to_cpu(acm->line.dwDTERate);
54310 +       ss->line = acm->minor;
54311         ss->close_delay = jiffies_to_msecs(acm->port.close_delay) / 10;
54312         ss->closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
54313                                 ASYNC_CLOSING_WAIT_NONE :
54314 @@ -942,7 +941,6 @@ static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss)
54316         struct acm *acm = tty->driver_data;
54317         unsigned int closing_wait, close_delay;
54318 -       unsigned int old_closing_wait, old_close_delay;
54319         int retval = 0;
54321         close_delay = msecs_to_jiffies(ss->close_delay * 10);
54322 @@ -950,20 +948,12 @@ static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss)
54323                         ASYNC_CLOSING_WAIT_NONE :
54324                         msecs_to_jiffies(ss->closing_wait * 10);
54326 -       /* we must redo the rounding here, so that the values match */
54327 -       old_close_delay = jiffies_to_msecs(acm->port.close_delay) / 10;
54328 -       old_closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
54329 -                               ASYNC_CLOSING_WAIT_NONE :
54330 -                               jiffies_to_msecs(acm->port.closing_wait) / 10;
54332         mutex_lock(&acm->port.mutex);
54334         if (!capable(CAP_SYS_ADMIN)) {
54335 -               if ((ss->close_delay != old_close_delay) ||
54336 -                   (ss->closing_wait != old_closing_wait))
54337 +               if ((close_delay != acm->port.close_delay) ||
54338 +                   (closing_wait != acm->port.closing_wait))
54339                         retval = -EPERM;
54340 -               else
54341 -                       retval = -EOPNOTSUPP;
54342         } else {
54343                 acm->port.close_delay  = close_delay;
54344                 acm->port.closing_wait = closing_wait;
54345 @@ -1634,12 +1624,13 @@ static int acm_resume(struct usb_interface *intf)
54346         struct urb *urb;
54347         int rv = 0;
54349 -       acm_unpoison_urbs(acm);
54350         spin_lock_irq(&acm->write_lock);
54352         if (--acm->susp_count)
54353                 goto out;
54355 +       acm_unpoison_urbs(acm);
54357         if (tty_port_initialized(&acm->port)) {
54358                 rv = usb_submit_urb(acm->ctrlurb, GFP_ATOMIC);
54360 diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
54361 index 508b1c3f8b73..d1e4a7379beb 100644
54362 --- a/drivers/usb/class/cdc-wdm.c
54363 +++ b/drivers/usb/class/cdc-wdm.c
54364 @@ -321,12 +321,23 @@ static void wdm_int_callback(struct urb *urb)
54368 -static void kill_urbs(struct wdm_device *desc)
54369 +static void poison_urbs(struct wdm_device *desc)
54371         /* the order here is essential */
54372 -       usb_kill_urb(desc->command);
54373 -       usb_kill_urb(desc->validity);
54374 -       usb_kill_urb(desc->response);
54375 +       usb_poison_urb(desc->command);
54376 +       usb_poison_urb(desc->validity);
54377 +       usb_poison_urb(desc->response);
54380 +static void unpoison_urbs(struct wdm_device *desc)
54382 +       /*
54383 +        *  the order here is not essential
54384 +        *  it is symmetrical just to be nice
54385 +        */
54386 +       usb_unpoison_urb(desc->response);
54387 +       usb_unpoison_urb(desc->validity);
54388 +       usb_unpoison_urb(desc->command);
54391  static void free_urbs(struct wdm_device *desc)
54392 @@ -741,11 +752,12 @@ static int wdm_release(struct inode *inode, struct file *file)
54393         if (!desc->count) {
54394                 if (!test_bit(WDM_DISCONNECTING, &desc->flags)) {
54395                         dev_dbg(&desc->intf->dev, "wdm_release: cleanup\n");
54396 -                       kill_urbs(desc);
54397 +                       poison_urbs(desc);
54398                         spin_lock_irq(&desc->iuspin);
54399                         desc->resp_count = 0;
54400                         spin_unlock_irq(&desc->iuspin);
54401                         desc->manage_power(desc->intf, 0);
54402 +                       unpoison_urbs(desc);
54403                 } else {
54404                         /* must avoid dev_printk here as desc->intf is invalid */
54405                         pr_debug(KBUILD_MODNAME " %s: device gone - cleaning up\n", __func__);
54406 @@ -1037,9 +1049,9 @@ static void wdm_disconnect(struct usb_interface *intf)
54407         wake_up_all(&desc->wait);
54408         mutex_lock(&desc->rlock);
54409         mutex_lock(&desc->wlock);
54410 +       poison_urbs(desc);
54411         cancel_work_sync(&desc->rxwork);
54412         cancel_work_sync(&desc->service_outs_intr);
54413 -       kill_urbs(desc);
54414         mutex_unlock(&desc->wlock);
54415         mutex_unlock(&desc->rlock);
54417 @@ -1080,9 +1092,10 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
54418                 set_bit(WDM_SUSPENDING, &desc->flags);
54419                 spin_unlock_irq(&desc->iuspin);
54420                 /* callback submits work - order is essential */
54421 -               kill_urbs(desc);
54422 +               poison_urbs(desc);
54423                 cancel_work_sync(&desc->rxwork);
54424                 cancel_work_sync(&desc->service_outs_intr);
54425 +               unpoison_urbs(desc);
54426         }
54427         if (!PMSG_IS_AUTO(message)) {
54428                 mutex_unlock(&desc->wlock);
54429 @@ -1140,7 +1153,7 @@ static int wdm_pre_reset(struct usb_interface *intf)
54430         wake_up_all(&desc->wait);
54431         mutex_lock(&desc->rlock);
54432         mutex_lock(&desc->wlock);
54433 -       kill_urbs(desc);
54434 +       poison_urbs(desc);
54435         cancel_work_sync(&desc->rxwork);
54436         cancel_work_sync(&desc->service_outs_intr);
54437         return 0;
54438 @@ -1151,6 +1164,7 @@ static int wdm_post_reset(struct usb_interface *intf)
54439         struct wdm_device *desc = wdm_find_device(intf);
54440         int rv;
54442 +       unpoison_urbs(desc);
54443         clear_bit(WDM_OVERFLOW, &desc->flags);
54444         clear_bit(WDM_RESETTING, &desc->flags);
54445         rv = recover_from_urb_loss(desc);
54446 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
54447 index 7f71218cc1e5..13fe37fbbd2c 100644
54448 --- a/drivers/usb/core/hub.c
54449 +++ b/drivers/usb/core/hub.c
54450 @@ -3556,7 +3556,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
54451         u16             portchange, portstatus;
54453         if (!test_and_set_bit(port1, hub->child_usage_bits)) {
54454 -               status = pm_runtime_get_sync(&port_dev->dev);
54455 +               status = pm_runtime_resume_and_get(&port_dev->dev);
54456                 if (status < 0) {
54457                         dev_dbg(&udev->dev, "can't resume usb port, status %d\n",
54458                                         status);
54459 @@ -3593,9 +3593,6 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
54460                  * sequence.
54461                  */
54462                 status = hub_port_status(hub, port1, &portstatus, &portchange);
54464 -               /* TRSMRCY = 10 msec */
54465 -               msleep(10);
54466         }
54468   SuspendCleared:
54469 @@ -3610,6 +3607,9 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
54470                                 usb_clear_port_feature(hub->hdev, port1,
54471                                                 USB_PORT_FEAT_C_SUSPEND);
54472                 }
54474 +               /* TRSMRCY = 10 msec */
54475 +               msleep(10);
54476         }
54478         if (udev->persist_enabled)
54479 diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
54480 index 76ac5d6555ae..21e7522655ac 100644
54481 --- a/drivers/usb/core/quirks.c
54482 +++ b/drivers/usb/core/quirks.c
54483 @@ -406,6 +406,7 @@ static const struct usb_device_id usb_quirk_list[] = {
54485         /* Realtek hub in Dell WD19 (Type-C) */
54486         { USB_DEVICE(0x0bda, 0x0487), .driver_info = USB_QUIRK_NO_LPM },
54487 +       { USB_DEVICE(0x0bda, 0x5487), .driver_info = USB_QUIRK_RESET_RESUME },
54489         /* Generic RTL8153 based ethernet adapters */
54490         { USB_DEVICE(0x0bda, 0x8153), .driver_info = USB_QUIRK_NO_LPM },
54491 @@ -438,6 +439,9 @@ static const struct usb_device_id usb_quirk_list[] = {
54492         { USB_DEVICE(0x17ef, 0xa012), .driver_info =
54493                         USB_QUIRK_DISCONNECT_SUSPEND },
54495 +       /* Lenovo ThinkPad USB-C Dock Gen2 Ethernet (RTL8153 GigE) */
54496 +       { USB_DEVICE(0x17ef, 0xa387), .driver_info = USB_QUIRK_NO_LPM },
54498         /* BUILDWIN Photo Frame */
54499         { USB_DEVICE(0x1908, 0x1315), .driver_info =
54500                         USB_QUIRK_HONOR_BNUMINTERFACES },
54501 diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
54502 index 7161344c6522..641e4251cb7f 100644
54503 --- a/drivers/usb/dwc2/core.h
54504 +++ b/drivers/usb/dwc2/core.h
54505 @@ -112,6 +112,7 @@ struct dwc2_hsotg_req;
54506   * @debugfs: File entry for debugfs file for this endpoint.
54507   * @dir_in: Set to true if this endpoint is of the IN direction, which
54508   *          means that it is sending data to the Host.
54509 + * @map_dir: Set to the value of dir_in when the DMA buffer is mapped.
54510   * @index: The index for the endpoint registers.
54511   * @mc: Multi Count - number of transactions per microframe
54512   * @interval: Interval for periodic endpoints, in frames or microframes.
54513 @@ -161,6 +162,7 @@ struct dwc2_hsotg_ep {
54514         unsigned short          fifo_index;
54516         unsigned char           dir_in;
54517 +       unsigned char           map_dir;
54518         unsigned char           index;
54519         unsigned char           mc;
54520         u16                     interval;
54521 diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
54522 index 55f1d14fc414..510fd0572feb 100644
54523 --- a/drivers/usb/dwc2/core_intr.c
54524 +++ b/drivers/usb/dwc2/core_intr.c
54525 @@ -307,6 +307,7 @@ static void dwc2_handle_conn_id_status_change_intr(struct dwc2_hsotg *hsotg)
54526  static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg)
54528         int ret;
54529 +       u32 hprt0;
54531         /* Clear interrupt */
54532         dwc2_writel(hsotg, GINTSTS_SESSREQINT, GINTSTS);
54533 @@ -327,6 +328,13 @@ static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg)
54534                  * established
54535                  */
54536                 dwc2_hsotg_disconnect(hsotg);
54537 +       } else {
54538 +               /* Turn on the port power bit. */
54539 +               hprt0 = dwc2_read_hprt0(hsotg);
54540 +               hprt0 |= HPRT0_PWR;
54541 +               dwc2_writel(hsotg, hprt0, HPRT0);
54542 +               /* Connect hcd after port power is set. */
54543 +               dwc2_hcd_connect(hsotg);
54544         }
54547 @@ -652,6 +660,71 @@ static u32 dwc2_read_common_intr(struct dwc2_hsotg *hsotg)
54548                 return 0;
54551 +/**
54552 + * dwc_handle_gpwrdn_disc_det() - Handles the gpwrdn disconnect detect.
54553 + * Exits hibernation without restoring registers.
54554 + *
54555 + * @hsotg: Programming view of DWC_otg controller
54556 + * @gpwrdn: GPWRDN register
54557 + */
54558 +static inline void dwc_handle_gpwrdn_disc_det(struct dwc2_hsotg *hsotg,
54559 +                                             u32 gpwrdn)
54561 +       u32 gpwrdn_tmp;
54563 +       /* Switch-on voltage to the core */
54564 +       gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
54565 +       gpwrdn_tmp &= ~GPWRDN_PWRDNSWTCH;
54566 +       dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
54567 +       udelay(5);
54569 +       /* Reset core */
54570 +       gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
54571 +       gpwrdn_tmp &= ~GPWRDN_PWRDNRSTN;
54572 +       dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
54573 +       udelay(5);
54575 +       /* Disable Power Down Clamp */
54576 +       gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
54577 +       gpwrdn_tmp &= ~GPWRDN_PWRDNCLMP;
54578 +       dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
54579 +       udelay(5);
54581 +       /* Deassert reset core */
54582 +       gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
54583 +       gpwrdn_tmp |= GPWRDN_PWRDNRSTN;
54584 +       dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
54585 +       udelay(5);
54587 +       /* Disable PMU interrupt */
54588 +       gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
54589 +       gpwrdn_tmp &= ~GPWRDN_PMUINTSEL;
54590 +       dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
54592 +       /* De-assert Wakeup Logic */
54593 +       gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
54594 +       gpwrdn_tmp &= ~GPWRDN_PMUACTV;
54595 +       dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
54597 +       hsotg->hibernated = 0;
54598 +       hsotg->bus_suspended = 0;
54600 +       if (gpwrdn & GPWRDN_IDSTS) {
54601 +               hsotg->op_state = OTG_STATE_B_PERIPHERAL;
54602 +               dwc2_core_init(hsotg, false);
54603 +               dwc2_enable_global_interrupts(hsotg);
54604 +               dwc2_hsotg_core_init_disconnected(hsotg, false);
54605 +               dwc2_hsotg_core_connect(hsotg);
54606 +       } else {
54607 +               hsotg->op_state = OTG_STATE_A_HOST;
54609 +               /* Initialize the Core for Host mode */
54610 +               dwc2_core_init(hsotg, false);
54611 +               dwc2_enable_global_interrupts(hsotg);
54612 +               dwc2_hcd_start(hsotg);
54613 +       }
54616  /*
54617   * GPWRDN interrupt handler.
54618   *
54619 @@ -673,64 +746,14 @@ static void dwc2_handle_gpwrdn_intr(struct dwc2_hsotg *hsotg)
54621         if ((gpwrdn & GPWRDN_DISCONN_DET) &&
54622             (gpwrdn & GPWRDN_DISCONN_DET_MSK) && !linestate) {
54623 -               u32 gpwrdn_tmp;
54625                 dev_dbg(hsotg->dev, "%s: GPWRDN_DISCONN_DET\n", __func__);
54627 -               /* Switch-on voltage to the core */
54628 -               gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
54629 -               gpwrdn_tmp &= ~GPWRDN_PWRDNSWTCH;
54630 -               dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
54631 -               udelay(10);
54633 -               /* Reset core */
54634 -               gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
54635 -               gpwrdn_tmp &= ~GPWRDN_PWRDNRSTN;
54636 -               dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
54637 -               udelay(10);
54639 -               /* Disable Power Down Clamp */
54640 -               gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
54641 -               gpwrdn_tmp &= ~GPWRDN_PWRDNCLMP;
54642 -               dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
54643 -               udelay(10);
54645 -               /* Deassert reset core */
54646 -               gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
54647 -               gpwrdn_tmp |= GPWRDN_PWRDNRSTN;
54648 -               dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
54649 -               udelay(10);
54651 -               /* Disable PMU interrupt */
54652 -               gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
54653 -               gpwrdn_tmp &= ~GPWRDN_PMUINTSEL;
54654 -               dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
54656 -               /* De-assert Wakeup Logic */
54657 -               gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
54658 -               gpwrdn_tmp &= ~GPWRDN_PMUACTV;
54659 -               dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
54661 -               hsotg->hibernated = 0;
54663 -               if (gpwrdn & GPWRDN_IDSTS) {
54664 -                       hsotg->op_state = OTG_STATE_B_PERIPHERAL;
54665 -                       dwc2_core_init(hsotg, false);
54666 -                       dwc2_enable_global_interrupts(hsotg);
54667 -                       dwc2_hsotg_core_init_disconnected(hsotg, false);
54668 -                       dwc2_hsotg_core_connect(hsotg);
54669 -               } else {
54670 -                       hsotg->op_state = OTG_STATE_A_HOST;
54672 -                       /* Initialize the Core for Host mode */
54673 -                       dwc2_core_init(hsotg, false);
54674 -                       dwc2_enable_global_interrupts(hsotg);
54675 -                       dwc2_hcd_start(hsotg);
54676 -               }
54677 -       }
54679 -       if ((gpwrdn & GPWRDN_LNSTSCHG) &&
54680 -           (gpwrdn & GPWRDN_LNSTSCHG_MSK) && linestate) {
54681 +               /*
54682 +                * Call disconnect detect function to exit from
54683 +                * hibernation
54684 +                */
54685 +               dwc_handle_gpwrdn_disc_det(hsotg, gpwrdn);
54686 +       } else if ((gpwrdn & GPWRDN_LNSTSCHG) &&
54687 +                  (gpwrdn & GPWRDN_LNSTSCHG_MSK) && linestate) {
54688                 dev_dbg(hsotg->dev, "%s: GPWRDN_LNSTSCHG\n", __func__);
54689                 if (hsotg->hw_params.hibernation &&
54690                     hsotg->hibernated) {
54691 @@ -741,24 +764,21 @@ static void dwc2_handle_gpwrdn_intr(struct dwc2_hsotg *hsotg)
54692                                 dwc2_exit_hibernation(hsotg, 1, 0, 1);
54693                         }
54694                 }
54695 -       }
54696 -       if ((gpwrdn & GPWRDN_RST_DET) && (gpwrdn & GPWRDN_RST_DET_MSK)) {
54697 +       } else if ((gpwrdn & GPWRDN_RST_DET) &&
54698 +                  (gpwrdn & GPWRDN_RST_DET_MSK)) {
54699                 dev_dbg(hsotg->dev, "%s: GPWRDN_RST_DET\n", __func__);
54700                 if (!linestate && (gpwrdn & GPWRDN_BSESSVLD))
54701                         dwc2_exit_hibernation(hsotg, 0, 1, 0);
54702 -       }
54703 -       if ((gpwrdn & GPWRDN_STS_CHGINT) &&
54704 -           (gpwrdn & GPWRDN_STS_CHGINT_MSK) && linestate) {
54705 +       } else if ((gpwrdn & GPWRDN_STS_CHGINT) &&
54706 +                  (gpwrdn & GPWRDN_STS_CHGINT_MSK)) {
54707                 dev_dbg(hsotg->dev, "%s: GPWRDN_STS_CHGINT\n", __func__);
54708 -               if (hsotg->hw_params.hibernation &&
54709 -                   hsotg->hibernated) {
54710 -                       if (gpwrdn & GPWRDN_IDSTS) {
54711 -                               dwc2_exit_hibernation(hsotg, 0, 0, 0);
54712 -                               call_gadget(hsotg, resume);
54713 -                       } else {
54714 -                               dwc2_exit_hibernation(hsotg, 1, 0, 1);
54715 -                       }
54716 -               }
54717 +               /*
54718 +                * As GPWRDN_STS_CHGINT exit from hibernation flow is
54719 +                * the same as in GPWRDN_DISCONN_DET flow. Call
54720 +                * disconnect detect helper function to exit from
54721 +                * hibernation.
54722 +                */
54723 +               dwc_handle_gpwrdn_disc_det(hsotg, gpwrdn);
54724         }
54727 diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
54728 index ad4c94366dad..d2f623d83bf7 100644
54729 --- a/drivers/usb/dwc2/gadget.c
54730 +++ b/drivers/usb/dwc2/gadget.c
54731 @@ -422,7 +422,7 @@ static void dwc2_hsotg_unmap_dma(struct dwc2_hsotg *hsotg,
54733         struct usb_request *req = &hs_req->req;
54735 -       usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in);
54736 +       usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->map_dir);
54739  /*
54740 @@ -1242,6 +1242,7 @@ static int dwc2_hsotg_map_dma(struct dwc2_hsotg *hsotg,
54742         int ret;
54744 +       hs_ep->map_dir = hs_ep->dir_in;
54745         ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in);
54746         if (ret)
54747                 goto dma_error;
54748 diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
54749 index 1a9789ec5847..6af1dcbc3656 100644
54750 --- a/drivers/usb/dwc2/hcd.c
54751 +++ b/drivers/usb/dwc2/hcd.c
54752 @@ -5580,7 +5580,15 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
54753                 return ret;
54754         }
54756 -       dwc2_hcd_rem_wakeup(hsotg);
54757 +       if (rem_wakeup) {
54758 +               dwc2_hcd_rem_wakeup(hsotg);
54759 +               /*
54760 +                * Change "port_connect_status_change" flag to re-enumerate,
54761 +                * because after exit from hibernation port connection status
54762 +                * is not detected.
54763 +                */
54764 +               hsotg->flags.b.port_connect_status_change = 1;
54765 +       }
54767         hsotg->hibernated = 0;
54768         hsotg->bus_suspended = 0;
54769 diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
54770 index f2448d0a9d39..126f0e10b3ef 100644
54771 --- a/drivers/usb/dwc3/core.c
54772 +++ b/drivers/usb/dwc3/core.c
54773 @@ -114,6 +114,8 @@ void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode)
54774         dwc->current_dr_role = mode;
54777 +static int dwc3_core_soft_reset(struct dwc3 *dwc);
54779  static void __dwc3_set_mode(struct work_struct *work)
54781         struct dwc3 *dwc = work_to_dwc(work);
54782 @@ -121,6 +123,8 @@ static void __dwc3_set_mode(struct work_struct *work)
54783         int ret;
54784         u32 reg;
54786 +       mutex_lock(&dwc->mutex);
54788         pm_runtime_get_sync(dwc->dev);
54790         if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_OTG)
54791 @@ -154,6 +158,25 @@ static void __dwc3_set_mode(struct work_struct *work)
54792                 break;
54793         }
54795 +       /* For DRD host or device mode only */
54796 +       if (dwc->desired_dr_role != DWC3_GCTL_PRTCAP_OTG) {
54797 +               reg = dwc3_readl(dwc->regs, DWC3_GCTL);
54798 +               reg |= DWC3_GCTL_CORESOFTRESET;
54799 +               dwc3_writel(dwc->regs, DWC3_GCTL, reg);
54801 +               /*
54802 +                * Wait for internal clocks to synchronized. DWC_usb31 and
54803 +                * DWC_usb32 may need at least 50ms (less for DWC_usb3). To
54804 +                * keep it consistent across different IPs, let's wait up to
54805 +                * 100ms before clearing GCTL.CORESOFTRESET.
54806 +                */
54807 +               msleep(100);
54809 +               reg = dwc3_readl(dwc->regs, DWC3_GCTL);
54810 +               reg &= ~DWC3_GCTL_CORESOFTRESET;
54811 +               dwc3_writel(dwc->regs, DWC3_GCTL, reg);
54812 +       }
54814         spin_lock_irqsave(&dwc->lock, flags);
54816         dwc3_set_prtcap(dwc, dwc->desired_dr_role);
54817 @@ -178,6 +201,8 @@ static void __dwc3_set_mode(struct work_struct *work)
54818                 }
54819                 break;
54820         case DWC3_GCTL_PRTCAP_DEVICE:
54821 +               dwc3_core_soft_reset(dwc);
54823                 dwc3_event_buffers_setup(dwc);
54825                 if (dwc->usb2_phy)
54826 @@ -200,6 +225,7 @@ static void __dwc3_set_mode(struct work_struct *work)
54827  out:
54828         pm_runtime_mark_last_busy(dwc->dev);
54829         pm_runtime_put_autosuspend(dwc->dev);
54830 +       mutex_unlock(&dwc->mutex);
54833  void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
54834 @@ -1277,6 +1303,8 @@ static void dwc3_get_properties(struct dwc3 *dwc)
54835                                 "snps,usb3_lpm_capable");
54836         dwc->usb2_lpm_disable = device_property_read_bool(dev,
54837                                 "snps,usb2-lpm-disable");
54838 +       dwc->usb2_gadget_lpm_disable = device_property_read_bool(dev,
54839 +                               "snps,usb2-gadget-lpm-disable");
54840         device_property_read_u8(dev, "snps,rx-thr-num-pkt-prd",
54841                                 &rx_thr_num_pkt_prd);
54842         device_property_read_u8(dev, "snps,rx-max-burst-prd",
54843 @@ -1543,6 +1571,7 @@ static int dwc3_probe(struct platform_device *pdev)
54844         dwc3_cache_hwparams(dwc);
54846         spin_lock_init(&dwc->lock);
54847 +       mutex_init(&dwc->mutex);
54849         pm_runtime_set_active(dev);
54850         pm_runtime_use_autosuspend(dev);
54851 diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
54852 index 052b20d52651..453cfebd4d04 100644
54853 --- a/drivers/usb/dwc3/core.h
54854 +++ b/drivers/usb/dwc3/core.h
54855 @@ -13,6 +13,7 @@
54857  #include <linux/device.h>
54858  #include <linux/spinlock.h>
54859 +#include <linux/mutex.h>
54860  #include <linux/ioport.h>
54861  #include <linux/list.h>
54862  #include <linux/bitops.h>
54863 @@ -946,6 +947,7 @@ struct dwc3_scratchpad_array {
54864   * @scratch_addr: dma address of scratchbuf
54865   * @ep0_in_setup: one control transfer is completed and enter setup phase
54866   * @lock: for synchronizing
54867 + * @mutex: for mode switching
54868   * @dev: pointer to our struct device
54869   * @sysdev: pointer to the DMA-capable device
54870   * @xhci: pointer to our xHCI child
54871 @@ -1034,7 +1036,8 @@ struct dwc3_scratchpad_array {
54872   * @dis_start_transfer_quirk: set if start_transfer failure SW workaround is
54873   *                     not needed for DWC_usb31 version 1.70a-ea06 and below
54874   * @usb3_lpm_capable: set if hadrware supports Link Power Management
54875 - * @usb2_lpm_disable: set to disable usb2 lpm
54876 + * @usb2_lpm_disable: set to disable usb2 lpm for host
54877 + * @usb2_gadget_lpm_disable: set to disable usb2 lpm for gadget
54878   * @disable_scramble_quirk: set if we enable the disable scramble quirk
54879   * @u2exit_lfps_quirk: set if we enable u2exit lfps quirk
54880   * @u2ss_inp3_quirk: set if we enable P3 OK for U2/SS Inactive quirk
54881 @@ -1085,6 +1088,9 @@ struct dwc3 {
54882         /* device lock */
54883         spinlock_t              lock;
54885 +       /* mode switching lock */
54886 +       struct mutex            mutex;
54888         struct device           *dev;
54889         struct device           *sysdev;
54891 @@ -1238,6 +1244,7 @@ struct dwc3 {
54892         unsigned                dis_start_transfer_quirk:1;
54893         unsigned                usb3_lpm_capable:1;
54894         unsigned                usb2_lpm_disable:1;
54895 +       unsigned                usb2_gadget_lpm_disable:1;
54897         unsigned                disable_scramble_quirk:1;
54898         unsigned                u2exit_lfps_quirk:1;
54899 diff --git a/drivers/usb/dwc3/dwc3-imx8mp.c b/drivers/usb/dwc3/dwc3-imx8mp.c
54900 index 75f0042b998b..84c1a4ac2444 100644
54901 --- a/drivers/usb/dwc3/dwc3-imx8mp.c
54902 +++ b/drivers/usb/dwc3/dwc3-imx8mp.c
54903 @@ -167,6 +167,7 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev)
54905         dwc3_np = of_get_child_by_name(node, "dwc3");
54906         if (!dwc3_np) {
54907 +               err = -ENODEV;
54908                 dev_err(dev, "failed to find dwc3 core child\n");
54909                 goto disable_rpm;
54910         }
54911 diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
54912 index 3db17806e92e..e196673f5c64 100644
54913 --- a/drivers/usb/dwc3/dwc3-omap.c
54914 +++ b/drivers/usb/dwc3/dwc3-omap.c
54915 @@ -437,8 +437,13 @@ static int dwc3_omap_extcon_register(struct dwc3_omap *omap)
54917                 if (extcon_get_state(edev, EXTCON_USB) == true)
54918                         dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_VALID);
54919 +               else
54920 +                       dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_OFF);
54922                 if (extcon_get_state(edev, EXTCON_USB_HOST) == true)
54923                         dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_GROUND);
54924 +               else
54925 +                       dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_FLOAT);
54927                 omap->edev = edev;
54928         }
54929 diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
54930 index 4c5c6972124a..19789e94bbd0 100644
54931 --- a/drivers/usb/dwc3/dwc3-pci.c
54932 +++ b/drivers/usb/dwc3/dwc3-pci.c
54933 @@ -41,6 +41,7 @@
54934  #define PCI_DEVICE_ID_INTEL_TGPH               0x43ee
54935  #define PCI_DEVICE_ID_INTEL_JSP                        0x4dee
54936  #define PCI_DEVICE_ID_INTEL_ADLP               0x51ee
54937 +#define PCI_DEVICE_ID_INTEL_ADLM               0x54ee
54938  #define PCI_DEVICE_ID_INTEL_ADLS               0x7ae1
54939  #define PCI_DEVICE_ID_INTEL_TGL                        0x9a15
54941 @@ -122,6 +123,7 @@ static const struct property_entry dwc3_pci_mrfld_properties[] = {
54942         PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"),
54943         PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
54944         PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
54945 +       PROPERTY_ENTRY_BOOL("snps,usb2-gadget-lpm-disable"),
54946         PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
54947         {}
54948  };
54949 @@ -388,6 +390,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
54950         { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLP),
54951           (kernel_ulong_t) &dwc3_pci_intel_swnode, },
54953 +       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLM),
54954 +         (kernel_ulong_t) &dwc3_pci_intel_swnode, },
54956         { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLS),
54957           (kernel_ulong_t) &dwc3_pci_intel_swnode, },
54959 diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
54960 index c7ef218e7a8c..8585b56d9f2d 100644
54961 --- a/drivers/usb/dwc3/gadget.c
54962 +++ b/drivers/usb/dwc3/gadget.c
54963 @@ -308,13 +308,12 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
54964         }
54966         if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
54967 -               int             needs_wakeup;
54968 +               int link_state;
54970 -               needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 ||
54971 -                               dwc->link_state == DWC3_LINK_STATE_U2 ||
54972 -                               dwc->link_state == DWC3_LINK_STATE_U3);
54974 -               if (unlikely(needs_wakeup)) {
54975 +               link_state = dwc3_gadget_get_link_state(dwc);
54976 +               if (link_state == DWC3_LINK_STATE_U1 ||
54977 +                   link_state == DWC3_LINK_STATE_U2 ||
54978 +                   link_state == DWC3_LINK_STATE_U3) {
54979                         ret = __dwc3_gadget_wakeup(dwc);
54980                         dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
54981                                         ret);
54982 @@ -608,12 +607,14 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
54983                 u8 bInterval_m1;
54985                 /*
54986 -                * Valid range for DEPCFG.bInterval_m1 is from 0 to 13, and it
54987 -                * must be set to 0 when the controller operates in full-speed.
54988 +                * Valid range for DEPCFG.bInterval_m1 is from 0 to 13.
54989 +                *
54990 +                * NOTE: The programming guide incorrectly stated bInterval_m1
54991 +                * must be set to 0 when operating in fullspeed. Internally the
54992 +                * controller does not have this limitation. See DWC_usb3x
54993 +                * programming guide section 3.2.2.1.
54994                  */
54995                 bInterval_m1 = min_t(u8, desc->bInterval - 1, 13);
54996 -               if (dwc->gadget->speed == USB_SPEED_FULL)
54997 -                       bInterval_m1 = 0;
54999                 if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT &&
55000                     dwc->gadget->speed == USB_SPEED_FULL)
55001 @@ -1675,7 +1676,9 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
55002                 }
55003         }
55005 -       return __dwc3_gadget_kick_transfer(dep);
55006 +       __dwc3_gadget_kick_transfer(dep);
55008 +       return 0;
55011  static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
55012 @@ -1973,6 +1976,8 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
55013         case DWC3_LINK_STATE_RESET:
55014         case DWC3_LINK_STATE_RX_DET:    /* in HS, means Early Suspend */
55015         case DWC3_LINK_STATE_U3:        /* in HS, means SUSPEND */
55016 +       case DWC3_LINK_STATE_U2:        /* in HS, means Sleep (L1) */
55017 +       case DWC3_LINK_STATE_U1:
55018         case DWC3_LINK_STATE_RESUME:
55019                 break;
55020         default:
55021 @@ -2299,6 +2304,10 @@ static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
55022         if (DWC3_VER_IS_PRIOR(DWC3, 250A))
55023                 reg |= DWC3_DEVTEN_ULSTCNGEN;
55025 +       /* On 2.30a and above this bit enables U3/L2-L1 Suspend Events */
55026 +       if (!DWC3_VER_IS_PRIOR(DWC3, 230A))
55027 +               reg |= DWC3_DEVTEN_EOPFEN;
55029         dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
55032 @@ -3322,6 +3331,15 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
55034         u32                     reg;
55036 +       /*
55037 +        * Ideally, dwc3_reset_gadget() would trigger the function
55038 +        * drivers to stop any active transfers through ep disable.
55039 +        * However, for functions which defer ep disable, such as mass
55040 +        * storage, we will need to rely on the call to stop active
55041 +        * transfers here, and avoid allowing of request queuing.
55042 +        */
55043 +       dwc->connected = false;
55045         /*
55046          * WORKAROUND: DWC3 revisions <1.88a have an issue which
55047          * would cause a missing Disconnect Event if there's a
55048 @@ -3460,6 +3478,7 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
55049         /* Enable USB2 LPM Capability */
55051         if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A) &&
55052 +           !dwc->usb2_gadget_lpm_disable &&
55053             (speed != DWC3_DSTS_SUPERSPEED) &&
55054             (speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
55055                 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
55056 @@ -3486,6 +3505,12 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
55058                 dwc3_gadget_dctl_write_safe(dwc, reg);
55059         } else {
55060 +               if (dwc->usb2_gadget_lpm_disable) {
55061 +                       reg = dwc3_readl(dwc->regs, DWC3_DCFG);
55062 +                       reg &= ~DWC3_DCFG_LPM_CAP;
55063 +                       dwc3_writel(dwc->regs, DWC3_DCFG, reg);
55064 +               }
55066                 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
55067                 reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
55068                 dwc3_gadget_dctl_write_safe(dwc, reg);
55069 @@ -3934,7 +3959,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
55070         dwc->gadget->ssp_rate           = USB_SSP_GEN_UNKNOWN;
55071         dwc->gadget->sg_supported       = true;
55072         dwc->gadget->name               = "dwc3-gadget";
55073 -       dwc->gadget->lpm_capable        = true;
55074 +       dwc->gadget->lpm_capable        = !dwc->usb2_gadget_lpm_disable;
55076         /*
55077          * FIXME We might be setting max_speed to <SUPER, however versions
55078 @@ -4005,8 +4030,9 @@ int dwc3_gadget_init(struct dwc3 *dwc)
55080  void dwc3_gadget_exit(struct dwc3 *dwc)
55082 -       usb_del_gadget_udc(dwc->gadget);
55083 +       usb_del_gadget(dwc->gadget);
55084         dwc3_gadget_free_endpoints(dwc);
55085 +       usb_put_gadget(dwc->gadget);
55086         dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
55087                           dwc->bounce_addr);
55088         kfree(dwc->setup_buf);
55089 diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c
55090 index 2d115353424c..8bb25773b61e 100644
55091 --- a/drivers/usb/gadget/config.c
55092 +++ b/drivers/usb/gadget/config.c
55093 @@ -194,9 +194,13 @@ EXPORT_SYMBOL_GPL(usb_assign_descriptors);
55094  void usb_free_all_descriptors(struct usb_function *f)
55096         usb_free_descriptors(f->fs_descriptors);
55097 +       f->fs_descriptors = NULL;
55098         usb_free_descriptors(f->hs_descriptors);
55099 +       f->hs_descriptors = NULL;
55100         usb_free_descriptors(f->ss_descriptors);
55101 +       f->ss_descriptors = NULL;
55102         usb_free_descriptors(f->ssp_descriptors);
55103 +       f->ssp_descriptors = NULL;
55105  EXPORT_SYMBOL_GPL(usb_free_all_descriptors);
55107 diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
55108 index 801a8b668a35..10a5d9f0f2b9 100644
55109 --- a/drivers/usb/gadget/function/f_fs.c
55110 +++ b/drivers/usb/gadget/function/f_fs.c
55111 @@ -2640,6 +2640,7 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
55113         do { /* lang_count > 0 so we can use do-while */
55114                 unsigned needed = needed_count;
55115 +               u32 str_per_lang = str_count;
55117                 if (len < 3)
55118                         goto error_free;
55119 @@ -2675,7 +2676,7 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
55121                         data += length + 1;
55122                         len -= length + 1;
55123 -               } while (--str_count);
55124 +               } while (--str_per_lang);
55126                 s->id = 0;   /* terminator */
55127                 s->s = NULL;
55128 diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
55129 index 560382e0a8f3..e65f474ad7b3 100644
55130 --- a/drivers/usb/gadget/function/f_uac1.c
55131 +++ b/drivers/usb/gadget/function/f_uac1.c
55132 @@ -19,6 +19,9 @@
55133  #include "u_audio.h"
55134  #include "u_uac1.h"
55136 +/* UAC1 spec: 3.7.2.3 Audio Channel Cluster Format */
55137 +#define UAC1_CHANNEL_MASK 0x0FFF
55139  struct f_uac1 {
55140         struct g_audio g_audio;
55141         u8 ac_intf, as_in_intf, as_out_intf;
55142 @@ -30,6 +33,11 @@ static inline struct f_uac1 *func_to_uac1(struct usb_function *f)
55143         return container_of(f, struct f_uac1, g_audio.func);
55146 +static inline struct f_uac1_opts *g_audio_to_uac1_opts(struct g_audio *audio)
55148 +       return container_of(audio->func.fi, struct f_uac1_opts, func_inst);
55151  /*
55152   * DESCRIPTORS ... most are static, but strings and full
55153   * configuration descriptors are built on demand.
55154 @@ -505,11 +513,42 @@ static void f_audio_disable(struct usb_function *f)
55156  /*-------------------------------------------------------------------------*/
55158 +static int f_audio_validate_opts(struct g_audio *audio, struct device *dev)
55160 +       struct f_uac1_opts *opts = g_audio_to_uac1_opts(audio);
55162 +       if (!opts->p_chmask && !opts->c_chmask) {
55163 +               dev_err(dev, "Error: no playback and capture channels\n");
55164 +               return -EINVAL;
55165 +       } else if (opts->p_chmask & ~UAC1_CHANNEL_MASK) {
55166 +               dev_err(dev, "Error: unsupported playback channels mask\n");
55167 +               return -EINVAL;
55168 +       } else if (opts->c_chmask & ~UAC1_CHANNEL_MASK) {
55169 +               dev_err(dev, "Error: unsupported capture channels mask\n");
55170 +               return -EINVAL;
55171 +       } else if ((opts->p_ssize < 1) || (opts->p_ssize > 4)) {
55172 +               dev_err(dev, "Error: incorrect playback sample size\n");
55173 +               return -EINVAL;
55174 +       } else if ((opts->c_ssize < 1) || (opts->c_ssize > 4)) {
55175 +               dev_err(dev, "Error: incorrect capture sample size\n");
55176 +               return -EINVAL;
55177 +       } else if (!opts->p_srate) {
55178 +               dev_err(dev, "Error: incorrect playback sampling rate\n");
55179 +               return -EINVAL;
55180 +       } else if (!opts->c_srate) {
55181 +               dev_err(dev, "Error: incorrect capture sampling rate\n");
55182 +               return -EINVAL;
55183 +       }
55185 +       return 0;
55188  /* audio function driver setup/binding */
55189  static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
55191         struct usb_composite_dev        *cdev = c->cdev;
55192         struct usb_gadget               *gadget = cdev->gadget;
55193 +       struct device                   *dev = &gadget->dev;
55194         struct f_uac1                   *uac1 = func_to_uac1(f);
55195         struct g_audio                  *audio = func_to_g_audio(f);
55196         struct f_uac1_opts              *audio_opts;
55197 @@ -519,6 +558,10 @@ static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
55198         int                             rate;
55199         int                             status;
55201 +       status = f_audio_validate_opts(audio, dev);
55202 +       if (status)
55203 +               return status;
55205         audio_opts = container_of(f->fi, struct f_uac1_opts, func_inst);
55207         us = usb_gstrings_attach(cdev, uac1_strings, ARRAY_SIZE(strings_uac1));
55208 diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
55209 index 6f03e944e0e3..dd960cea642f 100644
55210 --- a/drivers/usb/gadget/function/f_uac2.c
55211 +++ b/drivers/usb/gadget/function/f_uac2.c
55212 @@ -14,6 +14,9 @@
55213  #include "u_audio.h"
55214  #include "u_uac2.h"
55216 +/* UAC2 spec: 4.1 Audio Channel Cluster Descriptor */
55217 +#define UAC2_CHANNEL_MASK 0x07FFFFFF
55219  /*
55220   * The driver implements a simple UAC_2 topology.
55221   * USB-OUT -> IT_1 -> OT_3 -> ALSA_Capture
55222 @@ -604,6 +607,36 @@ static void setup_descriptor(struct f_uac2_opts *opts)
55223         hs_audio_desc[i] = NULL;
55226 +static int afunc_validate_opts(struct g_audio *agdev, struct device *dev)
55228 +       struct f_uac2_opts *opts = g_audio_to_uac2_opts(agdev);
55230 +       if (!opts->p_chmask && !opts->c_chmask) {
55231 +               dev_err(dev, "Error: no playback and capture channels\n");
55232 +               return -EINVAL;
55233 +       } else if (opts->p_chmask & ~UAC2_CHANNEL_MASK) {
55234 +               dev_err(dev, "Error: unsupported playback channels mask\n");
55235 +               return -EINVAL;
55236 +       } else if (opts->c_chmask & ~UAC2_CHANNEL_MASK) {
55237 +               dev_err(dev, "Error: unsupported capture channels mask\n");
55238 +               return -EINVAL;
55239 +       } else if ((opts->p_ssize < 1) || (opts->p_ssize > 4)) {
55240 +               dev_err(dev, "Error: incorrect playback sample size\n");
55241 +               return -EINVAL;
55242 +       } else if ((opts->c_ssize < 1) || (opts->c_ssize > 4)) {
55243 +               dev_err(dev, "Error: incorrect capture sample size\n");
55244 +               return -EINVAL;
55245 +       } else if (!opts->p_srate) {
55246 +               dev_err(dev, "Error: incorrect playback sampling rate\n");
55247 +               return -EINVAL;
55248 +       } else if (!opts->c_srate) {
55249 +               dev_err(dev, "Error: incorrect capture sampling rate\n");
55250 +               return -EINVAL;
55251 +       }
55253 +       return 0;
55256  static int
55257  afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
55259 @@ -612,11 +645,13 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
55260         struct usb_composite_dev *cdev = cfg->cdev;
55261         struct usb_gadget *gadget = cdev->gadget;
55262         struct device *dev = &gadget->dev;
55263 -       struct f_uac2_opts *uac2_opts;
55264 +       struct f_uac2_opts *uac2_opts = g_audio_to_uac2_opts(agdev);
55265         struct usb_string *us;
55266         int ret;
55268 -       uac2_opts = container_of(fn->fi, struct f_uac2_opts, func_inst);
55269 +       ret = afunc_validate_opts(agdev, dev);
55270 +       if (ret)
55271 +               return ret;
55273         us = usb_gstrings_attach(cdev, fn_strings, ARRAY_SIZE(strings_fn));
55274         if (IS_ERR(us))
55275 diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
55276 index 44b4352a2676..f48a00e49794 100644
55277 --- a/drivers/usb/gadget/function/f_uvc.c
55278 +++ b/drivers/usb/gadget/function/f_uvc.c
55279 @@ -633,7 +633,12 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
55281         uvc_hs_streaming_ep.wMaxPacketSize =
55282                 cpu_to_le16(max_packet_size | ((max_packet_mult - 1) << 11));
55283 -       uvc_hs_streaming_ep.bInterval = opts->streaming_interval;
55285 +       /* A high-bandwidth endpoint must specify a bInterval value of 1 */
55286 +       if (max_packet_mult > 1)
55287 +               uvc_hs_streaming_ep.bInterval = 1;
55288 +       else
55289 +               uvc_hs_streaming_ep.bInterval = opts->streaming_interval;
55291         uvc_ss_streaming_ep.wMaxPacketSize = cpu_to_le16(max_packet_size);
55292         uvc_ss_streaming_ep.bInterval = opts->streaming_interval;
55293 @@ -817,6 +822,7 @@ static struct usb_function_instance *uvc_alloc_inst(void)
55294         pd->bmControls[0]               = 1;
55295         pd->bmControls[1]               = 0;
55296         pd->iProcessing                 = 0;
55297 +       pd->bmVideoStandards            = 0;
55299         od = &opts->uvc_output_terminal;
55300         od->bLength                     = UVC_DT_OUTPUT_TERMINAL_SIZE;
55301 diff --git a/drivers/usb/gadget/legacy/webcam.c b/drivers/usb/gadget/legacy/webcam.c
55302 index a9f8eb8e1c76..2c9eab2b863d 100644
55303 --- a/drivers/usb/gadget/legacy/webcam.c
55304 +++ b/drivers/usb/gadget/legacy/webcam.c
55305 @@ -125,6 +125,7 @@ static const struct uvc_processing_unit_descriptor uvc_processing = {
55306         .bmControls[0]          = 1,
55307         .bmControls[1]          = 0,
55308         .iProcessing            = 0,
55309 +       .bmVideoStandards       = 0,
55310  };
55312  static const struct uvc_output_terminal_descriptor uvc_output_terminal = {
55313 diff --git a/drivers/usb/gadget/udc/aspeed-vhub/core.c b/drivers/usb/gadget/udc/aspeed-vhub/core.c
55314 index be7bb64e3594..d11d3d14313f 100644
55315 --- a/drivers/usb/gadget/udc/aspeed-vhub/core.c
55316 +++ b/drivers/usb/gadget/udc/aspeed-vhub/core.c
55317 @@ -36,6 +36,7 @@ void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
55318                    int status)
55320         bool internal = req->internal;
55321 +       struct ast_vhub *vhub = ep->vhub;
55323         EPVDBG(ep, "completing request @%p, status %d\n", req, status);
55325 @@ -46,7 +47,7 @@ void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
55327         if (req->req.dma) {
55328                 if (!WARN_ON(!ep->dev))
55329 -                       usb_gadget_unmap_request(&ep->dev->gadget,
55330 +                       usb_gadget_unmap_request_by_dev(&vhub->pdev->dev,
55331                                                  &req->req, ep->epn.is_in);
55332                 req->req.dma = 0;
55333         }
55334 diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
55335 index 02d8bfae58fb..cb164c615e6f 100644
55336 --- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c
55337 +++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
55338 @@ -376,7 +376,7 @@ static int ast_vhub_epn_queue(struct usb_ep* u_ep, struct usb_request *u_req,
55339         if (ep->epn.desc_mode ||
55340             ((((unsigned long)u_req->buf & 7) == 0) &&
55341              (ep->epn.is_in || !(u_req->length & (u_ep->maxpacket - 1))))) {
55342 -               rc = usb_gadget_map_request(&ep->dev->gadget, u_req,
55343 +               rc = usb_gadget_map_request_by_dev(&vhub->pdev->dev, u_req,
55344                                             ep->epn.is_in);
55345                 if (rc) {
55346                         dev_warn(&vhub->pdev->dev,
55347 diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
55348 index 57067763b100..5f474ffe2be1 100644
55349 --- a/drivers/usb/gadget/udc/dummy_hcd.c
55350 +++ b/drivers/usb/gadget/udc/dummy_hcd.c
55351 @@ -903,6 +903,21 @@ static int dummy_pullup(struct usb_gadget *_gadget, int value)
55352         spin_lock_irqsave(&dum->lock, flags);
55353         dum->pullup = (value != 0);
55354         set_link_state(dum_hcd);
55355 +       if (value == 0) {
55356 +               /*
55357 +                * Emulate synchronize_irq(): wait for callbacks to finish.
55358 +                * This seems to be the best place to emulate the call to
55359 +                * synchronize_irq() that's in usb_gadget_remove_driver().
55360 +                * Doing it in dummy_udc_stop() would be too late since it
55361 +                * is called after the unbind callback and unbind shouldn't
55362 +                * be invoked until all the other callbacks are finished.
55363 +                */
55364 +               while (dum->callback_usage > 0) {
55365 +                       spin_unlock_irqrestore(&dum->lock, flags);
55366 +                       usleep_range(1000, 2000);
55367 +                       spin_lock_irqsave(&dum->lock, flags);
55368 +               }
55369 +       }
55370         spin_unlock_irqrestore(&dum->lock, flags);
55372         usb_hcd_poll_rh_status(dummy_hcd_to_hcd(dum_hcd));
55373 @@ -1004,14 +1019,6 @@ static int dummy_udc_stop(struct usb_gadget *g)
55374         spin_lock_irq(&dum->lock);
55375         dum->ints_enabled = 0;
55376         stop_activity(dum);
55378 -       /* emulate synchronize_irq(): wait for callbacks to finish */
55379 -       while (dum->callback_usage > 0) {
55380 -               spin_unlock_irq(&dum->lock);
55381 -               usleep_range(1000, 2000);
55382 -               spin_lock_irq(&dum->lock);
55383 -       }
55385         dum->driver = NULL;
55386         spin_unlock_irq(&dum->lock);
55388 diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
55389 index d6ca50f01985..75bf446f4a66 100644
55390 --- a/drivers/usb/gadget/udc/fotg210-udc.c
55391 +++ b/drivers/usb/gadget/udc/fotg210-udc.c
55392 @@ -338,15 +338,16 @@ static void fotg210_start_dma(struct fotg210_ep *ep,
55393                 } else {
55394                         buffer = req->req.buf + req->req.actual;
55395                         length = ioread32(ep->fotg210->reg +
55396 -                                       FOTG210_FIBCR(ep->epnum - 1));
55397 -                       length &= FIBCR_BCFX;
55398 +                                       FOTG210_FIBCR(ep->epnum - 1)) & FIBCR_BCFX;
55399 +                       if (length > req->req.length - req->req.actual)
55400 +                               length = req->req.length - req->req.actual;
55401                 }
55402         } else {
55403                 buffer = req->req.buf + req->req.actual;
55404                 if (req->req.length - req->req.actual > ep->ep.maxpacket)
55405                         length = ep->ep.maxpacket;
55406                 else
55407 -                       length = req->req.length;
55408 +                       length = req->req.length - req->req.actual;
55409         }
55411         d = dma_map_single(dev, buffer, length,
55412 @@ -379,8 +380,7 @@ static void fotg210_ep0_queue(struct fotg210_ep *ep,
55413         }
55414         if (ep->dir_in) { /* if IN */
55415                 fotg210_start_dma(ep, req);
55416 -               if ((req->req.length == req->req.actual) ||
55417 -                   (req->req.actual < ep->ep.maxpacket))
55418 +               if (req->req.length == req->req.actual)
55419                         fotg210_done(ep, req, 0);
55420         } else { /* OUT */
55421                 u32 value = ioread32(ep->fotg210->reg + FOTG210_DMISGR0);
55422 @@ -820,7 +820,7 @@ static void fotg210_ep0in(struct fotg210_udc *fotg210)
55423                 if (req->req.length)
55424                         fotg210_start_dma(ep, req);
55426 -               if ((req->req.length - req->req.actual) < ep->ep.maxpacket)
55427 +               if (req->req.actual == req->req.length)
55428                         fotg210_done(ep, req, 0);
55429         } else {
55430                 fotg210_set_cxdone(fotg210);
55431 @@ -849,12 +849,16 @@ static void fotg210_out_fifo_handler(struct fotg210_ep *ep)
55433         struct fotg210_request *req = list_entry(ep->queue.next,
55434                                                  struct fotg210_request, queue);
55435 +       int disgr1 = ioread32(ep->fotg210->reg + FOTG210_DISGR1);
55437         fotg210_start_dma(ep, req);
55439 -       /* finish out transfer */
55440 +       /* Complete the request when it's full or a short packet arrived.
55441 +        * Like other drivers, short_not_ok isn't handled.
55442 +        */
55444         if (req->req.length == req->req.actual ||
55445 -           req->req.actual < ep->ep.maxpacket)
55446 +           (disgr1 & DISGR1_SPK_INT(ep->epnum - 1)))
55447                 fotg210_done(ep, req, 0);
55450 @@ -1027,6 +1031,12 @@ static void fotg210_init(struct fotg210_udc *fotg210)
55451         value &= ~DMCR_GLINT_EN;
55452         iowrite32(value, fotg210->reg + FOTG210_DMCR);
55454 +       /* enable only grp2 irqs we handle */
55455 +       iowrite32(~(DISGR2_DMA_ERROR | DISGR2_RX0BYTE_INT | DISGR2_TX0BYTE_INT
55456 +                   | DISGR2_ISO_SEQ_ABORT_INT | DISGR2_ISO_SEQ_ERR_INT
55457 +                   | DISGR2_RESM_INT | DISGR2_SUSP_INT | DISGR2_USBRST_INT),
55458 +                 fotg210->reg + FOTG210_DMISGR2);
55460         /* disable all fifo interrupt */
55461         iowrite32(~(u32)0, fotg210->reg + FOTG210_DMISGR1);
55463 diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
55464 index a3c1fc924268..fd3656d0f760 100644
55465 --- a/drivers/usb/gadget/udc/pch_udc.c
55466 +++ b/drivers/usb/gadget/udc/pch_udc.c
55467 @@ -7,12 +7,14 @@
55468  #include <linux/module.h>
55469  #include <linux/pci.h>
55470  #include <linux/delay.h>
55471 +#include <linux/dmi.h>
55472  #include <linux/errno.h>
55473 +#include <linux/gpio/consumer.h>
55474 +#include <linux/gpio/machine.h>
55475  #include <linux/list.h>
55476  #include <linux/interrupt.h>
55477  #include <linux/usb/ch9.h>
55478  #include <linux/usb/gadget.h>
55479 -#include <linux/gpio/consumer.h>
55480  #include <linux/irq.h>
55482  #define PCH_VBUS_PERIOD                3000    /* VBUS polling period (msec) */
55483 @@ -596,18 +598,22 @@ static void pch_udc_reconnect(struct pch_udc_dev *dev)
55484  static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
55485                                           int is_active)
55487 +       unsigned long           iflags;
55489 +       spin_lock_irqsave(&dev->lock, iflags);
55490         if (is_active) {
55491                 pch_udc_reconnect(dev);
55492                 dev->vbus_session = 1;
55493         } else {
55494                 if (dev->driver && dev->driver->disconnect) {
55495 -                       spin_lock(&dev->lock);
55496 +                       spin_unlock_irqrestore(&dev->lock, iflags);
55497                         dev->driver->disconnect(&dev->gadget);
55498 -                       spin_unlock(&dev->lock);
55499 +                       spin_lock_irqsave(&dev->lock, iflags);
55500                 }
55501                 pch_udc_set_disconnect(dev);
55502                 dev->vbus_session = 0;
55503         }
55504 +       spin_unlock_irqrestore(&dev->lock, iflags);
55507  /**
55508 @@ -1166,20 +1172,25 @@ static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
55509  static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
55511         struct pch_udc_dev      *dev;
55512 +       unsigned long           iflags;
55514         if (!gadget)
55515                 return -EINVAL;
55517         dev = container_of(gadget, struct pch_udc_dev, gadget);
55519 +       spin_lock_irqsave(&dev->lock, iflags);
55520         if (is_on) {
55521                 pch_udc_reconnect(dev);
55522         } else {
55523                 if (dev->driver && dev->driver->disconnect) {
55524 -                       spin_lock(&dev->lock);
55525 +                       spin_unlock_irqrestore(&dev->lock, iflags);
55526                         dev->driver->disconnect(&dev->gadget);
55527 -                       spin_unlock(&dev->lock);
55528 +                       spin_lock_irqsave(&dev->lock, iflags);
55529                 }
55530                 pch_udc_set_disconnect(dev);
55531         }
55532 +       spin_unlock_irqrestore(&dev->lock, iflags);
55534         return 0;
55536 @@ -1350,6 +1361,43 @@ static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
55537         return IRQ_HANDLED;
55540 +static struct gpiod_lookup_table minnowboard_udc_gpios = {
55541 +       .dev_id         = "0000:02:02.4",
55542 +       .table          = {
55543 +               GPIO_LOOKUP("sch_gpio.33158", 12, NULL, GPIO_ACTIVE_HIGH),
55544 +               {}
55545 +       },
55548 +static const struct dmi_system_id pch_udc_gpio_dmi_table[] = {
55549 +       {
55550 +               .ident = "MinnowBoard",
55551 +               .matches = {
55552 +                       DMI_MATCH(DMI_BOARD_NAME, "MinnowBoard"),
55553 +               },
55554 +               .driver_data = &minnowboard_udc_gpios,
55555 +       },
55556 +       { }
55559 +static void pch_vbus_gpio_remove_table(void *table)
55561 +       gpiod_remove_lookup_table(table);
55564 +static int pch_vbus_gpio_add_table(struct pch_udc_dev *dev)
55566 +       struct device *d = &dev->pdev->dev;
55567 +       const struct dmi_system_id *dmi;
55569 +       dmi = dmi_first_match(pch_udc_gpio_dmi_table);
55570 +       if (!dmi)
55571 +               return 0;
55573 +       gpiod_add_lookup_table(dmi->driver_data);
55574 +       return devm_add_action_or_reset(d, pch_vbus_gpio_remove_table, dmi->driver_data);
55577  /**
55578   * pch_vbus_gpio_init() - This API initializes GPIO port detecting VBUS.
55579   * @dev:               Reference to the driver structure
55580 @@ -1360,6 +1408,7 @@ static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
55581   */
55582  static int pch_vbus_gpio_init(struct pch_udc_dev *dev)
55584 +       struct device *d = &dev->pdev->dev;
55585         int err;
55586         int irq_num = 0;
55587         struct gpio_desc *gpiod;
55588 @@ -1367,8 +1416,12 @@ static int pch_vbus_gpio_init(struct pch_udc_dev *dev)
55589         dev->vbus_gpio.port = NULL;
55590         dev->vbus_gpio.intr = 0;
55592 +       err = pch_vbus_gpio_add_table(dev);
55593 +       if (err)
55594 +               return err;
55596         /* Retrieve the GPIO line from the USB gadget device */
55597 -       gpiod = devm_gpiod_get(dev->gadget.dev.parent, NULL, GPIOD_IN);
55598 +       gpiod = devm_gpiod_get_optional(d, NULL, GPIOD_IN);
55599         if (IS_ERR(gpiod))
55600                 return PTR_ERR(gpiod);
55601         gpiod_set_consumer_name(gpiod, "pch_vbus");
55602 @@ -1756,7 +1809,7 @@ static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
55603         }
55604         /* prevent from using desc. - set HOST BUSY */
55605         dma_desc->status |= PCH_UDC_BS_HST_BSY;
55606 -       dma_desc->dataptr = cpu_to_le32(DMA_ADDR_INVALID);
55607 +       dma_desc->dataptr = lower_32_bits(DMA_ADDR_INVALID);
55608         req->td_data = dma_desc;
55609         req->td_data_last = dma_desc;
55610         req->chain_len = 1;
55611 @@ -2298,6 +2351,21 @@ static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
55612                 pch_udc_set_dma(dev, DMA_DIR_RX);
55615 +static int pch_udc_gadget_setup(struct pch_udc_dev *dev)
55616 +       __must_hold(&dev->lock)
55618 +       int rc;
55620 +       /* In some cases we can get an interrupt before driver gets setup */
55621 +       if (!dev->driver)
55622 +               return -ESHUTDOWN;
55624 +       spin_unlock(&dev->lock);
55625 +       rc = dev->driver->setup(&dev->gadget, &dev->setup_data);
55626 +       spin_lock(&dev->lock);
55627 +       return rc;
55630  /**
55631   * pch_udc_svc_control_in() - Handle Control IN endpoint interrupts
55632   * @dev:       Reference to the device structure
55633 @@ -2369,15 +2437,12 @@ static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
55634                         dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
55635                 else /* OUT */
55636                         dev->gadget.ep0 = &ep->ep;
55637 -               spin_lock(&dev->lock);
55638                 /* If Mass storage Reset */
55639                 if ((dev->setup_data.bRequestType == 0x21) &&
55640                     (dev->setup_data.bRequest == 0xFF))
55641                         dev->prot_stall = 0;
55642                 /* call gadget with setup data received */
55643 -               setup_supported = dev->driver->setup(&dev->gadget,
55644 -                                                    &dev->setup_data);
55645 -               spin_unlock(&dev->lock);
55646 +               setup_supported = pch_udc_gadget_setup(dev);
55648                 if (dev->setup_data.bRequestType & USB_DIR_IN) {
55649                         ep->td_data->status = (ep->td_data->status &
55650 @@ -2625,9 +2690,7 @@ static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
55651                 dev->ep[i].halted = 0;
55652         }
55653         dev->stall = 0;
55654 -       spin_unlock(&dev->lock);
55655 -       dev->driver->setup(&dev->gadget, &dev->setup_data);
55656 -       spin_lock(&dev->lock);
55657 +       pch_udc_gadget_setup(dev);
55660  /**
55661 @@ -2662,9 +2725,7 @@ static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
55662         dev->stall = 0;
55664         /* call gadget zero with setup data received */
55665 -       spin_unlock(&dev->lock);
55666 -       dev->driver->setup(&dev->gadget, &dev->setup_data);
55667 -       spin_lock(&dev->lock);
55668 +       pch_udc_gadget_setup(dev);
55671  /**
55672 @@ -2870,14 +2931,20 @@ static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
55673   * @dev:       Reference to the driver structure
55674   *
55675   * Return codes:
55676 - *     0: Success
55677 + *     0:              Success
55678 + *     -%ERRNO:        All kind of errors when retrieving VBUS GPIO
55679   */
55680  static int pch_udc_pcd_init(struct pch_udc_dev *dev)
55682 +       int ret;
55684         pch_udc_init(dev);
55685         pch_udc_pcd_reinit(dev);
55686 -       pch_vbus_gpio_init(dev);
55687 -       return 0;
55689 +       ret = pch_vbus_gpio_init(dev);
55690 +       if (ret)
55691 +               pch_udc_exit(dev);
55692 +       return ret;
55695  /**
55696 @@ -2938,7 +3005,7 @@ static int init_dma_pools(struct pch_udc_dev *dev)
55697         dev->dma_addr = dma_map_single(&dev->pdev->dev, ep0out_buf,
55698                                        UDC_EP0OUT_BUFF_SIZE * 4,
55699                                        DMA_FROM_DEVICE);
55700 -       return 0;
55701 +       return dma_mapping_error(&dev->pdev->dev, dev->dma_addr);
55704  static int pch_udc_start(struct usb_gadget *g,
55705 @@ -3063,6 +3130,7 @@ static int pch_udc_probe(struct pci_dev *pdev,
55706         if (retval)
55707                 return retval;
55709 +       dev->pdev = pdev;
55710         pci_set_drvdata(pdev, dev);
55712         /* Determine BAR based on PCI ID */
55713 @@ -3078,16 +3146,10 @@ static int pch_udc_probe(struct pci_dev *pdev,
55715         dev->base_addr = pcim_iomap_table(pdev)[bar];
55717 -       /*
55718 -        * FIXME: add a GPIO descriptor table to pdev.dev using
55719 -        * gpiod_add_descriptor_table() from <linux/gpio/machine.h> based on
55720 -        * the PCI subsystem ID. The system-dependent GPIO is necessary for
55721 -        * VBUS operation.
55722 -        */
55724         /* initialize the hardware */
55725 -       if (pch_udc_pcd_init(dev))
55726 -               return -ENODEV;
55727 +       retval = pch_udc_pcd_init(dev);
55728 +       if (retval)
55729 +               return retval;
55731         pci_enable_msi(pdev);
55733 @@ -3104,7 +3166,6 @@ static int pch_udc_probe(struct pci_dev *pdev,
55735         /* device struct setup */
55736         spin_lock_init(&dev->lock);
55737 -       dev->pdev = pdev;
55738         dev->gadget.ops = &pch_udc_ops;
55740         retval = init_dma_pools(dev);
55741 diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
55742 index 896c1a016d55..65cae4883454 100644
55743 --- a/drivers/usb/gadget/udc/r8a66597-udc.c
55744 +++ b/drivers/usb/gadget/udc/r8a66597-udc.c
55745 @@ -1849,6 +1849,8 @@ static int r8a66597_probe(struct platform_device *pdev)
55746                 return PTR_ERR(reg);
55748         ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
55749 +       if (!ires)
55750 +               return -EINVAL;
55751         irq = ires->start;
55752         irq_trigger = ires->flags & IRQF_TRIGGER_MASK;
55754 diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
55755 index 1d3ebb07ccd4..b154b62abefa 100644
55756 --- a/drivers/usb/gadget/udc/s3c2410_udc.c
55757 +++ b/drivers/usb/gadget/udc/s3c2410_udc.c
55758 @@ -54,8 +54,6 @@ static struct clk             *udc_clock;
55759  static struct clk              *usb_bus_clock;
55760  static void __iomem            *base_addr;
55761  static int                     irq_usbd;
55762 -static u64                     rsrc_start;
55763 -static u64                     rsrc_len;
55764  static struct dentry           *s3c2410_udc_debugfs_root;
55766  static inline u32 udc_read(u32 reg)
55767 @@ -1752,7 +1750,8 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
55768         udc_clock = clk_get(NULL, "usb-device");
55769         if (IS_ERR(udc_clock)) {
55770                 dev_err(dev, "failed to get udc clock source\n");
55771 -               return PTR_ERR(udc_clock);
55772 +               retval = PTR_ERR(udc_clock);
55773 +               goto err_usb_bus_clk;
55774         }
55776         clk_prepare_enable(udc_clock);
55777 @@ -1775,7 +1774,7 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
55778         base_addr = devm_platform_ioremap_resource(pdev, 0);
55779         if (IS_ERR(base_addr)) {
55780                 retval = PTR_ERR(base_addr);
55781 -               goto err_mem;
55782 +               goto err_udc_clk;
55783         }
55785         the_controller = udc;
55786 @@ -1793,7 +1792,7 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
55787         if (retval != 0) {
55788                 dev_err(dev, "cannot get irq %i, err %d\n", irq_usbd, retval);
55789                 retval = -EBUSY;
55790 -               goto err_map;
55791 +               goto err_udc_clk;
55792         }
55794         dev_dbg(dev, "got irq %i\n", irq_usbd);
55795 @@ -1864,10 +1863,14 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
55796                 gpio_free(udc_info->vbus_pin);
55797  err_int:
55798         free_irq(irq_usbd, udc);
55799 -err_map:
55800 -       iounmap(base_addr);
55801 -err_mem:
55802 -       release_mem_region(rsrc_start, rsrc_len);
55803 +err_udc_clk:
55804 +       clk_disable_unprepare(udc_clock);
55805 +       clk_put(udc_clock);
55806 +       udc_clock = NULL;
55807 +err_usb_bus_clk:
55808 +       clk_disable_unprepare(usb_bus_clock);
55809 +       clk_put(usb_bus_clock);
55810 +       usb_bus_clock = NULL;
55812         return retval;
55814 @@ -1899,9 +1902,6 @@ static int s3c2410_udc_remove(struct platform_device *pdev)
55816         free_irq(irq_usbd, udc);
55818 -       iounmap(base_addr);
55819 -       release_mem_region(rsrc_start, rsrc_len);
55821         if (!IS_ERR(udc_clock) && udc_clock != NULL) {
55822                 clk_disable_unprepare(udc_clock);
55823                 clk_put(udc_clock);
55824 diff --git a/drivers/usb/gadget/udc/snps_udc_plat.c b/drivers/usb/gadget/udc/snps_udc_plat.c
55825 index 32f1d3e90c26..99805d60a7ab 100644
55826 --- a/drivers/usb/gadget/udc/snps_udc_plat.c
55827 +++ b/drivers/usb/gadget/udc/snps_udc_plat.c
55828 @@ -114,8 +114,8 @@ static int udc_plat_probe(struct platform_device *pdev)
55830         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
55831         udc->virt_addr = devm_ioremap_resource(dev, res);
55832 -       if (IS_ERR(udc->regs))
55833 -               return PTR_ERR(udc->regs);
55834 +       if (IS_ERR(udc->virt_addr))
55835 +               return PTR_ERR(udc->virt_addr);
55837         /* udc csr registers base */
55838         udc->csr = udc->virt_addr + UDC_CSR_ADDR;
55839 diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
55840 index 580bef8eb4cb..2319c9737c2b 100644
55841 --- a/drivers/usb/gadget/udc/tegra-xudc.c
55842 +++ b/drivers/usb/gadget/udc/tegra-xudc.c
55843 @@ -3883,7 +3883,7 @@ static int tegra_xudc_remove(struct platform_device *pdev)
55845         pm_runtime_get_sync(xudc->dev);
55847 -       cancel_delayed_work(&xudc->plc_reset_work);
55848 +       cancel_delayed_work_sync(&xudc->plc_reset_work);
55849         cancel_work_sync(&xudc->usb_role_sw_work);
55851         usb_del_gadget_udc(&xudc->gadget);
55852 diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
55853 index b94f2a070c05..df9428f1dc5e 100644
55854 --- a/drivers/usb/host/Kconfig
55855 +++ b/drivers/usb/host/Kconfig
55856 @@ -272,6 +272,7 @@ config USB_EHCI_TEGRA
55857         select USB_CHIPIDEA
55858         select USB_CHIPIDEA_HOST
55859         select USB_CHIPIDEA_TEGRA
55860 +       select USB_GADGET
55861         help
55862           This option is deprecated now and the driver was removed, use
55863           USB_CHIPIDEA_TEGRA instead.
55864 diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
55865 index 5617ef30530a..f0e4a315cc81 100644
55866 --- a/drivers/usb/host/fotg210-hcd.c
55867 +++ b/drivers/usb/host/fotg210-hcd.c
55868 @@ -5568,7 +5568,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
55869         struct usb_hcd *hcd;
55870         struct resource *res;
55871         int irq;
55872 -       int retval = -ENODEV;
55873 +       int retval;
55874         struct fotg210_hcd *fotg210;
55876         if (usb_disabled())
55877 @@ -5588,7 +5588,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
55878         hcd = usb_create_hcd(&fotg210_fotg210_hc_driver, dev,
55879                         dev_name(dev));
55880         if (!hcd) {
55881 -               dev_err(dev, "failed to create hcd with err %d\n", retval);
55882 +               dev_err(dev, "failed to create hcd\n");
55883                 retval = -ENOMEM;
55884                 goto fail_create_hcd;
55885         }
55886 diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
55887 index 115ced0d93e1..1be692d3cf90 100644
55888 --- a/drivers/usb/host/sl811-hcd.c
55889 +++ b/drivers/usb/host/sl811-hcd.c
55890 @@ -1287,11 +1287,10 @@ sl811h_hub_control(
55891                         goto error;
55892                 put_unaligned_le32(sl811->port1, buf);
55894 -#ifndef        VERBOSE
55895 -       if (*(u16*)(buf+2))     /* only if wPortChange is interesting */
55896 -#endif
55897 -               dev_dbg(hcd->self.controller, "GetPortStatus %08x\n",
55898 -                       sl811->port1);
55899 +               if (__is_defined(VERBOSE) ||
55900 +                   *(u16*)(buf+2)) /* only if wPortChange is interesting */
55901 +                       dev_dbg(hcd->self.controller, "GetPortStatus %08x\n",
55902 +                               sl811->port1);
55903                 break;
55904         case SetPortFeature:
55905                 if (wIndex != 1 || wLength != 0)
55906 diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
55907 index fa59b242cd51..e8af0a125f84 100644
55908 --- a/drivers/usb/host/xhci-ext-caps.h
55909 +++ b/drivers/usb/host/xhci-ext-caps.h
55910 @@ -7,8 +7,9 @@
55911   * Author: Sarah Sharp
55912   * Some code borrowed from the Linux EHCI driver.
55913   */
55914 -/* Up to 16 ms to halt an HC */
55915 -#define XHCI_MAX_HALT_USEC     (16*1000)
55917 +/* HC should halt within 16 ms, but use 32 ms as some hosts take longer */
55918 +#define XHCI_MAX_HALT_USEC     (32 * 1000)
55919  /* HC not running - set to 1 when run/stop bit is cleared. */
55920  #define XHCI_STS_HALT          (1<<0)
55922 diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
55923 index f2c4ee7c4786..717c122f9449 100644
55924 --- a/drivers/usb/host/xhci-mem.c
55925 +++ b/drivers/usb/host/xhci-mem.c
55926 @@ -2129,6 +2129,15 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
55928         if (major_revision == 0x03) {
55929                 rhub = &xhci->usb3_rhub;
55930 +               /*
55931 +                * Some hosts incorrectly use sub-minor version for minor
55932 +                * version (i.e. 0x02 instead of 0x20 for bcdUSB 0x320 and 0x01
55933 +                * for bcdUSB 0x310). Since there is no USB release with sub
55934 +                * minor version 0x301 to 0x309, we can assume that they are
55935 +                * incorrect and fix it here.
55936 +                */
55937 +               if (minor_revision > 0x00 && minor_revision < 0x10)
55938 +                       minor_revision <<= 4;
55939         } else if (major_revision <= 0x02) {
55940                 rhub = &xhci->usb2_rhub;
55941         } else {
55942 @@ -2240,6 +2249,9 @@ static void xhci_create_rhub_port_array(struct xhci_hcd *xhci,
55943                 return;
55944         rhub->ports = kcalloc_node(rhub->num_ports, sizeof(*rhub->ports),
55945                         flags, dev_to_node(dev));
55946 +       if (!rhub->ports)
55947 +               return;
55949         for (i = 0; i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
55950                 if (xhci->hw_ports[i].rhub != rhub ||
55951                     xhci->hw_ports[i].hcd_portnum == DUPLICATE_ENTRY)
55952 diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
55953 index b45e5bf08997..8950d1f10a7f 100644
55954 --- a/drivers/usb/host/xhci-mtk-sch.c
55955 +++ b/drivers/usb/host/xhci-mtk-sch.c
55956 @@ -378,6 +378,31 @@ static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw,
55957         sch_ep->allocated = used;
55960 +static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset)
55962 +       struct mu3h_sch_tt *tt = sch_ep->sch_tt;
55963 +       u32 num_esit, tmp;
55964 +       int base;
55965 +       int i, j;
55967 +       num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
55968 +       for (i = 0; i < num_esit; i++) {
55969 +               base = offset + i * sch_ep->esit;
55971 +               /*
55972 +                * Compared with hs bus, no matter what ep type,
55973 +                * the hub will always delay one uframe to send data
55974 +                */
55975 +               for (j = 0; j < sch_ep->cs_count; j++) {
55976 +                       tmp = tt->fs_bus_bw[base + j] + sch_ep->bw_cost_per_microframe;
55977 +                       if (tmp > FS_PAYLOAD_MAX)
55978 +                               return -ERANGE;
55979 +               }
55980 +       }
55982 +       return 0;
55985  static int check_sch_tt(struct usb_device *udev,
55986         struct mu3h_sch_ep_info *sch_ep, u32 offset)
55988 @@ -402,7 +427,7 @@ static int check_sch_tt(struct usb_device *udev,
55989                         return -ERANGE;
55991                 for (i = 0; i < sch_ep->cs_count; i++)
55992 -                       if (test_bit(offset + i, tt->split_bit_map))
55993 +                       if (test_bit(offset + i, tt->ss_bit_map))
55994                                 return -ERANGE;
55996         } else {
55997 @@ -432,7 +457,7 @@ static int check_sch_tt(struct usb_device *udev,
55998                         cs_count = 7; /* HW limit */
56000                 for (i = 0; i < cs_count + 2; i++) {
56001 -                       if (test_bit(offset + i, tt->split_bit_map))
56002 +                       if (test_bit(offset + i, tt->ss_bit_map))
56003                                 return -ERANGE;
56004                 }
56006 @@ -448,24 +473,44 @@ static int check_sch_tt(struct usb_device *udev,
56007                         sch_ep->num_budget_microframes = sch_ep->esit;
56008         }
56010 -       return 0;
56011 +       return check_fs_bus_bw(sch_ep, offset);
56014  static void update_sch_tt(struct usb_device *udev,
56015 -       struct mu3h_sch_ep_info *sch_ep)
56016 +       struct mu3h_sch_ep_info *sch_ep, bool used)
56018         struct mu3h_sch_tt *tt = sch_ep->sch_tt;
56019         u32 base, num_esit;
56020 +       int bw_updated;
56021 +       int bits;
56022         int i, j;
56024         num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
56025 +       bits = (sch_ep->ep_type == ISOC_OUT_EP) ? sch_ep->cs_count : 1;
56027 +       if (used)
56028 +               bw_updated = sch_ep->bw_cost_per_microframe;
56029 +       else
56030 +               bw_updated = -sch_ep->bw_cost_per_microframe;
56032         for (i = 0; i < num_esit; i++) {
56033                 base = sch_ep->offset + i * sch_ep->esit;
56034 -               for (j = 0; j < sch_ep->num_budget_microframes; j++)
56035 -                       set_bit(base + j, tt->split_bit_map);
56037 +               for (j = 0; j < bits; j++) {
56038 +                       if (used)
56039 +                               set_bit(base + j, tt->ss_bit_map);
56040 +                       else
56041 +                               clear_bit(base + j, tt->ss_bit_map);
56042 +               }
56044 +               for (j = 0; j < sch_ep->cs_count; j++)
56045 +                       tt->fs_bus_bw[base + j] += bw_updated;
56046         }
56048 -       list_add_tail(&sch_ep->tt_endpoint, &tt->ep_list);
56049 +       if (used)
56050 +               list_add_tail(&sch_ep->tt_endpoint, &tt->ep_list);
56051 +       else
56052 +               list_del(&sch_ep->tt_endpoint);
56055  static int check_sch_bw(struct usb_device *udev,
56056 @@ -535,7 +580,7 @@ static int check_sch_bw(struct usb_device *udev,
56057                 if (!tt_offset_ok)
56058                         return -ERANGE;
56060 -               update_sch_tt(udev, sch_ep);
56061 +               update_sch_tt(udev, sch_ep, 1);
56062         }
56064         /* update bus bandwidth info */
56065 @@ -548,15 +593,16 @@ static void destroy_sch_ep(struct usb_device *udev,
56066         struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep)
56068         /* only release ep bw check passed by check_sch_bw() */
56069 -       if (sch_ep->allocated)
56070 +       if (sch_ep->allocated) {
56071                 update_bus_bw(sch_bw, sch_ep, 0);
56072 +               if (sch_ep->sch_tt)
56073 +                       update_sch_tt(udev, sch_ep, 0);
56074 +       }
56076 -       list_del(&sch_ep->endpoint);
56078 -       if (sch_ep->sch_tt) {
56079 -               list_del(&sch_ep->tt_endpoint);
56080 +       if (sch_ep->sch_tt)
56081                 drop_tt(udev);
56082 -       }
56084 +       list_del(&sch_ep->endpoint);
56085         kfree(sch_ep);
56088 @@ -643,7 +689,7 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
56089                  */
56090                 if (usb_endpoint_xfer_int(&ep->desc)
56091                         || usb_endpoint_xfer_isoc(&ep->desc))
56092 -                       ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(1));
56093 +                       ep_ctx->reserved[0] = cpu_to_le32(EP_BPKTS(1));
56095                 return 0;
56096         }
56097 @@ -730,10 +776,10 @@ int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
56098                 list_move_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list);
56100                 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
56101 -               ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts)
56102 +               ep_ctx->reserved[0] = cpu_to_le32(EP_BPKTS(sch_ep->pkts)
56103                         | EP_BCSCOUNT(sch_ep->cs_count)
56104                         | EP_BBM(sch_ep->burst_mode));
56105 -               ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset)
56106 +               ep_ctx->reserved[1] = cpu_to_le32(EP_BOFFSET(sch_ep->offset)
56107                         | EP_BREPEAT(sch_ep->repeat));
56109                 xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n",
56110 diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
56111 index 2f27dc0d9c6b..1c331577fca9 100644
56112 --- a/drivers/usb/host/xhci-mtk.c
56113 +++ b/drivers/usb/host/xhci-mtk.c
56114 @@ -397,6 +397,8 @@ static void xhci_mtk_quirks(struct device *dev, struct xhci_hcd *xhci)
56115         xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
56116         if (mtk->lpm_support)
56117                 xhci->quirks |= XHCI_LPM_SUPPORT;
56118 +       if (mtk->u2_lpm_disable)
56119 +               xhci->quirks |= XHCI_HW_LPM_DISABLE;
56121         /*
56122          * MTK xHCI 0.96: PSA is 1 by default even if doesn't support stream,
56123 @@ -469,6 +471,7 @@ static int xhci_mtk_probe(struct platform_device *pdev)
56124                 return ret;
56126         mtk->lpm_support = of_property_read_bool(node, "usb3-lpm-capable");
56127 +       mtk->u2_lpm_disable = of_property_read_bool(node, "usb2-lpm-disable");
56128         /* optional property, ignore the error if it does not exist */
56129         of_property_read_u32(node, "mediatek,u3p-dis-msk",
56130                              &mtk->u3p_dis_msk);
56131 diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
56132 index cbb09dfea62e..2fc0568ba054 100644
56133 --- a/drivers/usb/host/xhci-mtk.h
56134 +++ b/drivers/usb/host/xhci-mtk.h
56135 @@ -20,13 +20,15 @@
56136  #define XHCI_MTK_MAX_ESIT      64
56138  /**
56139 - * @split_bit_map: used to avoid split microframes overlay
56140 + * @ss_bit_map: used to avoid start split microframes overlay
56141 + * @fs_bus_bw: array to keep track of bandwidth already used for FS
56142   * @ep_list: Endpoints using this TT
56143   * @usb_tt: usb TT related
56144   * @tt_port: TT port number
56145   */
56146  struct mu3h_sch_tt {
56147 -       DECLARE_BITMAP(split_bit_map, XHCI_MTK_MAX_ESIT);
56148 +       DECLARE_BITMAP(ss_bit_map, XHCI_MTK_MAX_ESIT);
56149 +       u32 fs_bus_bw[XHCI_MTK_MAX_ESIT];
56150         struct list_head ep_list;
56151         struct usb_tt *usb_tt;
56152         int tt_port;
56153 @@ -150,6 +152,7 @@ struct xhci_hcd_mtk {
56154         struct phy **phys;
56155         int num_phys;
56156         bool lpm_support;
56157 +       bool u2_lpm_disable;
56158         /* usb remote wakeup */
56159         bool uwk_en;
56160         struct regmap *uwk;
56161 diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
56162 index 5bbccc9a0179..7bc18cf8042c 100644
56163 --- a/drivers/usb/host/xhci-pci.c
56164 +++ b/drivers/usb/host/xhci-pci.c
56165 @@ -57,6 +57,7 @@
56166  #define PCI_DEVICE_ID_INTEL_CML_XHCI                   0xa3af
56167  #define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI            0x9a13
56168  #define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI           0x1138
56169 +#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI            0x461e
56171  #define PCI_DEVICE_ID_AMD_PROMONTORYA_4                        0x43b9
56172  #define PCI_DEVICE_ID_AMD_PROMONTORYA_3                        0x43ba
56173 @@ -166,8 +167,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
56174             (pdev->device == 0x15e0 || pdev->device == 0x15e1))
56175                 xhci->quirks |= XHCI_SNPS_BROKEN_SUSPEND;
56177 -       if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x15e5)
56178 +       if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x15e5) {
56179                 xhci->quirks |= XHCI_DISABLE_SPARSE;
56180 +               xhci->quirks |= XHCI_RESET_ON_RESUME;
56181 +       }
56183         if (pdev->vendor == PCI_VENDOR_ID_AMD)
56184                 xhci->quirks |= XHCI_TRUST_TX_LENGTH;
56185 @@ -243,7 +246,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
56186              pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI ||
56187              pdev->device == PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI ||
56188              pdev->device == PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI ||
56189 -            pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI))
56190 +            pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI ||
56191 +            pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI))
56192                 xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
56194         if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
56195 diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
56196 index ce38076901e2..6cdea0d00d19 100644
56197 --- a/drivers/usb/host/xhci-ring.c
56198 +++ b/drivers/usb/host/xhci-ring.c
56199 @@ -863,7 +863,7 @@ static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id,
56200         return ret;
56203 -static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
56204 +static int xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
56205                                 struct xhci_virt_ep *ep, unsigned int stream_id,
56206                                 struct xhci_td *td,
56207                                 enum xhci_ep_reset_type reset_type)
56208 @@ -876,7 +876,7 @@ static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
56209          * Device will be reset soon to recover the link so don't do anything
56210          */
56211         if (ep->vdev->flags & VDEV_PORT_ERROR)
56212 -               return;
56213 +               return -ENODEV;
56215         /* add td to cancelled list and let reset ep handler take care of it */
56216         if (reset_type == EP_HARD_RESET) {
56217 @@ -889,16 +889,18 @@ static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
56219         if (ep->ep_state & EP_HALTED) {
56220                 xhci_dbg(xhci, "Reset ep command already pending\n");
56221 -               return;
56222 +               return 0;
56223         }
56225         err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type);
56226         if (err)
56227 -               return;
56228 +               return err;
56230         ep->ep_state |= EP_HALTED;
56232         xhci_ring_cmd_db(xhci);
56234 +       return 0;
56237  /*
56238 @@ -1015,6 +1017,7 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
56239         struct xhci_td *td = NULL;
56240         enum xhci_ep_reset_type reset_type;
56241         struct xhci_command *command;
56242 +       int err;
56244         if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
56245                 if (!xhci->devs[slot_id])
56246 @@ -1059,7 +1062,10 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
56247                                         td->status = -EPROTO;
56248                         }
56249                         /* reset ep, reset handler cleans up cancelled tds */
56250 -                       xhci_handle_halted_endpoint(xhci, ep, 0, td, reset_type);
56251 +                       err = xhci_handle_halted_endpoint(xhci, ep, 0, td,
56252 +                                                         reset_type);
56253 +                       if (err)
56254 +                               break;
56255                         xhci_stop_watchdog_timer_in_irq(xhci, ep);
56256                         return;
56257                 case EP_STATE_RUNNING:
56258 @@ -2129,16 +2135,13 @@ int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
56259         return 0;
56262 -static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
56263 -       struct xhci_transfer_event *event, struct xhci_virt_ep *ep)
56264 +static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
56265 +                    struct xhci_ring *ep_ring, struct xhci_td *td,
56266 +                    u32 trb_comp_code)
56268         struct xhci_ep_ctx *ep_ctx;
56269 -       struct xhci_ring *ep_ring;
56270 -       u32 trb_comp_code;
56272 -       ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
56273         ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
56274 -       trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
56276         switch (trb_comp_code) {
56277         case COMP_STOPPED_LENGTH_INVALID:
56278 @@ -2234,9 +2237,9 @@ static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
56279  /*
56280   * Process control tds, update urb status and actual_length.
56281   */
56282 -static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
56283 -       union xhci_trb *ep_trb, struct xhci_transfer_event *event,
56284 -       struct xhci_virt_ep *ep)
56285 +static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
56286 +               struct xhci_ring *ep_ring,  struct xhci_td *td,
56287 +                          union xhci_trb *ep_trb, struct xhci_transfer_event *event)
56289         struct xhci_ep_ctx *ep_ctx;
56290         u32 trb_comp_code;
56291 @@ -2324,15 +2327,15 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
56292                 td->urb->actual_length = requested;
56294  finish_td:
56295 -       return finish_td(xhci, td, event, ep);
56296 +       return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
56299  /*
56300   * Process isochronous tds, update urb packet status and actual_length.
56301   */
56302 -static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
56303 -       union xhci_trb *ep_trb, struct xhci_transfer_event *event,
56304 -       struct xhci_virt_ep *ep)
56305 +static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
56306 +               struct xhci_ring *ep_ring, struct xhci_td *td,
56307 +               union xhci_trb *ep_trb, struct xhci_transfer_event *event)
56309         struct urb_priv *urb_priv;
56310         int idx;
56311 @@ -2409,7 +2412,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
56313         td->urb->actual_length += frame->actual_length;
56315 -       return finish_td(xhci, td, event, ep);
56316 +       return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
56319  static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
56320 @@ -2441,17 +2444,15 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
56321  /*
56322   * Process bulk and interrupt tds, update urb status and actual_length.
56323   */
56324 -static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
56325 -       union xhci_trb *ep_trb, struct xhci_transfer_event *event,
56326 -       struct xhci_virt_ep *ep)
56327 +static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
56328 +               struct xhci_ring *ep_ring, struct xhci_td *td,
56329 +               union xhci_trb *ep_trb, struct xhci_transfer_event *event)
56331         struct xhci_slot_ctx *slot_ctx;
56332 -       struct xhci_ring *ep_ring;
56333         u32 trb_comp_code;
56334         u32 remaining, requested, ep_trb_len;
56336         slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
56337 -       ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
56338         trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
56339         remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
56340         ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
56341 @@ -2511,7 +2512,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
56342                           remaining);
56343                 td->urb->actual_length = 0;
56344         }
56345 -       return finish_td(xhci, td, event, ep);
56347 +       return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
56350  /*
56351 @@ -2854,11 +2856,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
56353                 /* update the urb's actual_length and give back to the core */
56354                 if (usb_endpoint_xfer_control(&td->urb->ep->desc))
56355 -                       process_ctrl_td(xhci, td, ep_trb, event, ep);
56356 +                       process_ctrl_td(xhci, ep, ep_ring, td, ep_trb, event);
56357                 else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
56358 -                       process_isoc_td(xhci, td, ep_trb, event, ep);
56359 +                       process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event);
56360                 else
56361 -                       process_bulk_intr_td(xhci, td, ep_trb, event, ep);
56362 +                       process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event);
56363  cleanup:
56364                 handling_skipped_tds = ep->skip &&
56365                         trb_comp_code != COMP_MISSED_SERVICE_ERROR &&
56366 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
56367 index 1975016f46bf..0d2f1c37ab74 100644
56368 --- a/drivers/usb/host/xhci.c
56369 +++ b/drivers/usb/host/xhci.c
56370 @@ -228,6 +228,7 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
56371         struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
56372         int err, i;
56373         u64 val;
56374 +       u32 intrs;
56376         /*
56377          * Some Renesas controllers get into a weird state if they are
56378 @@ -266,7 +267,10 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
56379         if (upper_32_bits(val))
56380                 xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
56382 -       for (i = 0; i < HCS_MAX_INTRS(xhci->hcs_params1); i++) {
56383 +       intrs = min_t(u32, HCS_MAX_INTRS(xhci->hcs_params1),
56384 +                     ARRAY_SIZE(xhci->run_regs->ir_set));
56386 +       for (i = 0; i < intrs; i++) {
56387                 struct xhci_intr_reg __iomem *ir;
56389                 ir = &xhci->run_regs->ir_set[i];
56390 @@ -1510,7 +1514,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
56391   * we need to issue an evaluate context command and wait on it.
56392   */
56393  static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
56394 -               unsigned int ep_index, struct urb *urb)
56395 +               unsigned int ep_index, struct urb *urb, gfp_t mem_flags)
56397         struct xhci_container_ctx *out_ctx;
56398         struct xhci_input_control_ctx *ctrl_ctx;
56399 @@ -1541,7 +1545,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
56400                  * changes max packet sizes.
56401                  */
56403 -               command = xhci_alloc_command(xhci, true, GFP_KERNEL);
56404 +               command = xhci_alloc_command(xhci, true, mem_flags);
56405                 if (!command)
56406                         return -ENOMEM;
56408 @@ -1635,7 +1639,7 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
56409                  */
56410                 if (urb->dev->speed == USB_SPEED_FULL) {
56411                         ret = xhci_check_maxpacket(xhci, slot_id,
56412 -                                       ep_index, urb);
56413 +                                       ep_index, urb, mem_flags);
56414                         if (ret < 0) {
56415                                 xhci_urb_free_priv(urb_priv);
56416                                 urb->hcpriv = NULL;
56417 @@ -3269,6 +3273,14 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
56419         /* config ep command clears toggle if add and drop ep flags are set */
56420         ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx);
56421 +       if (!ctrl_ctx) {
56422 +               spin_unlock_irqrestore(&xhci->lock, flags);
56423 +               xhci_free_command(xhci, cfg_cmd);
56424 +               xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
56425 +                               __func__);
56426 +               goto cleanup;
56427 +       }
56429         xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx,
56430                                            ctrl_ctx, ep_flag, ep_flag);
56431         xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
56432 diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c
56433 index eebeadd26946..6b92d037d8fc 100644
56434 --- a/drivers/usb/musb/mediatek.c
56435 +++ b/drivers/usb/musb/mediatek.c
56436 @@ -518,8 +518,8 @@ static int mtk_musb_probe(struct platform_device *pdev)
56438         glue->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
56439         if (IS_ERR(glue->xceiv)) {
56440 -               dev_err(dev, "fail to getting usb-phy %d\n", ret);
56441                 ret = PTR_ERR(glue->xceiv);
56442 +               dev_err(dev, "fail to getting usb-phy %d\n", ret);
56443                 goto err_unregister_usb_phy;
56444         }
56446 diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
56447 index fc0457db62e1..8f09a387b773 100644
56448 --- a/drivers/usb/musb/musb_core.c
56449 +++ b/drivers/usb/musb/musb_core.c
56450 @@ -2070,7 +2070,7 @@ static void musb_irq_work(struct work_struct *data)
56451         struct musb *musb = container_of(data, struct musb, irq_work.work);
56452         int error;
56454 -       error = pm_runtime_get_sync(musb->controller);
56455 +       error = pm_runtime_resume_and_get(musb->controller);
56456         if (error < 0) {
56457                 dev_err(musb->controller, "Could not enable: %i\n", error);
56459 diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
56460 index 97f37077b7f9..33b637d0d8d9 100644
56461 --- a/drivers/usb/roles/class.c
56462 +++ b/drivers/usb/roles/class.c
56463 @@ -189,6 +189,8 @@ usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode)
56464                 return NULL;
56466         dev = class_find_device_by_fwnode(role_class, fwnode);
56467 +       if (dev)
56468 +               WARN_ON(!try_module_get(dev->parent->driver->owner));
56470         return dev ? to_role_switch(dev) : NULL;
56472 diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
56473 index 7252b0ce75a6..fe1c13a8849c 100644
56474 --- a/drivers/usb/serial/ti_usb_3410_5052.c
56475 +++ b/drivers/usb/serial/ti_usb_3410_5052.c
56476 @@ -1418,14 +1418,19 @@ static int ti_set_serial_info(struct tty_struct *tty,
56477         struct serial_struct *ss)
56479         struct usb_serial_port *port = tty->driver_data;
56480 -       struct ti_port *tport = usb_get_serial_port_data(port);
56481 +       struct tty_port *tport = &port->port;
56482         unsigned cwait;
56484         cwait = ss->closing_wait;
56485         if (cwait != ASYNC_CLOSING_WAIT_NONE)
56486                 cwait = msecs_to_jiffies(10 * ss->closing_wait);
56488 -       tport->tp_port->port.closing_wait = cwait;
56489 +       if (!capable(CAP_SYS_ADMIN)) {
56490 +               if (cwait != tport->closing_wait)
56491 +                       return -EPERM;
56492 +       }
56494 +       tport->closing_wait = cwait;
56496         return 0;
56498 diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
56499 index 46d46a4f99c9..4e9c994a972a 100644
56500 --- a/drivers/usb/serial/usb_wwan.c
56501 +++ b/drivers/usb/serial/usb_wwan.c
56502 @@ -140,10 +140,10 @@ int usb_wwan_get_serial_info(struct tty_struct *tty,
56503         ss->line            = port->minor;
56504         ss->port            = port->port_number;
56505         ss->baud_base       = tty_get_baud_rate(port->port.tty);
56506 -       ss->close_delay     = port->port.close_delay / 10;
56507 +       ss->close_delay     = jiffies_to_msecs(port->port.close_delay) / 10;
56508         ss->closing_wait    = port->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
56509                                  ASYNC_CLOSING_WAIT_NONE :
56510 -                                port->port.closing_wait / 10;
56511 +                                jiffies_to_msecs(port->port.closing_wait) / 10;
56512         return 0;
56514  EXPORT_SYMBOL(usb_wwan_get_serial_info);
56515 @@ -155,9 +155,10 @@ int usb_wwan_set_serial_info(struct tty_struct *tty,
56516         unsigned int closing_wait, close_delay;
56517         int retval = 0;
56519 -       close_delay = ss->close_delay * 10;
56520 +       close_delay = msecs_to_jiffies(ss->close_delay * 10);
56521         closing_wait = ss->closing_wait == ASYNC_CLOSING_WAIT_NONE ?
56522 -                       ASYNC_CLOSING_WAIT_NONE : ss->closing_wait * 10;
56523 +                       ASYNC_CLOSING_WAIT_NONE :
56524 +                       msecs_to_jiffies(ss->closing_wait * 10);
56526         mutex_lock(&port->port.mutex);
56528 diff --git a/drivers/usb/serial/xr_serial.c b/drivers/usb/serial/xr_serial.c
56529 index 0ca04906da4b..c59c8b47a120 100644
56530 --- a/drivers/usb/serial/xr_serial.c
56531 +++ b/drivers/usb/serial/xr_serial.c
56532 @@ -467,6 +467,11 @@ static void xr_set_termios(struct tty_struct *tty,
56533                 termios->c_cflag &= ~CSIZE;
56534                 if (old_termios)
56535                         termios->c_cflag |= old_termios->c_cflag & CSIZE;
56536 +               else
56537 +                       termios->c_cflag |= CS8;
56539 +               if (C_CSIZE(tty) == CS7)
56540 +                       bits |= XR21V141X_UART_DATA_7;
56541                 else
56542                         bits |= XR21V141X_UART_DATA_8;
56543                 break;
56544 diff --git a/drivers/usb/typec/stusb160x.c b/drivers/usb/typec/stusb160x.c
56545 index d21750bbbb44..6eaeba9b096e 100644
56546 --- a/drivers/usb/typec/stusb160x.c
56547 +++ b/drivers/usb/typec/stusb160x.c
56548 @@ -682,8 +682,8 @@ static int stusb160x_probe(struct i2c_client *client)
56549         }
56551         fwnode = device_get_named_child_node(chip->dev, "connector");
56552 -       if (IS_ERR(fwnode))
56553 -               return PTR_ERR(fwnode);
56554 +       if (!fwnode)
56555 +               return -ENODEV;
56557         /*
56558          * When both VDD and VSYS power supplies are present, the low power
56559 diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
56560 index a27deb0b5f03..027afd7dfdce 100644
56561 --- a/drivers/usb/typec/tcpm/tcpci.c
56562 +++ b/drivers/usb/typec/tcpm/tcpci.c
56563 @@ -24,6 +24,15 @@
56564  #define        AUTO_DISCHARGE_PD_HEADROOM_MV           850
56565  #define        AUTO_DISCHARGE_PPS_HEADROOM_MV          1250
56567 +#define tcpc_presenting_cc1_rd(reg) \
56568 +       (!(TCPC_ROLE_CTRL_DRP & (reg)) && \
56569 +        (((reg) & (TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT)) == \
56570 +         (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT)))
56571 +#define tcpc_presenting_cc2_rd(reg) \
56572 +       (!(TCPC_ROLE_CTRL_DRP & (reg)) && \
56573 +        (((reg) & (TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT)) == \
56574 +         (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT)))
56576  struct tcpci {
56577         struct device *dev;
56579 @@ -178,19 +187,25 @@ static int tcpci_get_cc(struct tcpc_dev *tcpc,
56580                         enum typec_cc_status *cc1, enum typec_cc_status *cc2)
56582         struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
56583 -       unsigned int reg;
56584 +       unsigned int reg, role_control;
56585         int ret;
56587 +       ret = regmap_read(tcpci->regmap, TCPC_ROLE_CTRL, &role_control);
56588 +       if (ret < 0)
56589 +               return ret;
56591         ret = regmap_read(tcpci->regmap, TCPC_CC_STATUS, &reg);
56592         if (ret < 0)
56593                 return ret;
56595         *cc1 = tcpci_to_typec_cc((reg >> TCPC_CC_STATUS_CC1_SHIFT) &
56596                                  TCPC_CC_STATUS_CC1_MASK,
56597 -                                reg & TCPC_CC_STATUS_TERM);
56598 +                                reg & TCPC_CC_STATUS_TERM ||
56599 +                                tcpc_presenting_cc1_rd(role_control));
56600         *cc2 = tcpci_to_typec_cc((reg >> TCPC_CC_STATUS_CC2_SHIFT) &
56601                                  TCPC_CC_STATUS_CC2_MASK,
56602 -                                reg & TCPC_CC_STATUS_TERM);
56603 +                                reg & TCPC_CC_STATUS_TERM ||
56604 +                                tcpc_presenting_cc2_rd(role_control));
56606         return 0;
56608 diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
56609 index ce7af398c7c1..52acc884a61f 100644
56610 --- a/drivers/usb/typec/tcpm/tcpm.c
56611 +++ b/drivers/usb/typec/tcpm/tcpm.c
56612 @@ -268,12 +268,27 @@ struct pd_mode_data {
56613         struct typec_altmode_desc altmode_desc[ALTMODE_DISCOVERY_MAX];
56614  };
56617 + * @min_volt: Actual min voltage at the local port
56618 + * @req_min_volt: Requested min voltage to the port partner
56619 + * @max_volt: Actual max voltage at the local port
56620 + * @req_max_volt: Requested max voltage to the port partner
56621 + * @max_curr: Actual max current at the local port
56622 + * @req_max_curr: Requested max current of the port partner
56623 + * @req_out_volt: Requested output voltage to the port partner
56624 + * @req_op_curr: Requested operating current to the port partner
56625 + * @supported: Parter has atleast one APDO hence supports PPS
56626 + * @active: PPS mode is active
56627 + */
56628  struct pd_pps_data {
56629         u32 min_volt;
56630 +       u32 req_min_volt;
56631         u32 max_volt;
56632 +       u32 req_max_volt;
56633         u32 max_curr;
56634 -       u32 out_volt;
56635 -       u32 op_curr;
56636 +       u32 req_max_curr;
56637 +       u32 req_out_volt;
56638 +       u32 req_op_curr;
56639         bool supported;
56640         bool active;
56641  };
56642 @@ -389,7 +404,10 @@ struct tcpm_port {
56643         unsigned int operating_snk_mw;
56644         bool update_sink_caps;
56646 -       /* Requested current / voltage */
56647 +       /* Requested current / voltage to the port partner */
56648 +       u32 req_current_limit;
56649 +       u32 req_supply_voltage;
56650 +       /* Actual current / voltage limit of the local port */
56651         u32 current_limit;
56652         u32 supply_voltage;
56654 @@ -438,6 +456,9 @@ struct tcpm_port {
56655         enum tcpm_ams next_ams;
56656         bool in_ams;
56658 +       /* Auto vbus discharge status */
56659 +       bool auto_vbus_discharge_enabled;
56661  #ifdef CONFIG_DEBUG_FS
56662         struct dentry *dentry;
56663         struct mutex logbuffer_lock;    /* log buffer access lock */
56664 @@ -507,6 +528,9 @@ static const char * const pd_rev[] = {
56665         (tcpm_port_is_sink(port) && \
56666         ((port)->cc1 == TYPEC_CC_RP_3_0 || (port)->cc2 == TYPEC_CC_RP_3_0))
56668 +#define tcpm_wait_for_discharge(port) \
56669 +       (((port)->auto_vbus_discharge_enabled && !(port)->vbus_vsafe0v) ? PD_T_SAFE_0V : 0)
56671  static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
56673         if (port->port_type == TYPEC_PORT_DRP) {
56674 @@ -1853,7 +1877,6 @@ static void vdm_run_state_machine(struct tcpm_port *port)
56675                         }
56677                         if (res < 0) {
56678 -                               port->vdm_sm_running = false;
56679                                 return;
56680                         }
56681                 }
56682 @@ -1869,6 +1892,7 @@ static void vdm_run_state_machine(struct tcpm_port *port)
56683                 port->vdo_data[0] = port->vdo_retry;
56684                 port->vdo_count = 1;
56685                 port->vdm_state = VDM_STATE_READY;
56686 +               tcpm_ams_finish(port);
56687                 break;
56688         case VDM_STATE_BUSY:
56689                 port->vdm_state = VDM_STATE_ERR_TMOUT;
56690 @@ -1934,7 +1958,7 @@ static void vdm_state_machine_work(struct kthread_work *work)
56691                  port->vdm_state != VDM_STATE_BUSY &&
56692                  port->vdm_state != VDM_STATE_SEND_MESSAGE);
56694 -       if (port->vdm_state == VDM_STATE_ERR_TMOUT)
56695 +       if (port->vdm_state < VDM_STATE_READY)
56696                 port->vdm_sm_running = false;
56698         mutex_unlock(&port->lock);
56699 @@ -2363,7 +2387,7 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
56700                 port->nr_sink_caps = cnt;
56701                 port->sink_cap_done = true;
56702                 if (port->ams == GET_SINK_CAPABILITIES)
56703 -                       tcpm_pd_handle_state(port, ready_state(port), NONE_AMS, 0);
56704 +                       tcpm_set_state(port, ready_state(port), 0);
56705                 /* Unexpected Sink Capabilities */
56706                 else
56707                         tcpm_pd_handle_msg(port,
56708 @@ -2432,8 +2456,8 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
56709                 case SNK_TRANSITION_SINK:
56710                         if (port->vbus_present) {
56711                                 tcpm_set_current_limit(port,
56712 -                                                      port->current_limit,
56713 -                                                      port->supply_voltage);
56714 +                                                      port->req_current_limit,
56715 +                                                      port->req_supply_voltage);
56716                                 port->explicit_contract = true;
56717                                 tcpm_set_auto_vbus_discharge_threshold(port,
56718                                                                        TYPEC_PWR_MODE_PD,
56719 @@ -2492,8 +2516,8 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
56720                         break;
56721                 case SNK_NEGOTIATE_PPS_CAPABILITIES:
56722                         /* Revert data back from any requested PPS updates */
56723 -                       port->pps_data.out_volt = port->supply_voltage;
56724 -                       port->pps_data.op_curr = port->current_limit;
56725 +                       port->pps_data.req_out_volt = port->supply_voltage;
56726 +                       port->pps_data.req_op_curr = port->current_limit;
56727                         port->pps_status = (type == PD_CTRL_WAIT ?
56728                                             -EAGAIN : -EOPNOTSUPP);
56730 @@ -2525,6 +2549,16 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
56731                         port->sink_cap_done = true;
56732                         tcpm_set_state(port, ready_state(port), 0);
56733                         break;
56734 +               case SRC_READY:
56735 +               case SNK_READY:
56736 +                       if (port->vdm_state > VDM_STATE_READY) {
56737 +                               port->vdm_state = VDM_STATE_DONE;
56738 +                               if (tcpm_vdm_ams(port))
56739 +                                       tcpm_ams_finish(port);
56740 +                               mod_vdm_delayed_work(port, 0);
56741 +                               break;
56742 +                       }
56743 +                       fallthrough;
56744                 default:
56745                         tcpm_pd_handle_state(port,
56746                                              port->pwr_role == TYPEC_SOURCE ?
56747 @@ -2542,8 +2576,12 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
56748                         break;
56749                 case SNK_NEGOTIATE_PPS_CAPABILITIES:
56750                         port->pps_data.active = true;
56751 -                       port->supply_voltage = port->pps_data.out_volt;
56752 -                       port->current_limit = port->pps_data.op_curr;
56753 +                       port->pps_data.min_volt = port->pps_data.req_min_volt;
56754 +                       port->pps_data.max_volt = port->pps_data.req_max_volt;
56755 +                       port->pps_data.max_curr = port->pps_data.req_max_curr;
56756 +                       port->req_supply_voltage = port->pps_data.req_out_volt;
56757 +                       port->req_current_limit = port->pps_data.req_op_curr;
56758 +                       power_supply_changed(port->psy);
56759                         tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
56760                         break;
56761                 case SOFT_RESET_SEND:
56762 @@ -3102,17 +3140,16 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
56763                 src = port->source_caps[src_pdo];
56764                 snk = port->snk_pdo[snk_pdo];
56766 -               port->pps_data.min_volt = max(pdo_pps_apdo_min_voltage(src),
56767 -                                             pdo_pps_apdo_min_voltage(snk));
56768 -               port->pps_data.max_volt = min(pdo_pps_apdo_max_voltage(src),
56769 -                                             pdo_pps_apdo_max_voltage(snk));
56770 -               port->pps_data.max_curr = min_pps_apdo_current(src, snk);
56771 -               port->pps_data.out_volt = min(port->pps_data.max_volt,
56772 -                                             max(port->pps_data.min_volt,
56773 -                                                 port->pps_data.out_volt));
56774 -               port->pps_data.op_curr = min(port->pps_data.max_curr,
56775 -                                            port->pps_data.op_curr);
56776 -               power_supply_changed(port->psy);
56777 +               port->pps_data.req_min_volt = max(pdo_pps_apdo_min_voltage(src),
56778 +                                                 pdo_pps_apdo_min_voltage(snk));
56779 +               port->pps_data.req_max_volt = min(pdo_pps_apdo_max_voltage(src),
56780 +                                                 pdo_pps_apdo_max_voltage(snk));
56781 +               port->pps_data.req_max_curr = min_pps_apdo_current(src, snk);
56782 +               port->pps_data.req_out_volt = min(port->pps_data.req_max_volt,
56783 +                                                 max(port->pps_data.req_min_volt,
56784 +                                                     port->pps_data.req_out_volt));
56785 +               port->pps_data.req_op_curr = min(port->pps_data.req_max_curr,
56786 +                                                port->pps_data.req_op_curr);
56787         }
56789         return src_pdo;
56790 @@ -3192,8 +3229,8 @@ static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
56791                          flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
56792         }
56794 -       port->current_limit = ma;
56795 -       port->supply_voltage = mv;
56796 +       port->req_current_limit = ma;
56797 +       port->req_supply_voltage = mv;
56799         return 0;
56801 @@ -3239,10 +3276,10 @@ static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
56802                         tcpm_log(port, "Invalid APDO selected!");
56803                         return -EINVAL;
56804                 }
56805 -               max_mv = port->pps_data.max_volt;
56806 -               max_ma = port->pps_data.max_curr;
56807 -               out_mv = port->pps_data.out_volt;
56808 -               op_ma = port->pps_data.op_curr;
56809 +               max_mv = port->pps_data.req_max_volt;
56810 +               max_ma = port->pps_data.req_max_curr;
56811 +               out_mv = port->pps_data.req_out_volt;
56812 +               op_ma = port->pps_data.req_op_curr;
56813                 break;
56814         default:
56815                 tcpm_log(port, "Invalid PDO selected!");
56816 @@ -3289,8 +3326,8 @@ static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
56817         tcpm_log(port, "Requesting APDO %d: %u mV, %u mA",
56818                  src_pdo_index, out_mv, op_ma);
56820 -       port->pps_data.op_curr = op_ma;
56821 -       port->pps_data.out_volt = out_mv;
56822 +       port->pps_data.req_op_curr = op_ma;
56823 +       port->pps_data.req_out_volt = out_mv;
56825         return 0;
56827 @@ -3418,6 +3455,8 @@ static int tcpm_src_attach(struct tcpm_port *port)
56828         if (port->tcpc->enable_auto_vbus_discharge) {
56829                 ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, true);
56830                 tcpm_log_force(port, "enable vbus discharge ret:%d", ret);
56831 +               if (!ret)
56832 +                       port->auto_vbus_discharge_enabled = true;
56833         }
56835         ret = tcpm_set_roles(port, true, TYPEC_SOURCE, tcpm_data_role_for_source(port));
56836 @@ -3500,6 +3539,8 @@ static void tcpm_reset_port(struct tcpm_port *port)
56837         if (port->tcpc->enable_auto_vbus_discharge) {
56838                 ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, false);
56839                 tcpm_log_force(port, "Disable vbus discharge ret:%d", ret);
56840 +               if (!ret)
56841 +                       port->auto_vbus_discharge_enabled = false;
56842         }
56843         port->in_ams = false;
56844         port->ams = NONE_AMS;
56845 @@ -3533,8 +3574,6 @@ static void tcpm_reset_port(struct tcpm_port *port)
56846         port->sink_cap_done = false;
56847         if (port->tcpc->enable_frs)
56848                 port->tcpc->enable_frs(port->tcpc, false);
56850 -       power_supply_changed(port->psy);
56853  static void tcpm_detach(struct tcpm_port *port)
56854 @@ -3574,6 +3613,8 @@ static int tcpm_snk_attach(struct tcpm_port *port)
56855                 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
56856                 ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, true);
56857                 tcpm_log_force(port, "enable vbus discharge ret:%d", ret);
56858 +               if (!ret)
56859 +                       port->auto_vbus_discharge_enabled = true;
56860         }
56862         ret = tcpm_set_roles(port, true, TYPEC_SINK, tcpm_data_role_for_sink(port));
56863 @@ -4103,6 +4144,23 @@ static void run_state_machine(struct tcpm_port *port)
56864                 }
56865                 break;
56866         case SNK_TRANSITION_SINK:
56867 +               /* From the USB PD spec:
56868 +                * "The Sink Shall transition to Sink Standby before a positive or
56869 +                * negative voltage transition of VBUS. During Sink Standby
56870 +                * the Sink Shall reduce its power draw to pSnkStdby."
56871 +                *
56872 +                * This is not applicable to PPS though as the port can continue
56873 +                * to draw negotiated power without switching to standby.
56874 +                */
56875 +               if (port->supply_voltage != port->req_supply_voltage && !port->pps_data.active &&
56876 +                   port->current_limit * port->supply_voltage / 1000 > PD_P_SNK_STDBY_MW) {
56877 +                       u32 stdby_ma = PD_P_SNK_STDBY_MW * 1000 / port->supply_voltage;
56879 +                       tcpm_log(port, "Setting standby current %u mV @ %u mA",
56880 +                                port->supply_voltage, stdby_ma);
56881 +                       tcpm_set_current_limit(port, stdby_ma, port->supply_voltage);
56882 +               }
56883 +               fallthrough;
56884         case SNK_TRANSITION_SINK_VBUS:
56885                 tcpm_set_state(port, hard_reset_state(port),
56886                                PD_T_PS_TRANSITION);
56887 @@ -4676,9 +4734,9 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
56888                 if (tcpm_port_is_disconnected(port) ||
56889                     !tcpm_port_is_source(port)) {
56890                         if (port->port_type == TYPEC_PORT_SRC)
56891 -                               tcpm_set_state(port, SRC_UNATTACHED, 0);
56892 +                               tcpm_set_state(port, SRC_UNATTACHED, tcpm_wait_for_discharge(port));
56893                         else
56894 -                               tcpm_set_state(port, SNK_UNATTACHED, 0);
56895 +                               tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
56896                 }
56897                 break;
56898         case SNK_UNATTACHED:
56899 @@ -4709,7 +4767,23 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
56900                         tcpm_set_state(port, SNK_DEBOUNCED, 0);
56901                 break;
56902         case SNK_READY:
56903 -               if (tcpm_port_is_disconnected(port))
56904 +               /*
56905 +                * EXIT condition is based primarily on vbus disconnect and CC is secondary.
56906 +                * "A port that has entered into USB PD communications with the Source and
56907 +                * has seen the CC voltage exceed vRd-USB may monitor the CC pin to detect
56908 +                * cable disconnect in addition to monitoring VBUS.
56909 +                *
56910 +                * A port that is monitoring the CC voltage for disconnect (but is not in
56911 +                * the process of a USB PD PR_Swap or USB PD FR_Swap) shall transition to
56912 +                * Unattached.SNK within tSinkDisconnect after the CC voltage remains below
56913 +                * vRd-USB for tPDDebounce."
56914 +                *
56915 +                * When set_auto_vbus_discharge_threshold is enabled, CC pins go
56916 +                * away before vbus decays to disconnect threshold. Allow
56917 +                * disconnect to be driven by vbus disconnect when auto vbus
56918 +                * discharge is enabled.
56919 +                */
56920 +               if (!port->auto_vbus_discharge_enabled && tcpm_port_is_disconnected(port))
56921                         tcpm_set_state(port, unattached_state(port), 0);
56922                 else if (!port->pd_capable &&
56923                          (cc1 != old_cc1 || cc2 != old_cc2))
56924 @@ -4808,9 +4882,13 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
56925                  * Ignore CC changes here.
56926                  */
56927                 break;
56929         default:
56930 -               if (tcpm_port_is_disconnected(port))
56931 +               /*
56932 +                * While acting as sink and auto vbus discharge is enabled, Allow disconnect
56933 +                * to be driven by vbus disconnect.
56934 +                */
56935 +               if (tcpm_port_is_disconnected(port) && !(port->pwr_role == TYPEC_SINK &&
56936 +                                                        port->auto_vbus_discharge_enabled))
56937                         tcpm_set_state(port, unattached_state(port), 0);
56938                 break;
56939         }
56940 @@ -4974,8 +5052,16 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
56941         case SRC_TRANSITION_SUPPLY:
56942         case SRC_READY:
56943         case SRC_WAIT_NEW_CAPABILITIES:
56944 -               /* Force to unattached state to re-initiate connection */
56945 -               tcpm_set_state(port, SRC_UNATTACHED, 0);
56946 +               /*
56947 +                * Force to unattached state to re-initiate connection.
56948 +                * DRP port should move to Unattached.SNK instead of Unattached.SRC if
56949 +                * sink removed. Although sink removal here is due to source's vbus collapse,
56950 +                * treat it the same way for consistency.
56951 +                */
56952 +               if (port->port_type == TYPEC_PORT_SRC)
56953 +                       tcpm_set_state(port, SRC_UNATTACHED, tcpm_wait_for_discharge(port));
56954 +               else
56955 +                       tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
56956                 break;
56958         case PORT_RESET:
56959 @@ -4994,9 +5080,8 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
56960                 break;
56962         default:
56963 -               if (port->pwr_role == TYPEC_SINK &&
56964 -                   port->attached)
56965 -                       tcpm_set_state(port, SNK_UNATTACHED, 0);
56966 +               if (port->pwr_role == TYPEC_SINK && port->attached)
56967 +                       tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
56968                 break;
56969         }
56971 @@ -5018,7 +5103,23 @@ static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port)
56972                         tcpm_set_state(port, tcpm_try_snk(port) ? SNK_TRY : SRC_ATTACHED,
56973                                        PD_T_CC_DEBOUNCE);
56974                 break;
56975 +       case SRC_STARTUP:
56976 +       case SRC_SEND_CAPABILITIES:
56977 +       case SRC_SEND_CAPABILITIES_TIMEOUT:
56978 +       case SRC_NEGOTIATE_CAPABILITIES:
56979 +       case SRC_TRANSITION_SUPPLY:
56980 +       case SRC_READY:
56981 +       case SRC_WAIT_NEW_CAPABILITIES:
56982 +               if (port->auto_vbus_discharge_enabled) {
56983 +                       if (port->port_type == TYPEC_PORT_SRC)
56984 +                               tcpm_set_state(port, SRC_UNATTACHED, 0);
56985 +                       else
56986 +                               tcpm_set_state(port, SNK_UNATTACHED, 0);
56987 +               }
56988 +               break;
56989         default:
56990 +               if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled)
56991 +                       tcpm_set_state(port, SNK_UNATTACHED, 0);
56992                 break;
56993         }
56995 @@ -5374,7 +5475,7 @@ static int tcpm_try_role(struct typec_port *p, int role)
56996         return ret;
56999 -static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr)
57000 +static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 req_op_curr)
57002         unsigned int target_mw;
57003         int ret;
57004 @@ -5392,12 +5493,12 @@ static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr)
57005                 goto port_unlock;
57006         }
57008 -       if (op_curr > port->pps_data.max_curr) {
57009 +       if (req_op_curr > port->pps_data.max_curr) {
57010                 ret = -EINVAL;
57011                 goto port_unlock;
57012         }
57014 -       target_mw = (op_curr * port->pps_data.out_volt) / 1000;
57015 +       target_mw = (req_op_curr * port->supply_voltage) / 1000;
57016         if (target_mw < port->operating_snk_mw) {
57017                 ret = -EINVAL;
57018                 goto port_unlock;
57019 @@ -5411,10 +5512,10 @@ static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr)
57020         }
57022         /* Round down operating current to align with PPS valid steps */
57023 -       op_curr = op_curr - (op_curr % RDO_PROG_CURR_MA_STEP);
57024 +       req_op_curr = req_op_curr - (req_op_curr % RDO_PROG_CURR_MA_STEP);
57026         reinit_completion(&port->pps_complete);
57027 -       port->pps_data.op_curr = op_curr;
57028 +       port->pps_data.req_op_curr = req_op_curr;
57029         port->pps_status = 0;
57030         port->pps_pending = true;
57031         mutex_unlock(&port->lock);
57032 @@ -5435,7 +5536,7 @@ static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr)
57033         return ret;
57036 -static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 out_volt)
57037 +static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 req_out_volt)
57039         unsigned int target_mw;
57040         int ret;
57041 @@ -5453,13 +5554,13 @@ static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 out_volt)
57042                 goto port_unlock;
57043         }
57045 -       if (out_volt < port->pps_data.min_volt ||
57046 -           out_volt > port->pps_data.max_volt) {
57047 +       if (req_out_volt < port->pps_data.min_volt ||
57048 +           req_out_volt > port->pps_data.max_volt) {
57049                 ret = -EINVAL;
57050                 goto port_unlock;
57051         }
57053 -       target_mw = (port->pps_data.op_curr * out_volt) / 1000;
57054 +       target_mw = (port->current_limit * req_out_volt) / 1000;
57055         if (target_mw < port->operating_snk_mw) {
57056                 ret = -EINVAL;
57057                 goto port_unlock;
57058 @@ -5473,10 +5574,10 @@ static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 out_volt)
57059         }
57061         /* Round down output voltage to align with PPS valid steps */
57062 -       out_volt = out_volt - (out_volt % RDO_PROG_VOLT_MV_STEP);
57063 +       req_out_volt = req_out_volt - (req_out_volt % RDO_PROG_VOLT_MV_STEP);
57065         reinit_completion(&port->pps_complete);
57066 -       port->pps_data.out_volt = out_volt;
57067 +       port->pps_data.req_out_volt = req_out_volt;
57068         port->pps_status = 0;
57069         port->pps_pending = true;
57070         mutex_unlock(&port->lock);
57071 @@ -5534,8 +5635,8 @@ static int tcpm_pps_activate(struct tcpm_port *port, bool activate)
57073         /* Trigger PPS request or move back to standard PDO contract */
57074         if (activate) {
57075 -               port->pps_data.out_volt = port->supply_voltage;
57076 -               port->pps_data.op_curr = port->current_limit;
57077 +               port->pps_data.req_out_volt = port->supply_voltage;
57078 +               port->pps_data.req_op_curr = port->current_limit;
57079         }
57080         mutex_unlock(&port->lock);
57082 diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c
57083 index 29bd1c5a283c..4038104568f5 100644
57084 --- a/drivers/usb/typec/tps6598x.c
57085 +++ b/drivers/usb/typec/tps6598x.c
57086 @@ -614,8 +614,8 @@ static int tps6598x_probe(struct i2c_client *client)
57087                 return ret;
57089         fwnode = device_get_named_child_node(&client->dev, "connector");
57090 -       if (IS_ERR(fwnode))
57091 -               return PTR_ERR(fwnode);
57092 +       if (!fwnode)
57093 +               return -ENODEV;
57095         tps->role_sw = fwnode_usb_role_switch_get(fwnode);
57096         if (IS_ERR(tps->role_sw)) {
57097 diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
57098 index 244270755ae6..1e266f083bf8 100644
57099 --- a/drivers/usb/typec/ucsi/ucsi.c
57100 +++ b/drivers/usb/typec/ucsi/ucsi.c
57101 @@ -495,7 +495,8 @@ static void ucsi_unregister_altmodes(struct ucsi_connector *con, u8 recipient)
57102         }
57105 -static void ucsi_get_pdos(struct ucsi_connector *con, int is_partner)
57106 +static int ucsi_get_pdos(struct ucsi_connector *con, int is_partner,
57107 +                        u32 *pdos, int offset, int num_pdos)
57109         struct ucsi *ucsi = con->ucsi;
57110         u64 command;
57111 @@ -503,17 +504,39 @@ static void ucsi_get_pdos(struct ucsi_connector *con, int is_partner)
57113         command = UCSI_COMMAND(UCSI_GET_PDOS) | UCSI_CONNECTOR_NUMBER(con->num);
57114         command |= UCSI_GET_PDOS_PARTNER_PDO(is_partner);
57115 -       command |= UCSI_GET_PDOS_NUM_PDOS(UCSI_MAX_PDOS - 1);
57116 +       command |= UCSI_GET_PDOS_PDO_OFFSET(offset);
57117 +       command |= UCSI_GET_PDOS_NUM_PDOS(num_pdos - 1);
57118         command |= UCSI_GET_PDOS_SRC_PDOS;
57119 -       ret = ucsi_send_command(ucsi, command, con->src_pdos,
57120 -                              sizeof(con->src_pdos));
57121 -       if (ret < 0) {
57122 +       ret = ucsi_send_command(ucsi, command, pdos + offset,
57123 +                               num_pdos * sizeof(u32));
57124 +       if (ret < 0)
57125                 dev_err(ucsi->dev, "UCSI_GET_PDOS failed (%d)\n", ret);
57126 +       if (ret == 0 && offset == 0)
57127 +               dev_warn(ucsi->dev, "UCSI_GET_PDOS returned 0 bytes\n");
57129 +       return ret;
57132 +static void ucsi_get_src_pdos(struct ucsi_connector *con, int is_partner)
57134 +       int ret;
57136 +       /* UCSI max payload means only getting at most 4 PDOs at a time */
57137 +       ret = ucsi_get_pdos(con, 1, con->src_pdos, 0, UCSI_MAX_PDOS);
57138 +       if (ret < 0)
57139                 return;
57140 -       }
57142         con->num_pdos = ret / sizeof(u32); /* number of bytes to 32-bit PDOs */
57143 -       if (ret == 0)
57144 -               dev_warn(ucsi->dev, "UCSI_GET_PDOS returned 0 bytes\n");
57145 +       if (con->num_pdos < UCSI_MAX_PDOS)
57146 +               return;
57148 +       /* get the remaining PDOs, if any */
57149 +       ret = ucsi_get_pdos(con, 1, con->src_pdos, UCSI_MAX_PDOS,
57150 +                           PDO_MAX_OBJECTS - UCSI_MAX_PDOS);
57151 +       if (ret < 0)
57152 +               return;
57154 +       con->num_pdos += ret / sizeof(u32);
57157  static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
57158 @@ -522,7 +545,7 @@ static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
57159         case UCSI_CONSTAT_PWR_OPMODE_PD:
57160                 con->rdo = con->status.request_data_obj;
57161                 typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_PD);
57162 -               ucsi_get_pdos(con, 1);
57163 +               ucsi_get_src_pdos(con, 1);
57164                 break;
57165         case UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5:
57166                 con->rdo = 0;
57167 @@ -999,6 +1022,7 @@ static const struct typec_operations ucsi_ops = {
57168         .pr_set = ucsi_pr_swap
57169  };
57171 +/* Caller must call fwnode_handle_put() after use */
57172  static struct fwnode_handle *ucsi_find_fwnode(struct ucsi_connector *con)
57174         struct fwnode_handle *fwnode;
57175 @@ -1033,7 +1057,7 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
57176         command |= UCSI_CONNECTOR_NUMBER(con->num);
57177         ret = ucsi_send_command(ucsi, command, &con->cap, sizeof(con->cap));
57178         if (ret < 0)
57179 -               goto out;
57180 +               goto out_unlock;
57182         if (con->cap.op_mode & UCSI_CONCAP_OPMODE_DRP)
57183                 cap->data = TYPEC_PORT_DRD;
57184 @@ -1151,6 +1175,8 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
57185         trace_ucsi_register_port(con->num, &con->status);
57187  out:
57188 +       fwnode_handle_put(cap->fwnode);
57189 +out_unlock:
57190         mutex_unlock(&con->lock);
57191         return ret;
57193 diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
57194 index 3920e20a9e9e..cee666790907 100644
57195 --- a/drivers/usb/typec/ucsi/ucsi.h
57196 +++ b/drivers/usb/typec/ucsi/ucsi.h
57197 @@ -8,6 +8,7 @@
57198  #include <linux/power_supply.h>
57199  #include <linux/types.h>
57200  #include <linux/usb/typec.h>
57201 +#include <linux/usb/pd.h>
57202  #include <linux/usb/role.h>
57204  /* -------------------------------------------------------------------------- */
57205 @@ -134,7 +135,9 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
57207  /* GET_PDOS command bits */
57208  #define UCSI_GET_PDOS_PARTNER_PDO(_r_)         ((u64)(_r_) << 23)
57209 +#define UCSI_GET_PDOS_PDO_OFFSET(_r_)          ((u64)(_r_) << 24)
57210  #define UCSI_GET_PDOS_NUM_PDOS(_r_)            ((u64)(_r_) << 32)
57211 +#define UCSI_MAX_PDOS                          (4)
57212  #define UCSI_GET_PDOS_SRC_PDOS                 ((u64)1 << 34)
57214  /* -------------------------------------------------------------------------- */
57215 @@ -302,7 +305,6 @@ struct ucsi {
57217  #define UCSI_MAX_SVID          5
57218  #define UCSI_MAX_ALTMODES      (UCSI_MAX_SVID * 6)
57219 -#define UCSI_MAX_PDOS          (4)
57221  #define UCSI_TYPEC_VSAFE5V     5000
57222  #define UCSI_TYPEC_1_5_CURRENT 1500
57223 @@ -330,7 +332,7 @@ struct ucsi_connector {
57224         struct power_supply *psy;
57225         struct power_supply_desc psy_desc;
57226         u32 rdo;
57227 -       u32 src_pdos[UCSI_MAX_PDOS];
57228 +       u32 src_pdos[PDO_MAX_OBJECTS];
57229         int num_pdos;
57231         struct usb_role_switch *usb_role_sw;
57232 diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c
57233 index f7633ee655a1..d1cf6b51bf85 100644
57234 --- a/drivers/usb/usbip/vudc_sysfs.c
57235 +++ b/drivers/usb/usbip/vudc_sysfs.c
57236 @@ -156,12 +156,14 @@ static ssize_t usbip_sockfd_store(struct device *dev,
57237                 tcp_rx = kthread_create(&v_rx_loop, &udc->ud, "vudc_rx");
57238                 if (IS_ERR(tcp_rx)) {
57239                         sockfd_put(socket);
57240 +                       mutex_unlock(&udc->ud.sysfs_lock);
57241                         return -EINVAL;
57242                 }
57243                 tcp_tx = kthread_create(&v_tx_loop, &udc->ud, "vudc_tx");
57244                 if (IS_ERR(tcp_tx)) {
57245                         kthread_stop(tcp_rx);
57246                         sockfd_put(socket);
57247 +                       mutex_unlock(&udc->ud.sysfs_lock);
57248                         return -EINVAL;
57249                 }
57251 diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc.c b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
57252 index f27e25112c40..8722f5effacd 100644
57253 --- a/drivers/vfio/fsl-mc/vfio_fsl_mc.c
57254 +++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
57255 @@ -568,23 +568,39 @@ static int vfio_fsl_mc_init_device(struct vfio_fsl_mc_device *vdev)
57256                 dev_err(&mc_dev->dev, "VFIO_FSL_MC: Failed to setup DPRC (%d)\n", ret);
57257                 goto out_nc_unreg;
57258         }
57259 +       return 0;
57261 +out_nc_unreg:
57262 +       bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
57263 +       return ret;
57266 +static int vfio_fsl_mc_scan_container(struct fsl_mc_device *mc_dev)
57268 +       int ret;
57270 +       /* non dprc devices do not scan for other devices */
57271 +       if (!is_fsl_mc_bus_dprc(mc_dev))
57272 +               return 0;
57273         ret = dprc_scan_container(mc_dev, false);
57274         if (ret) {
57275 -               dev_err(&mc_dev->dev, "VFIO_FSL_MC: Container scanning failed (%d)\n", ret);
57276 -               goto out_dprc_cleanup;
57277 +               dev_err(&mc_dev->dev,
57278 +                       "VFIO_FSL_MC: Container scanning failed (%d)\n", ret);
57279 +               dprc_remove_devices(mc_dev, NULL, 0);
57280 +               return ret;
57281         }
57283         return 0;
57286 +static void vfio_fsl_uninit_device(struct vfio_fsl_mc_device *vdev)
57288 +       struct fsl_mc_device *mc_dev = vdev->mc_dev;
57290 +       if (!is_fsl_mc_bus_dprc(mc_dev))
57291 +               return;
57293 -out_dprc_cleanup:
57294 -       dprc_remove_devices(mc_dev, NULL, 0);
57295         dprc_cleanup(mc_dev);
57296 -out_nc_unreg:
57297         bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
57298 -       vdev->nb.notifier_call = NULL;
57300 -       return ret;
57303  static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
57304 @@ -607,29 +623,39 @@ static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
57305         }
57307         vdev->mc_dev = mc_dev;
57309 -       ret = vfio_add_group_dev(dev, &vfio_fsl_mc_ops, vdev);
57310 -       if (ret) {
57311 -               dev_err(dev, "VFIO_FSL_MC: Failed to add to vfio group\n");
57312 -               goto out_group_put;
57313 -       }
57314 +       mutex_init(&vdev->igate);
57316         ret = vfio_fsl_mc_reflck_attach(vdev);
57317         if (ret)
57318 -               goto out_group_dev;
57319 +               goto out_group_put;
57321         ret = vfio_fsl_mc_init_device(vdev);
57322         if (ret)
57323                 goto out_reflck;
57325 -       mutex_init(&vdev->igate);
57326 +       ret = vfio_add_group_dev(dev, &vfio_fsl_mc_ops, vdev);
57327 +       if (ret) {
57328 +               dev_err(dev, "VFIO_FSL_MC: Failed to add to vfio group\n");
57329 +               goto out_device;
57330 +       }
57332 +       /*
57333 +        * This triggers recursion into vfio_fsl_mc_probe() on another device
57334 +        * and the vfio_fsl_mc_reflck_attach() must succeed, which relies on the
57335 +        * vfio_add_group_dev() above. It has no impact on this vdev, so it is
57336 +        * safe to be after the vfio device is made live.
57337 +        */
57338 +       ret = vfio_fsl_mc_scan_container(mc_dev);
57339 +       if (ret)
57340 +               goto out_group_dev;
57341         return 0;
57343 -out_reflck:
57344 -       vfio_fsl_mc_reflck_put(vdev->reflck);
57345  out_group_dev:
57346         vfio_del_group_dev(dev);
57347 +out_device:
57348 +       vfio_fsl_uninit_device(vdev);
57349 +out_reflck:
57350 +       vfio_fsl_mc_reflck_put(vdev->reflck);
57351  out_group_put:
57352         vfio_iommu_group_put(group, dev);
57353         return ret;
57354 @@ -646,16 +672,10 @@ static int vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev)
57356         mutex_destroy(&vdev->igate);
57358 +       dprc_remove_devices(mc_dev, NULL, 0);
57359 +       vfio_fsl_uninit_device(vdev);
57360         vfio_fsl_mc_reflck_put(vdev->reflck);
57362 -       if (is_fsl_mc_bus_dprc(mc_dev)) {
57363 -               dprc_remove_devices(mc_dev, NULL, 0);
57364 -               dprc_cleanup(mc_dev);
57365 -       }
57367 -       if (vdev->nb.notifier_call)
57368 -               bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
57370         vfio_iommu_group_put(mc_dev->dev.iommu_group, dev);
57372         return 0;
57373 diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c
57374 index 917fd84c1c6f..367ff5412a38 100644
57375 --- a/drivers/vfio/mdev/mdev_sysfs.c
57376 +++ b/drivers/vfio/mdev/mdev_sysfs.c
57377 @@ -105,6 +105,7 @@ static struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent,
57378                 return ERR_PTR(-ENOMEM);
57380         type->kobj.kset = parent->mdev_types_kset;
57381 +       type->parent = parent;
57383         ret = kobject_init_and_add(&type->kobj, &mdev_type_ktype, NULL,
57384                                    "%s-%s", dev_driver_string(parent->dev),
57385 @@ -132,7 +133,6 @@ static struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent,
57386         }
57388         type->group = group;
57389 -       type->parent = parent;
57390         return type;
57392  attrs_failed:
57393 diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
57394 index 5023e23db3bc..cb7f2dc09e9d 100644
57395 --- a/drivers/vfio/pci/vfio_pci.c
57396 +++ b/drivers/vfio/pci/vfio_pci.c
57397 @@ -1924,6 +1924,68 @@ static int vfio_pci_bus_notifier(struct notifier_block *nb,
57398         return 0;
57401 +static int vfio_pci_vf_init(struct vfio_pci_device *vdev)
57403 +       struct pci_dev *pdev = vdev->pdev;
57404 +       int ret;
57406 +       if (!pdev->is_physfn)
57407 +               return 0;
57409 +       vdev->vf_token = kzalloc(sizeof(*vdev->vf_token), GFP_KERNEL);
57410 +       if (!vdev->vf_token)
57411 +               return -ENOMEM;
57413 +       mutex_init(&vdev->vf_token->lock);
57414 +       uuid_gen(&vdev->vf_token->uuid);
57416 +       vdev->nb.notifier_call = vfio_pci_bus_notifier;
57417 +       ret = bus_register_notifier(&pci_bus_type, &vdev->nb);
57418 +       if (ret) {
57419 +               kfree(vdev->vf_token);
57420 +               return ret;
57421 +       }
57422 +       return 0;
57425 +static void vfio_pci_vf_uninit(struct vfio_pci_device *vdev)
57427 +       if (!vdev->vf_token)
57428 +               return;
57430 +       bus_unregister_notifier(&pci_bus_type, &vdev->nb);
57431 +       WARN_ON(vdev->vf_token->users);
57432 +       mutex_destroy(&vdev->vf_token->lock);
57433 +       kfree(vdev->vf_token);
57436 +static int vfio_pci_vga_init(struct vfio_pci_device *vdev)
57438 +       struct pci_dev *pdev = vdev->pdev;
57439 +       int ret;
57441 +       if (!vfio_pci_is_vga(pdev))
57442 +               return 0;
57444 +       ret = vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
57445 +       if (ret)
57446 +               return ret;
57447 +       vga_set_legacy_decoding(pdev, vfio_pci_set_vga_decode(vdev, false));
57448 +       return 0;
57451 +static void vfio_pci_vga_uninit(struct vfio_pci_device *vdev)
57453 +       struct pci_dev *pdev = vdev->pdev;
57455 +       if (!vfio_pci_is_vga(pdev))
57456 +               return;
57457 +       vga_client_register(pdev, NULL, NULL, NULL);
57458 +       vga_set_legacy_decoding(pdev, VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
57459 +                                             VGA_RSRC_LEGACY_IO |
57460 +                                             VGA_RSRC_LEGACY_MEM);
57463  static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
57465         struct vfio_pci_device *vdev;
57466 @@ -1970,35 +2032,15 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
57467         INIT_LIST_HEAD(&vdev->vma_list);
57468         init_rwsem(&vdev->memory_lock);
57470 -       ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
57471 +       ret = vfio_pci_reflck_attach(vdev);
57472         if (ret)
57473                 goto out_free;
57475 -       ret = vfio_pci_reflck_attach(vdev);
57476 +       ret = vfio_pci_vf_init(vdev);
57477         if (ret)
57478 -               goto out_del_group_dev;
57480 -       if (pdev->is_physfn) {
57481 -               vdev->vf_token = kzalloc(sizeof(*vdev->vf_token), GFP_KERNEL);
57482 -               if (!vdev->vf_token) {
57483 -                       ret = -ENOMEM;
57484 -                       goto out_reflck;
57485 -               }
57487 -               mutex_init(&vdev->vf_token->lock);
57488 -               uuid_gen(&vdev->vf_token->uuid);
57490 -               vdev->nb.notifier_call = vfio_pci_bus_notifier;
57491 -               ret = bus_register_notifier(&pci_bus_type, &vdev->nb);
57492 -               if (ret)
57493 -                       goto out_vf_token;
57494 -       }
57496 -       if (vfio_pci_is_vga(pdev)) {
57497 -               vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
57498 -               vga_set_legacy_decoding(pdev,
57499 -                                       vfio_pci_set_vga_decode(vdev, false));
57500 -       }
57501 +               goto out_reflck;
57502 +       ret = vfio_pci_vga_init(vdev);
57503 +       if (ret)
57504 +               goto out_vf;
57506         vfio_pci_probe_power_state(vdev);
57508 @@ -2016,15 +2058,20 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
57509                 vfio_pci_set_power_state(vdev, PCI_D3hot);
57510         }
57512 -       return ret;
57513 +       ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
57514 +       if (ret)
57515 +               goto out_power;
57516 +       return 0;
57518 -out_vf_token:
57519 -       kfree(vdev->vf_token);
57520 +out_power:
57521 +       if (!disable_idle_d3)
57522 +               vfio_pci_set_power_state(vdev, PCI_D0);
57523 +out_vf:
57524 +       vfio_pci_vf_uninit(vdev);
57525  out_reflck:
57526         vfio_pci_reflck_put(vdev->reflck);
57527 -out_del_group_dev:
57528 -       vfio_del_group_dev(&pdev->dev);
57529  out_free:
57530 +       kfree(vdev->pm_save);
57531         kfree(vdev);
57532  out_group_put:
57533         vfio_iommu_group_put(group, &pdev->dev);
57534 @@ -2041,33 +2088,19 @@ static void vfio_pci_remove(struct pci_dev *pdev)
57535         if (!vdev)
57536                 return;
57538 -       if (vdev->vf_token) {
57539 -               WARN_ON(vdev->vf_token->users);
57540 -               mutex_destroy(&vdev->vf_token->lock);
57541 -               kfree(vdev->vf_token);
57542 -       }
57544 -       if (vdev->nb.notifier_call)
57545 -               bus_unregister_notifier(&pci_bus_type, &vdev->nb);
57547 +       vfio_pci_vf_uninit(vdev);
57548         vfio_pci_reflck_put(vdev->reflck);
57549 +       vfio_pci_vga_uninit(vdev);
57551         vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
57552 -       kfree(vdev->region);
57553 -       mutex_destroy(&vdev->ioeventfds_lock);
57555         if (!disable_idle_d3)
57556                 vfio_pci_set_power_state(vdev, PCI_D0);
57558 +       mutex_destroy(&vdev->ioeventfds_lock);
57559 +       kfree(vdev->region);
57560         kfree(vdev->pm_save);
57561         kfree(vdev);
57563 -       if (vfio_pci_is_vga(pdev)) {
57564 -               vga_client_register(pdev, NULL, NULL, NULL);
57565 -               vga_set_legacy_decoding(pdev,
57566 -                               VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
57567 -                               VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
57568 -       }
57571  static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
57572 diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
57573 index bfa4c6ef554e..c79d2f2387aa 100644
57574 --- a/drivers/vhost/vdpa.c
57575 +++ b/drivers/vhost/vdpa.c
57576 @@ -993,6 +993,7 @@ static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
57577         if (vma->vm_end - vma->vm_start != notify.size)
57578                 return -ENOTSUPP;
57580 +       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
57581         vma->vm_ops = &vhost_vdpa_vm_ops;
57582         return 0;
57584 diff --git a/drivers/video/backlight/qcom-wled.c b/drivers/video/backlight/qcom-wled.c
57585 index 091f07e7c145..e9fbe2483844 100644
57586 --- a/drivers/video/backlight/qcom-wled.c
57587 +++ b/drivers/video/backlight/qcom-wled.c
57588 @@ -336,19 +336,19 @@ static int wled3_sync_toggle(struct wled *wled)
57589         unsigned int mask = GENMASK(wled->max_string_count - 1, 0);
57591         rc = regmap_update_bits(wled->regmap,
57592 -                               wled->ctrl_addr + WLED3_SINK_REG_SYNC,
57593 +                               wled->sink_addr + WLED3_SINK_REG_SYNC,
57594                                 mask, mask);
57595         if (rc < 0)
57596                 return rc;
57598         rc = regmap_update_bits(wled->regmap,
57599 -                               wled->ctrl_addr + WLED3_SINK_REG_SYNC,
57600 +                               wled->sink_addr + WLED3_SINK_REG_SYNC,
57601                                 mask, WLED3_SINK_REG_SYNC_CLEAR);
57603         return rc;
57606 -static int wled5_sync_toggle(struct wled *wled)
57607 +static int wled5_mod_sync_toggle(struct wled *wled)
57609         int rc;
57610         u8 val;
57611 @@ -445,10 +445,23 @@ static int wled_update_status(struct backlight_device *bl)
57612                         goto unlock_mutex;
57613                 }
57615 -               rc = wled->wled_sync_toggle(wled);
57616 -               if (rc < 0) {
57617 -                       dev_err(wled->dev, "wled sync failed rc:%d\n", rc);
57618 -                       goto unlock_mutex;
57619 +               if (wled->version < 5) {
57620 +                       rc = wled->wled_sync_toggle(wled);
57621 +                       if (rc < 0) {
57622 +                               dev_err(wled->dev, "wled sync failed rc:%d\n", rc);
57623 +                               goto unlock_mutex;
57624 +                       }
57625 +               } else {
57626 +                       /*
57627 +                        * For WLED5 toggling the MOD_SYNC_BIT updates the
57628 +                        * brightness
57629 +                        */
57630 +                       rc = wled5_mod_sync_toggle(wled);
57631 +                       if (rc < 0) {
57632 +                               dev_err(wled->dev, "wled mod sync failed rc:%d\n",
57633 +                                       rc);
57634 +                               goto unlock_mutex;
57635 +                       }
57636                 }
57637         }
57639 @@ -1459,7 +1472,7 @@ static int wled_configure(struct wled *wled)
57640                 size = ARRAY_SIZE(wled5_opts);
57641                 *cfg = wled5_config_defaults;
57642                 wled->wled_set_brightness = wled5_set_brightness;
57643 -               wled->wled_sync_toggle = wled5_sync_toggle;
57644 +               wled->wled_sync_toggle = wled3_sync_toggle;
57645                 wled->wled_cabc_config = wled5_cabc_config;
57646                 wled->wled_ovp_delay = wled5_ovp_delay;
57647                 wled->wled_auto_detection_required =
57648 diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
57649 index 757d5c3f620b..ff09e57f3c38 100644
57650 --- a/drivers/video/fbdev/core/fbcmap.c
57651 +++ b/drivers/video/fbdev/core/fbcmap.c
57652 @@ -101,17 +101,17 @@ int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags)
57653                 if (!len)
57654                         return 0;
57656 -               cmap->red = kmalloc(size, flags);
57657 +               cmap->red = kzalloc(size, flags);
57658                 if (!cmap->red)
57659                         goto fail;
57660 -               cmap->green = kmalloc(size, flags);
57661 +               cmap->green = kzalloc(size, flags);
57662                 if (!cmap->green)
57663                         goto fail;
57664 -               cmap->blue = kmalloc(size, flags);
57665 +               cmap->blue = kzalloc(size, flags);
57666                 if (!cmap->blue)
57667                         goto fail;
57668                 if (transp) {
57669 -                       cmap->transp = kmalloc(size, flags);
57670 +                       cmap->transp = kzalloc(size, flags);
57671                         if (!cmap->transp)
57672                                 goto fail;
57673                 } else {
57674 diff --git a/drivers/video/fbdev/omap/hwa742.c b/drivers/video/fbdev/omap/hwa742.c
57675 index cfe63932f825..71c00ef772a3 100644
57676 --- a/drivers/video/fbdev/omap/hwa742.c
57677 +++ b/drivers/video/fbdev/omap/hwa742.c
57678 @@ -913,7 +913,7 @@ static void hwa742_resume(void)
57679                 if (hwa742_read_reg(HWA742_PLL_DIV_REG) & (1 << 7))
57680                         break;
57681                 set_current_state(TASK_UNINTERRUPTIBLE);
57682 -               schedule_timeout(msecs_to_jiffies(5));
57683 +               schedule_msec_hrtimeout((5));
57684         }
57685         hwa742_set_update_mode(hwa742.update_mode_before_suspend);
57687 diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
57688 index f1551e00eb12..f0f651e92504 100644
57689 --- a/drivers/video/fbdev/pxafb.c
57690 +++ b/drivers/video/fbdev/pxafb.c
57691 @@ -1287,7 +1287,7 @@ static int pxafb_smart_thread(void *arg)
57692                 mutex_unlock(&fbi->ctrlr_lock);
57694                 set_current_state(TASK_INTERRUPTIBLE);
57695 -               schedule_timeout(msecs_to_jiffies(30));
57696 +               schedule_msec_hrtimeout((30));
57697         }
57699         pr_debug("%s(): task ending\n", __func__);
57700 diff --git a/drivers/virt/nitro_enclaves/ne_misc_dev.c b/drivers/virt/nitro_enclaves/ne_misc_dev.c
57701 index f1964ea4b826..e21e1e86ad15 100644
57702 --- a/drivers/virt/nitro_enclaves/ne_misc_dev.c
57703 +++ b/drivers/virt/nitro_enclaves/ne_misc_dev.c
57704 @@ -1524,7 +1524,8 @@ static const struct file_operations ne_enclave_fops = {
57705   *                       enclave file descriptor to be further used for enclave
57706   *                       resources handling e.g. memory regions and CPUs.
57707   * @ne_pci_dev :       Private data associated with the PCI device.
57708 - * @slot_uid:          Generated unique slot id associated with an enclave.
57709 + * @slot_uid:          User pointer to store the generated unique slot id
57710 + *                     associated with an enclave to.
57711   *
57712   * Context: Process context. This function is called with the ne_pci_dev enclave
57713   *         mutex held.
57714 @@ -1532,7 +1533,7 @@ static const struct file_operations ne_enclave_fops = {
57715   * * Enclave fd on success.
57716   * * Negative return value on failure.
57717   */
57718 -static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 *slot_uid)
57719 +static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 __user *slot_uid)
57721         struct ne_pci_dev_cmd_reply cmd_reply = {};
57722         int enclave_fd = -1;
57723 @@ -1634,7 +1635,18 @@ static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 *slot_uid)
57725         list_add(&ne_enclave->enclave_list_entry, &ne_pci_dev->enclaves_list);
57727 -       *slot_uid = ne_enclave->slot_uid;
57728 +       if (copy_to_user(slot_uid, &ne_enclave->slot_uid, sizeof(ne_enclave->slot_uid))) {
57729 +               /*
57730 +                * As we're holding the only reference to 'enclave_file', fput()
57731 +                * will call ne_enclave_release() which will do a proper cleanup
57732 +                * of all so far allocated resources, leaving only the unused fd
57733 +                * for us to free.
57734 +                */
57735 +               fput(enclave_file);
57736 +               put_unused_fd(enclave_fd);
57738 +               return -EFAULT;
57739 +       }
57741         fd_install(enclave_fd, enclave_file);
57743 @@ -1671,34 +1683,13 @@ static long ne_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
57744         switch (cmd) {
57745         case NE_CREATE_VM: {
57746                 int enclave_fd = -1;
57747 -               struct file *enclave_file = NULL;
57748                 struct ne_pci_dev *ne_pci_dev = ne_devs.ne_pci_dev;
57749 -               int rc = -EINVAL;
57750 -               u64 slot_uid = 0;
57751 +               u64 __user *slot_uid = (void __user *)arg;
57753                 mutex_lock(&ne_pci_dev->enclaves_list_mutex);
57755 -               enclave_fd = ne_create_vm_ioctl(ne_pci_dev, &slot_uid);
57756 -               if (enclave_fd < 0) {
57757 -                       rc = enclave_fd;
57759 -                       mutex_unlock(&ne_pci_dev->enclaves_list_mutex);
57761 -                       return rc;
57762 -               }
57764 +               enclave_fd = ne_create_vm_ioctl(ne_pci_dev, slot_uid);
57765                 mutex_unlock(&ne_pci_dev->enclaves_list_mutex);
57767 -               if (copy_to_user((void __user *)arg, &slot_uid, sizeof(slot_uid))) {
57768 -                       enclave_file = fget(enclave_fd);
57769 -                       /* Decrement file refs to have release() called. */
57770 -                       fput(enclave_file);
57771 -                       fput(enclave_file);
57772 -                       put_unused_fd(enclave_fd);
57774 -                       return -EFAULT;
57775 -               }
57777                 return enclave_fd;
57778         }
57780 diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
57781 index f01d58c7a042..a3e7be96527d 100644
57782 --- a/drivers/xen/gntdev.c
57783 +++ b/drivers/xen/gntdev.c
57784 @@ -1017,8 +1017,10 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
57785                 err = mmu_interval_notifier_insert_locked(
57786                         &map->notifier, vma->vm_mm, vma->vm_start,
57787                         vma->vm_end - vma->vm_start, &gntdev_mmu_ops);
57788 -               if (err)
57789 +               if (err) {
57790 +                       map->vma = NULL;
57791                         goto out_unlock_put;
57792 +               }
57793         }
57794         mutex_unlock(&priv->lock);
57796 diff --git a/drivers/xen/unpopulated-alloc.c b/drivers/xen/unpopulated-alloc.c
57797 index e64e6befc63b..87e6b7db892f 100644
57798 --- a/drivers/xen/unpopulated-alloc.c
57799 +++ b/drivers/xen/unpopulated-alloc.c
57800 @@ -39,8 +39,10 @@ static int fill_list(unsigned int nr_pages)
57801         }
57803         pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
57804 -       if (!pgmap)
57805 +       if (!pgmap) {
57806 +               ret = -ENOMEM;
57807                 goto err_pgmap;
57808 +       }
57810         pgmap->type = MEMORY_DEVICE_GENERIC;
57811         pgmap->range = (struct range) {
57812 diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
57813 index 649f04f112dc..59c32c9b799f 100644
57814 --- a/fs/9p/vfs_file.c
57815 +++ b/fs/9p/vfs_file.c
57816 @@ -86,8 +86,8 @@ int v9fs_file_open(struct inode *inode, struct file *file)
57817                  * to work.
57818                  */
57819                 writeback_fid = v9fs_writeback_fid(file_dentry(file));
57820 -               if (IS_ERR(fid)) {
57821 -                       err = PTR_ERR(fid);
57822 +               if (IS_ERR(writeback_fid)) {
57823 +                       err = PTR_ERR(writeback_fid);
57824                         mutex_unlock(&v9inode->v_mutex);
57825                         goto out_error;
57826                 }
57827 diff --git a/fs/Kconfig b/fs/Kconfig
57828 index a55bda4233bb..f61330e4efc0 100644
57829 --- a/fs/Kconfig
57830 +++ b/fs/Kconfig
57831 @@ -145,6 +145,7 @@ menu "DOS/FAT/EXFAT/NT Filesystems"
57832  source "fs/fat/Kconfig"
57833  source "fs/exfat/Kconfig"
57834  source "fs/ntfs/Kconfig"
57835 +source "fs/ntfs3/Kconfig"
57837  endmenu
57838  endif # BLOCK
57839 diff --git a/fs/Makefile b/fs/Makefile
57840 index 3215fe205256..6bdfcf712cb1 100644
57841 --- a/fs/Makefile
57842 +++ b/fs/Makefile
57843 @@ -99,6 +99,7 @@ obj-$(CONFIG_SYSV_FS)         += sysv/
57844  obj-$(CONFIG_CIFS)             += cifs/
57845  obj-$(CONFIG_HPFS_FS)          += hpfs/
57846  obj-$(CONFIG_NTFS_FS)          += ntfs/
57847 +obj-$(CONFIG_NTFS3_FS)         += ntfs3/
57848  obj-$(CONFIG_UFS_FS)           += ufs/
57849  obj-$(CONFIG_EFS_FS)           += efs/
57850  obj-$(CONFIG_JFFS2_FS)         += jffs2/
57851 diff --git a/fs/afs/dir.c b/fs/afs/dir.c
57852 index 17548c1faf02..31251d11d576 100644
57853 --- a/fs/afs/dir.c
57854 +++ b/fs/afs/dir.c
57855 @@ -1342,6 +1342,7 @@ static int afs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
57857         afs_op_set_vnode(op, 0, dvnode);
57858         op->file[0].dv_delta = 1;
57859 +       op->file[0].modification = true;
57860         op->file[0].update_ctime = true;
57861         op->dentry      = dentry;
57862         op->create.mode = S_IFDIR | mode;
57863 @@ -1423,6 +1424,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
57865         afs_op_set_vnode(op, 0, dvnode);
57866         op->file[0].dv_delta = 1;
57867 +       op->file[0].modification = true;
57868         op->file[0].update_ctime = true;
57870         op->dentry      = dentry;
57871 @@ -1559,6 +1561,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
57873         afs_op_set_vnode(op, 0, dvnode);
57874         op->file[0].dv_delta = 1;
57875 +       op->file[0].modification = true;
57876         op->file[0].update_ctime = true;
57878         /* Try to make sure we have a callback promise on the victim. */
57879 @@ -1641,6 +1644,7 @@ static int afs_create(struct user_namespace *mnt_userns, struct inode *dir,
57881         afs_op_set_vnode(op, 0, dvnode);
57882         op->file[0].dv_delta = 1;
57883 +       op->file[0].modification = true;
57884         op->file[0].update_ctime = true;
57886         op->dentry      = dentry;
57887 @@ -1715,6 +1719,7 @@ static int afs_link(struct dentry *from, struct inode *dir,
57888         afs_op_set_vnode(op, 0, dvnode);
57889         afs_op_set_vnode(op, 1, vnode);
57890         op->file[0].dv_delta = 1;
57891 +       op->file[0].modification = true;
57892         op->file[0].update_ctime = true;
57893         op->file[1].update_ctime = true;
57895 @@ -1910,6 +1915,8 @@ static int afs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
57896         afs_op_set_vnode(op, 1, new_dvnode); /* May be same as orig_dvnode */
57897         op->file[0].dv_delta = 1;
57898         op->file[1].dv_delta = 1;
57899 +       op->file[0].modification = true;
57900 +       op->file[1].modification = true;
57901         op->file[0].update_ctime = true;
57902         op->file[1].update_ctime = true;
57904 diff --git a/fs/afs/dir_silly.c b/fs/afs/dir_silly.c
57905 index 04f75a44f243..dae9a57d7ec0 100644
57906 --- a/fs/afs/dir_silly.c
57907 +++ b/fs/afs/dir_silly.c
57908 @@ -73,6 +73,8 @@ static int afs_do_silly_rename(struct afs_vnode *dvnode, struct afs_vnode *vnode
57909         afs_op_set_vnode(op, 1, dvnode);
57910         op->file[0].dv_delta = 1;
57911         op->file[1].dv_delta = 1;
57912 +       op->file[0].modification = true;
57913 +       op->file[1].modification = true;
57914         op->file[0].update_ctime = true;
57915         op->file[1].update_ctime = true;
57917 @@ -201,6 +203,7 @@ static int afs_do_silly_unlink(struct afs_vnode *dvnode, struct afs_vnode *vnode
57918         afs_op_set_vnode(op, 0, dvnode);
57919         afs_op_set_vnode(op, 1, vnode);
57920         op->file[0].dv_delta = 1;
57921 +       op->file[0].modification = true;
57922         op->file[0].update_ctime = true;
57923         op->file[1].op_unlinked = true;
57924         op->file[1].update_ctime = true;
57925 diff --git a/fs/afs/fs_operation.c b/fs/afs/fs_operation.c
57926 index 71c58723763d..a82515b47350 100644
57927 --- a/fs/afs/fs_operation.c
57928 +++ b/fs/afs/fs_operation.c
57929 @@ -118,6 +118,8 @@ static void afs_prepare_vnode(struct afs_operation *op, struct afs_vnode_param *
57930                 vp->cb_break_before     = afs_calc_vnode_cb_break(vnode);
57931                 if (vnode->lock_state != AFS_VNODE_LOCK_NONE)
57932                         op->flags       |= AFS_OPERATION_CUR_ONLY;
57933 +               if (vp->modification)
57934 +                       set_bit(AFS_VNODE_MODIFYING, &vnode->flags);
57935         }
57937         if (vp->fid.vnode)
57938 @@ -223,6 +225,10 @@ int afs_put_operation(struct afs_operation *op)
57940         if (op->ops && op->ops->put)
57941                 op->ops->put(op);
57942 +       if (op->file[0].modification)
57943 +               clear_bit(AFS_VNODE_MODIFYING, &op->file[0].vnode->flags);
57944 +       if (op->file[1].modification && op->file[1].vnode != op->file[0].vnode)
57945 +               clear_bit(AFS_VNODE_MODIFYING, &op->file[1].vnode->flags);
57946         if (op->file[0].put_vnode)
57947                 iput(&op->file[0].vnode->vfs_inode);
57948         if (op->file[1].put_vnode)
57949 diff --git a/fs/afs/inode.c b/fs/afs/inode.c
57950 index 12be88716e4c..fddf7d54e0b7 100644
57951 --- a/fs/afs/inode.c
57952 +++ b/fs/afs/inode.c
57953 @@ -102,13 +102,13 @@ static int afs_inode_init_from_status(struct afs_operation *op,
57955         switch (status->type) {
57956         case AFS_FTYPE_FILE:
57957 -               inode->i_mode   = S_IFREG | status->mode;
57958 +               inode->i_mode   = S_IFREG | (status->mode & S_IALLUGO);
57959                 inode->i_op     = &afs_file_inode_operations;
57960                 inode->i_fop    = &afs_file_operations;
57961                 inode->i_mapping->a_ops = &afs_fs_aops;
57962                 break;
57963         case AFS_FTYPE_DIR:
57964 -               inode->i_mode   = S_IFDIR | status->mode;
57965 +               inode->i_mode   = S_IFDIR |  (status->mode & S_IALLUGO);
57966                 inode->i_op     = &afs_dir_inode_operations;
57967                 inode->i_fop    = &afs_dir_file_operations;
57968                 inode->i_mapping->a_ops = &afs_dir_aops;
57969 @@ -198,7 +198,7 @@ static void afs_apply_status(struct afs_operation *op,
57970         if (status->mode != vnode->status.mode) {
57971                 mode = inode->i_mode;
57972                 mode &= ~S_IALLUGO;
57973 -               mode |= status->mode;
57974 +               mode |= status->mode & S_IALLUGO;
57975                 WRITE_ONCE(inode->i_mode, mode);
57976         }
57978 @@ -293,8 +293,9 @@ void afs_vnode_commit_status(struct afs_operation *op, struct afs_vnode_param *v
57979                         op->flags &= ~AFS_OPERATION_DIR_CONFLICT;
57980                 }
57981         } else if (vp->scb.have_status) {
57982 -               if (vp->dv_before + vp->dv_delta != vp->scb.status.data_version &&
57983 -                   vp->speculative)
57984 +               if (vp->speculative &&
57985 +                   (test_bit(AFS_VNODE_MODIFYING, &vnode->flags) ||
57986 +                    vp->dv_before != vnode->status.data_version))
57987                         /* Ignore the result of a speculative bulk status fetch
57988                          * if it splits around a modification op, thereby
57989                          * appearing to regress the data version.
57990 @@ -910,6 +911,7 @@ int afs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
57991         }
57992         op->ctime = attr->ia_ctime;
57993         op->file[0].update_ctime = 1;
57994 +       op->file[0].modification = true;
57996         op->ops = &afs_setattr_operation;
57997         ret = afs_do_sync_operation(op);
57998 diff --git a/fs/afs/internal.h b/fs/afs/internal.h
57999 index 1627b1872812..be981a9a1add 100644
58000 --- a/fs/afs/internal.h
58001 +++ b/fs/afs/internal.h
58002 @@ -640,6 +640,7 @@ struct afs_vnode {
58003  #define AFS_VNODE_PSEUDODIR    7               /* set if Vnode is a pseudo directory */
58004  #define AFS_VNODE_NEW_CONTENT  8               /* Set if file has new content (create/trunc-0) */
58005  #define AFS_VNODE_SILLY_DELETED        9               /* Set if file has been silly-deleted */
58006 +#define AFS_VNODE_MODIFYING    10              /* Set if we're performing a modification op */
58008         struct list_head        wb_keys;        /* List of keys available for writeback */
58009         struct list_head        pending_locks;  /* locks waiting to be granted */
58010 @@ -756,6 +757,7 @@ struct afs_vnode_param {
58011         bool                    set_size:1;     /* Must update i_size */
58012         bool                    op_unlinked:1;  /* True if file was unlinked by op */
58013         bool                    speculative:1;  /* T if speculative status fetch (no vnode lock) */
58014 +       bool                    modification:1; /* Set if the content gets modified */
58015  };
58017  /*
58018 diff --git a/fs/afs/write.c b/fs/afs/write.c
58019 index eb737ed63afb..ebe3b6493fce 100644
58020 --- a/fs/afs/write.c
58021 +++ b/fs/afs/write.c
58022 @@ -450,6 +450,7 @@ static int afs_store_data(struct address_space *mapping,
58023         afs_op_set_vnode(op, 0, vnode);
58024         op->file[0].dv_delta = 1;
58025         op->store.mapping = mapping;
58026 +       op->file[0].modification = true;
58027         op->store.first = first;
58028         op->store.last = last;
58029         op->store.first_offset = offset;
58030 diff --git a/fs/block_dev.c b/fs/block_dev.c
58031 index 09d6f7229db9..a5a6a7930e5e 100644
58032 --- a/fs/block_dev.c
58033 +++ b/fs/block_dev.c
58034 @@ -1684,6 +1684,7 @@ ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
58035         struct inode *bd_inode = bdev_file_inode(file);
58036         loff_t size = i_size_read(bd_inode);
58037         struct blk_plug plug;
58038 +       size_t shorted = 0;
58039         ssize_t ret;
58041         if (bdev_read_only(I_BDEV(bd_inode)))
58042 @@ -1701,12 +1702,17 @@ ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
58043         if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
58044                 return -EOPNOTSUPP;
58046 -       iov_iter_truncate(from, size - iocb->ki_pos);
58047 +       size -= iocb->ki_pos;
58048 +       if (iov_iter_count(from) > size) {
58049 +               shorted = iov_iter_count(from) - size;
58050 +               iov_iter_truncate(from, size);
58051 +       }
58053         blk_start_plug(&plug);
58054         ret = __generic_file_write_iter(iocb, from);
58055         if (ret > 0)
58056                 ret = generic_write_sync(iocb, ret);
58057 +       iov_iter_reexpand(from, iov_iter_count(from) + shorted);
58058         blk_finish_plug(&plug);
58059         return ret;
58061 @@ -1718,13 +1724,21 @@ ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
58062         struct inode *bd_inode = bdev_file_inode(file);
58063         loff_t size = i_size_read(bd_inode);
58064         loff_t pos = iocb->ki_pos;
58065 +       size_t shorted = 0;
58066 +       ssize_t ret;
58068         if (pos >= size)
58069                 return 0;
58071         size -= pos;
58072 -       iov_iter_truncate(to, size);
58073 -       return generic_file_read_iter(iocb, to);
58074 +       if (iov_iter_count(to) > size) {
58075 +               shorted = iov_iter_count(to) - size;
58076 +               iov_iter_truncate(to, size);
58077 +       }
58079 +       ret = generic_file_read_iter(iocb, to);
58080 +       iov_iter_reexpand(to, iov_iter_count(to) + shorted);
58081 +       return ret;
58083  EXPORT_SYMBOL_GPL(blkdev_read_iter);
58085 diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
58086 index 744b99ddc28c..a7d9e147dee6 100644
58087 --- a/fs/btrfs/block-group.c
58088 +++ b/fs/btrfs/block-group.c
58089 @@ -3269,6 +3269,7 @@ static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
58090   */
58091  void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
58093 +       struct btrfs_transaction *cur_trans = trans->transaction;
58094         struct btrfs_fs_info *fs_info = trans->fs_info;
58095         struct btrfs_space_info *info;
58096         u64 left;
58097 @@ -3283,6 +3284,7 @@ void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
58098         lockdep_assert_held(&fs_info->chunk_mutex);
58100         info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
58101 +again:
58102         spin_lock(&info->lock);
58103         left = info->total_bytes - btrfs_space_info_used(info, true);
58104         spin_unlock(&info->lock);
58105 @@ -3301,6 +3303,58 @@ void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
58107         if (left < thresh) {
58108                 u64 flags = btrfs_system_alloc_profile(fs_info);
58109 +               u64 reserved = atomic64_read(&cur_trans->chunk_bytes_reserved);
58111 +               /*
58112 +                * If there's not available space for the chunk tree (system
58113 +                * space) and there are other tasks that reserved space for
58114 +                * creating a new system block group, wait for them to complete
58115 +                * the creation of their system block group and release excess
58116 +                * reserved space. We do this because:
58117 +                *
58118 +                * *) We can end up allocating more system chunks than necessary
58119 +                *    when there are multiple tasks that are concurrently
58120 +                *    allocating block groups, which can lead to exhaustion of
58121 +                *    the system array in the superblock;
58122 +                *
58123 +                * *) If we allocate extra and unnecessary system block groups,
58124 +                *    despite being empty for a long time, and possibly forever,
58125 +                *    they end not being added to the list of unused block groups
58126 +                *    because that typically happens only when deallocating the
58127 +                *    last extent from a block group - which never happens since
58128 +                *    we never allocate from them in the first place. The few
58129 +                *    exceptions are when mounting a filesystem or running scrub,
58130 +                *    which add unused block groups to the list of unused block
58131 +                *    groups, to be deleted by the cleaner kthread.
58132 +                *    And even when they are added to the list of unused block
58133 +                *    groups, it can take a long time until they get deleted,
58134 +                *    since the cleaner kthread might be sleeping or busy with
58135 +                *    other work (deleting subvolumes, running delayed iputs,
58136 +                *    defrag scheduling, etc);
58137 +                *
58138 +                * This is rare in practice, but can happen when too many tasks
58139 +                * are allocating blocks groups in parallel (via fallocate())
58140 +                * and before the one that reserved space for a new system block
58141 +                * group finishes the block group creation and releases the space
58142 +                * reserved in excess (at btrfs_create_pending_block_groups()),
58143 +                * other tasks end up here and see free system space temporarily
58144 +                * not enough for updating the chunk tree.
58145 +                *
58146 +                * We unlock the chunk mutex before waiting for such tasks and
58147 +                * lock it again after the wait, otherwise we would deadlock.
58148 +                * It is safe to do so because allocating a system chunk is the
58149 +                * first thing done while allocating a new block group.
58150 +                */
58151 +               if (reserved > trans->chunk_bytes_reserved) {
58152 +                       const u64 min_needed = reserved - thresh;
58154 +                       mutex_unlock(&fs_info->chunk_mutex);
58155 +                       wait_event(cur_trans->chunk_reserve_wait,
58156 +                          atomic64_read(&cur_trans->chunk_bytes_reserved) <=
58157 +                          min_needed);
58158 +                       mutex_lock(&fs_info->chunk_mutex);
58159 +                       goto again;
58160 +               }
58162                 /*
58163                  * Ignore failure to create system chunk. We might end up not
58164 @@ -3315,8 +3369,10 @@ void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
58165                 ret = btrfs_block_rsv_add(fs_info->chunk_root,
58166                                           &fs_info->chunk_block_rsv,
58167                                           thresh, BTRFS_RESERVE_NO_FLUSH);
58168 -               if (!ret)
58169 +               if (!ret) {
58170 +                       atomic64_add(thresh, &cur_trans->chunk_bytes_reserved);
58171                         trans->chunk_bytes_reserved += thresh;
58172 +               }
58173         }
58176 diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
58177 index 28e202e89660..418903604936 100644
58178 --- a/fs/btrfs/btrfs_inode.h
58179 +++ b/fs/btrfs/btrfs_inode.h
58180 @@ -299,6 +299,21 @@ static inline void btrfs_mod_outstanding_extents(struct btrfs_inode *inode,
58181                                                   mod);
58185 + * Called every time after doing a buffered, direct IO or memory mapped write.
58186 + *
58187 + * This is to ensure that if we write to a file that was previously fsynced in
58188 + * the current transaction, then try to fsync it again in the same transaction,
58189 + * we will know that there were changes in the file and that it needs to be
58190 + * logged.
58191 + */
58192 +static inline void btrfs_set_inode_last_sub_trans(struct btrfs_inode *inode)
58194 +       spin_lock(&inode->lock);
58195 +       inode->last_sub_trans = inode->root->log_transid;
58196 +       spin_unlock(&inode->lock);
58199  static inline int btrfs_inode_in_log(struct btrfs_inode *inode, u64 generation)
58201         int ret = 0;
58202 diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
58203 index 3f4c832abfed..81387cdf334d 100644
58204 --- a/fs/btrfs/compression.c
58205 +++ b/fs/btrfs/compression.c
58206 @@ -80,10 +80,15 @@ static int compression_compress_pages(int type, struct list_head *ws,
58207         case BTRFS_COMPRESS_NONE:
58208         default:
58209                 /*
58210 -                * This can't happen, the type is validated several times
58211 -                * before we get here. As a sane fallback, return what the
58212 -                * callers will understand as 'no compression happened'.
58213 +                * This can happen when compression races with remount setting
58214 +                * it to 'no compress', while caller doesn't call
58215 +                * inode_need_compress() to check if we really need to
58216 +                * compress.
58217 +                *
58218 +                * Not a big deal, just need to inform caller that we
58219 +                * haven't allocated any pages yet.
58220                  */
58221 +               *out_pages = 0;
58222                 return -E2BIG;
58223         }
58225 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
58226 index 34b929bd5c1a..f43ce82a6aed 100644
58227 --- a/fs/btrfs/ctree.c
58228 +++ b/fs/btrfs/ctree.c
58229 @@ -1365,10 +1365,30 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
58230                                    "failed to read tree block %llu from get_old_root",
58231                                    logical);
58232                 } else {
58233 +                       struct tree_mod_elem *tm2;
58235                         btrfs_tree_read_lock(old);
58236                         eb = btrfs_clone_extent_buffer(old);
58237 +                       /*
58238 +                        * After the lookup for the most recent tree mod operation
58239 +                        * above and before we locked and cloned the extent buffer
58240 +                        * 'old', a new tree mod log operation may have been added.
58241 +                        * So lookup for a more recent one to make sure the number
58242 +                        * of mod log operations we replay is consistent with the
58243 +                        * number of items we have in the cloned extent buffer,
58244 +                        * otherwise we can hit a BUG_ON when rewinding the extent
58245 +                        * buffer.
58246 +                        */
58247 +                       tm2 = tree_mod_log_search(fs_info, logical, time_seq);
58248                         btrfs_tree_read_unlock(old);
58249                         free_extent_buffer(old);
58250 +                       ASSERT(tm2);
58251 +                       ASSERT(tm2 == tm || tm2->seq > tm->seq);
58252 +                       if (!tm2 || tm2->seq < tm->seq) {
58253 +                               free_extent_buffer(eb);
58254 +                               return NULL;
58255 +                       }
58256 +                       tm = tm2;
58257                 }
58258         } else if (old_root) {
58259                 eb_root_owner = btrfs_header_owner(eb_root);
58260 diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
58261 index 9ae776ab3967..29ef969035df 100644
58262 --- a/fs/btrfs/ctree.h
58263 +++ b/fs/btrfs/ctree.h
58264 @@ -3110,7 +3110,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
58265                                struct btrfs_inode *inode, u64 new_size,
58266                                u32 min_type);
58268 -int btrfs_start_delalloc_snapshot(struct btrfs_root *root);
58269 +int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context);
58270  int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
58271                                bool in_reclaim_context);
58272  int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
58273 diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c
58274 index 56642ca7af10..fa1c3bc93ccf 100644
58275 --- a/fs/btrfs/delalloc-space.c
58276 +++ b/fs/btrfs/delalloc-space.c
58277 @@ -311,7 +311,7 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
58278                         flush = BTRFS_RESERVE_FLUSH_LIMIT;
58280                 if (btrfs_transaction_in_commit(fs_info))
58281 -                       schedule_timeout(1);
58282 +                       schedule_min_hrtimeout();
58283         }
58285         num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
58286 diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
58287 index bf25401c9768..c1d2b6786129 100644
58288 --- a/fs/btrfs/delayed-inode.c
58289 +++ b/fs/btrfs/delayed-inode.c
58290 @@ -1589,8 +1589,8 @@ bool btrfs_readdir_get_delayed_items(struct inode *inode,
58291          * We can only do one readdir with delayed items at a time because of
58292          * item->readdir_list.
58293          */
58294 -       inode_unlock_shared(inode);
58295 -       inode_lock(inode);
58296 +       btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
58297 +       btrfs_inode_lock(inode, 0);
58299         mutex_lock(&delayed_node->mutex);
58300         item = __btrfs_first_delayed_insertion_item(delayed_node);
58301 diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
58302 index 36a3c973fda1..5b82050b871a 100644
58303 --- a/fs/btrfs/extent-tree.c
58304 +++ b/fs/btrfs/extent-tree.c
58305 @@ -1340,12 +1340,16 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
58306                 stripe = bbio->stripes;
58307                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
58308                         u64 bytes;
58309 +                       struct btrfs_device *device = stripe->dev;
58311 -                       if (!stripe->dev->bdev) {
58312 +                       if (!device->bdev) {
58313                                 ASSERT(btrfs_test_opt(fs_info, DEGRADED));
58314                                 continue;
58315                         }
58317 +                       if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
58318 +                               continue;
58320                         ret = do_discard_extent(stripe, &bytes);
58321                         if (!ret) {
58322                                 discarded_bytes += bytes;
58323 diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
58324 index 0e155f013839..abee4b62741d 100644
58325 --- a/fs/btrfs/file.c
58326 +++ b/fs/btrfs/file.c
58327 @@ -2014,14 +2014,8 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
58328         else
58329                 num_written = btrfs_buffered_write(iocb, from);
58331 -       /*
58332 -        * We also have to set last_sub_trans to the current log transid,
58333 -        * otherwise subsequent syncs to a file that's been synced in this
58334 -        * transaction will appear to have already occurred.
58335 -        */
58336 -       spin_lock(&inode->lock);
58337 -       inode->last_sub_trans = inode->root->log_transid;
58338 -       spin_unlock(&inode->lock);
58339 +       btrfs_set_inode_last_sub_trans(inode);
58341         if (num_written > 0)
58342                 num_written = generic_write_sync(iocb, num_written);
58344 @@ -2073,6 +2067,30 @@ static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
58345         return ret;
58348 +static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
58350 +       struct btrfs_inode *inode = BTRFS_I(ctx->inode);
58351 +       struct btrfs_fs_info *fs_info = inode->root->fs_info;
58353 +       if (btrfs_inode_in_log(inode, fs_info->generation) &&
58354 +           list_empty(&ctx->ordered_extents))
58355 +               return true;
58357 +       /*
58358 +        * If we are doing a fast fsync we can not bail out if the inode's
58359 +        * last_trans is <= then the last committed transaction, because we only
58360 +        * update the last_trans of the inode during ordered extent completion,
58361 +        * and for a fast fsync we don't wait for that, we only wait for the
58362 +        * writeback to complete.
58363 +        */
58364 +       if (inode->last_trans <= fs_info->last_trans_committed &&
58365 +           (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) ||
58366 +            list_empty(&ctx->ordered_extents)))
58367 +               return true;
58369 +       return false;
58372  /*
58373   * fsync call for both files and directories.  This logs the inode into
58374   * the tree log instead of forcing full commits whenever possible.
58375 @@ -2122,7 +2140,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
58376         if (ret)
58377                 goto out;
58379 -       inode_lock(inode);
58380 +       btrfs_inode_lock(inode, 0);
58382         atomic_inc(&root->log_batch);
58384 @@ -2154,7 +2172,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
58385          */
58386         ret = start_ordered_ops(inode, start, end);
58387         if (ret) {
58388 -               inode_unlock(inode);
58389 +               btrfs_inode_unlock(inode, 0);
58390                 goto out;
58391         }
58393 @@ -2191,17 +2209,8 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
58395         atomic_inc(&root->log_batch);
58397 -       /*
58398 -        * If we are doing a fast fsync we can not bail out if the inode's
58399 -        * last_trans is <= then the last committed transaction, because we only
58400 -        * update the last_trans of the inode during ordered extent completion,
58401 -        * and for a fast fsync we don't wait for that, we only wait for the
58402 -        * writeback to complete.
58403 -        */
58404         smp_mb();
58405 -       if (btrfs_inode_in_log(BTRFS_I(inode), fs_info->generation) ||
58406 -           (BTRFS_I(inode)->last_trans <= fs_info->last_trans_committed &&
58407 -            (full_sync || list_empty(&ctx.ordered_extents)))) {
58408 +       if (skip_inode_logging(&ctx)) {
58409                 /*
58410                  * We've had everything committed since the last time we were
58411                  * modified so clear this flag in case it was set for whatever
58412 @@ -2255,7 +2264,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
58413          * file again, but that will end up using the synchronization
58414          * inside btrfs_sync_log to keep things safe.
58415          */
58416 -       inode_unlock(inode);
58417 +       btrfs_inode_unlock(inode, 0);
58419         if (ret != BTRFS_NO_LOG_SYNC) {
58420                 if (!ret) {
58421 @@ -2285,7 +2294,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
58423  out_release_extents:
58424         btrfs_release_log_ctx_extents(&ctx);
58425 -       inode_unlock(inode);
58426 +       btrfs_inode_unlock(inode, 0);
58427         goto out;
58430 @@ -2735,8 +2744,6 @@ int btrfs_replace_file_extents(struct inode *inode, struct btrfs_path *path,
58431                         extent_info->file_offset += replace_len;
58432                 }
58434 -               cur_offset = drop_args.drop_end;
58436                 ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
58437                 if (ret)
58438                         break;
58439 @@ -2756,7 +2763,9 @@ int btrfs_replace_file_extents(struct inode *inode, struct btrfs_path *path,
58440                 BUG_ON(ret);    /* shouldn't happen */
58441                 trans->block_rsv = rsv;
58443 -               if (!extent_info) {
58444 +               cur_offset = drop_args.drop_end;
58445 +               len = end - cur_offset;
58446 +               if (!extent_info && len) {
58447                         ret = find_first_non_hole(BTRFS_I(inode), &cur_offset,
58448                                                   &len);
58449                         if (unlikely(ret < 0))
58450 @@ -2868,7 +2877,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
58451         if (ret)
58452                 return ret;
58454 -       inode_lock(inode);
58455 +       btrfs_inode_lock(inode, 0);
58456         ino_size = round_up(inode->i_size, fs_info->sectorsize);
58457         ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
58458         if (ret < 0)
58459 @@ -2908,7 +2917,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
58460                 truncated_block = true;
58461                 ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
58462                 if (ret) {
58463 -                       inode_unlock(inode);
58464 +                       btrfs_inode_unlock(inode, 0);
58465                         return ret;
58466                 }
58467         }
58468 @@ -3009,7 +3018,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
58469                                 ret = ret2;
58470                 }
58471         }
58472 -       inode_unlock(inode);
58473 +       btrfs_inode_unlock(inode, 0);
58474         return ret;
58477 @@ -3377,7 +3386,7 @@ static long btrfs_fallocate(struct file *file, int mode,
58479         if (mode & FALLOC_FL_ZERO_RANGE) {
58480                 ret = btrfs_zero_range(inode, offset, len, mode);
58481 -               inode_unlock(inode);
58482 +               btrfs_inode_unlock(inode, 0);
58483                 return ret;
58484         }
58486 @@ -3487,7 +3496,7 @@ static long btrfs_fallocate(struct file *file, int mode,
58487         unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
58488                              &cached_state);
58489  out:
58490 -       inode_unlock(inode);
58491 +       btrfs_inode_unlock(inode, 0);
58492         /* Let go of our reservation. */
58493         if (ret != 0 && !(mode & FALLOC_FL_ZERO_RANGE))
58494                 btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
58495 diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
58496 index 9988decd5717..ac9c2691376d 100644
58497 --- a/fs/btrfs/free-space-cache.c
58498 +++ b/fs/btrfs/free-space-cache.c
58499 @@ -3942,7 +3942,7 @@ static int cleanup_free_space_cache_v1(struct btrfs_fs_info *fs_info,
58501         struct btrfs_block_group *block_group;
58502         struct rb_node *node;
58503 -       int ret;
58504 +       int ret = 0;
58506         btrfs_info(fs_info, "cleaning free space cache v1");
58508 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
58509 index a520775949a0..8c4d2eaa5d58 100644
58510 --- a/fs/btrfs/inode.c
58511 +++ b/fs/btrfs/inode.c
58512 @@ -8619,9 +8619,7 @@ vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
58513         set_page_dirty(page);
58514         SetPageUptodate(page);
58516 -       BTRFS_I(inode)->last_trans = fs_info->generation;
58517 -       BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
58518 -       BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
58519 +       btrfs_set_inode_last_sub_trans(BTRFS_I(inode));
58521         unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
58523 @@ -9674,7 +9672,7 @@ static int start_delalloc_inodes(struct btrfs_root *root,
58524         return ret;
58527 -int btrfs_start_delalloc_snapshot(struct btrfs_root *root)
58528 +int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context)
58530         struct writeback_control wbc = {
58531                 .nr_to_write = LONG_MAX,
58532 @@ -9687,7 +9685,7 @@ int btrfs_start_delalloc_snapshot(struct btrfs_root *root)
58533         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
58534                 return -EROFS;
58536 -       return start_delalloc_inodes(root, &wbc, true, false);
58537 +       return start_delalloc_inodes(root, &wbc, true, in_reclaim_context);
58540  int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
58541 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
58542 index e8d53fea4c61..f9ecb6c0bf15 100644
58543 --- a/fs/btrfs/ioctl.c
58544 +++ b/fs/btrfs/ioctl.c
58545 @@ -226,7 +226,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
58546         if (ret)
58547                 return ret;
58549 -       inode_lock(inode);
58550 +       btrfs_inode_lock(inode, 0);
58551         fsflags = btrfs_mask_fsflags_for_type(inode, fsflags);
58552         old_fsflags = btrfs_inode_flags_to_fsflags(binode->flags);
58554 @@ -353,7 +353,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
58555   out_end_trans:
58556         btrfs_end_transaction(trans);
58557   out_unlock:
58558 -       inode_unlock(inode);
58559 +       btrfs_inode_unlock(inode, 0);
58560         mnt_drop_write_file(file);
58561         return ret;
58563 @@ -449,7 +449,7 @@ static int btrfs_ioctl_fssetxattr(struct file *file, void __user *arg)
58564         if (ret)
58565                 return ret;
58567 -       inode_lock(inode);
58568 +       btrfs_inode_lock(inode, 0);
58570         old_flags = binode->flags;
58571         old_i_flags = inode->i_flags;
58572 @@ -501,7 +501,7 @@ static int btrfs_ioctl_fssetxattr(struct file *file, void __user *arg)
58573                 inode->i_flags = old_i_flags;
58574         }
58576 -       inode_unlock(inode);
58577 +       btrfs_inode_unlock(inode, 0);
58578         mnt_drop_write_file(file);
58580         return ret;
58581 @@ -697,8 +697,6 @@ static noinline int create_subvol(struct inode *dir,
58582         btrfs_set_root_otransid(root_item, trans->transid);
58584         btrfs_tree_unlock(leaf);
58585 -       free_extent_buffer(leaf);
58586 -       leaf = NULL;
58588         btrfs_set_root_dirid(root_item, BTRFS_FIRST_FREE_OBJECTID);
58590 @@ -707,8 +705,22 @@ static noinline int create_subvol(struct inode *dir,
58591         key.type = BTRFS_ROOT_ITEM_KEY;
58592         ret = btrfs_insert_root(trans, fs_info->tree_root, &key,
58593                                 root_item);
58594 -       if (ret)
58595 +       if (ret) {
58596 +               /*
58597 +                * Since we don't abort the transaction in this case, free the
58598 +                * tree block so that we don't leak space and leave the
58599 +                * filesystem in an inconsistent state (an extent item in the
58600 +                * extent tree without backreferences). Also no need to have
58601 +                * the tree block locked since it is not in any tree at this
58602 +                * point, so no other task can find it and use it.
58603 +                */
58604 +               btrfs_free_tree_block(trans, root, leaf, 0, 1);
58605 +               free_extent_buffer(leaf);
58606                 goto fail;
58607 +       }
58609 +       free_extent_buffer(leaf);
58610 +       leaf = NULL;
58612         key.offset = (u64)-1;
58613         new_root = btrfs_get_new_fs_root(fs_info, objectid, anon_dev);
58614 @@ -1014,7 +1026,7 @@ static noinline int btrfs_mksubvol(const struct path *parent,
58615  out_dput:
58616         dput(dentry);
58617  out_unlock:
58618 -       inode_unlock(dir);
58619 +       btrfs_inode_unlock(dir, 0);
58620         return error;
58623 @@ -1034,7 +1046,7 @@ static noinline int btrfs_mksnapshot(const struct path *parent,
58624          */
58625         btrfs_drew_read_lock(&root->snapshot_lock);
58627 -       ret = btrfs_start_delalloc_snapshot(root);
58628 +       ret = btrfs_start_delalloc_snapshot(root, false);
58629         if (ret)
58630                 goto out;
58632 @@ -1612,7 +1624,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
58633                         ra_index += cluster;
58634                 }
58636 -               inode_lock(inode);
58637 +               btrfs_inode_lock(inode, 0);
58638                 if (IS_SWAPFILE(inode)) {
58639                         ret = -ETXTBSY;
58640                 } else {
58641 @@ -1621,13 +1633,13 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
58642                         ret = cluster_pages_for_defrag(inode, pages, i, cluster);
58643                 }
58644                 if (ret < 0) {
58645 -                       inode_unlock(inode);
58646 +                       btrfs_inode_unlock(inode, 0);
58647                         goto out_ra;
58648                 }
58650                 defrag_count += ret;
58651                 balance_dirty_pages_ratelimited(inode->i_mapping);
58652 -               inode_unlock(inode);
58653 +               btrfs_inode_unlock(inode, 0);
58655                 if (newer_than) {
58656                         if (newer_off == (u64)-1)
58657 @@ -1675,9 +1687,9 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
58659  out_ra:
58660         if (do_compress) {
58661 -               inode_lock(inode);
58662 +               btrfs_inode_lock(inode, 0);
58663                 BTRFS_I(inode)->defrag_compress = BTRFS_COMPRESS_NONE;
58664 -               inode_unlock(inode);
58665 +               btrfs_inode_unlock(inode, 0);
58666         }
58667         if (!file)
58668                 kfree(ra);
58669 @@ -3112,9 +3124,9 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
58670                 goto out_dput;
58671         }
58673 -       inode_lock(inode);
58674 +       btrfs_inode_lock(inode, 0);
58675         err = btrfs_delete_subvolume(dir, dentry);
58676 -       inode_unlock(inode);
58677 +       btrfs_inode_unlock(inode, 0);
58678         if (!err) {
58679                 fsnotify_rmdir(dir, dentry);
58680                 d_delete(dentry);
58681 @@ -3123,7 +3135,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
58682  out_dput:
58683         dput(dentry);
58684  out_unlock_dir:
58685 -       inode_unlock(dir);
58686 +       btrfs_inode_unlock(dir, 0);
58687  free_subvol_name:
58688         kfree(subvol_name_ptr);
58689  free_parent:
58690 diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
58691 index 985a21558437..043e3fa961e0 100644
58692 --- a/fs/btrfs/ordered-data.c
58693 +++ b/fs/btrfs/ordered-data.c
58694 @@ -995,7 +995,7 @@ int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pre,
58696         if (pre)
58697                 ret = clone_ordered_extent(ordered, 0, pre);
58698 -       if (post)
58699 +       if (ret == 0 && post)
58700                 ret = clone_ordered_extent(ordered, pre + ordered->disk_num_bytes,
58701                                            post);
58703 diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
58704 index f0b9ef13153a..2991287a71a8 100644
58705 --- a/fs/btrfs/qgroup.c
58706 +++ b/fs/btrfs/qgroup.c
58707 @@ -3579,7 +3579,7 @@ static int try_flush_qgroup(struct btrfs_root *root)
58708                 return 0;
58709         }
58711 -       ret = btrfs_start_delalloc_snapshot(root);
58712 +       ret = btrfs_start_delalloc_snapshot(root, true);
58713         if (ret < 0)
58714                 goto out;
58715         btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
58716 diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
58717 index 762881b777b3..0abbf050580d 100644
58718 --- a/fs/btrfs/reflink.c
58719 +++ b/fs/btrfs/reflink.c
58720 @@ -833,7 +833,7 @@ loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
58721                 return -EINVAL;
58723         if (same_inode)
58724 -               inode_lock(src_inode);
58725 +               btrfs_inode_lock(src_inode, 0);
58726         else
58727                 lock_two_nondirectories(src_inode, dst_inode);
58729 @@ -849,7 +849,7 @@ loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
58731  out_unlock:
58732         if (same_inode)
58733 -               inode_unlock(src_inode);
58734 +               btrfs_inode_unlock(src_inode, 0);
58735         else
58736                 unlock_two_nondirectories(src_inode, dst_inode);
58738 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
58739 index 232d5da7b7be..829dc8dcc151 100644
58740 --- a/fs/btrfs/relocation.c
58741 +++ b/fs/btrfs/relocation.c
58742 @@ -733,10 +733,12 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
58743         struct extent_buffer *eb;
58744         struct btrfs_root_item *root_item;
58745         struct btrfs_key root_key;
58746 -       int ret;
58747 +       int ret = 0;
58748 +       bool must_abort = false;
58750         root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
58751 -       BUG_ON(!root_item);
58752 +       if (!root_item)
58753 +               return ERR_PTR(-ENOMEM);
58755         root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
58756         root_key.type = BTRFS_ROOT_ITEM_KEY;
58757 @@ -748,7 +750,9 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
58758                 /* called by btrfs_init_reloc_root */
58759                 ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
58760                                       BTRFS_TREE_RELOC_OBJECTID);
58761 -               BUG_ON(ret);
58762 +               if (ret)
58763 +                       goto fail;
58765                 /*
58766                  * Set the last_snapshot field to the generation of the commit
58767                  * root - like this ctree.c:btrfs_block_can_be_shared() behaves
58768 @@ -769,9 +773,16 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
58769                  */
58770                 ret = btrfs_copy_root(trans, root, root->node, &eb,
58771                                       BTRFS_TREE_RELOC_OBJECTID);
58772 -               BUG_ON(ret);
58773 +               if (ret)
58774 +                       goto fail;
58775         }
58777 +       /*
58778 +        * We have changed references at this point, we must abort the
58779 +        * transaction if anything fails.
58780 +        */
58781 +       must_abort = true;
58783         memcpy(root_item, &root->root_item, sizeof(*root_item));
58784         btrfs_set_root_bytenr(root_item, eb->start);
58785         btrfs_set_root_level(root_item, btrfs_header_level(eb));
58786 @@ -789,14 +800,25 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
58788         ret = btrfs_insert_root(trans, fs_info->tree_root,
58789                                 &root_key, root_item);
58790 -       BUG_ON(ret);
58791 +       if (ret)
58792 +               goto fail;
58794         kfree(root_item);
58796         reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key);
58797 -       BUG_ON(IS_ERR(reloc_root));
58798 +       if (IS_ERR(reloc_root)) {
58799 +               ret = PTR_ERR(reloc_root);
58800 +               goto abort;
58801 +       }
58802         set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
58803         reloc_root->last_trans = trans->transid;
58804         return reloc_root;
58805 +fail:
58806 +       kfree(root_item);
58807 +abort:
58808 +       if (must_abort)
58809 +               btrfs_abort_transaction(trans, ret);
58810 +       return ERR_PTR(ret);
58813  /*
58814 @@ -875,7 +897,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
58815         int ret;
58817         if (!have_reloc_root(root))
58818 -               goto out;
58819 +               return 0;
58821         reloc_root = root->reloc_root;
58822         root_item = &reloc_root->root_item;
58823 @@ -908,10 +930,8 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
58825         ret = btrfs_update_root(trans, fs_info->tree_root,
58826                                 &reloc_root->root_key, root_item);
58827 -       BUG_ON(ret);
58828         btrfs_put_root(reloc_root);
58829 -out:
58830 -       return 0;
58831 +       return ret;
58834  /*
58835 @@ -1185,8 +1205,8 @@ int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
58836         int ret;
58837         int slot;
58839 -       BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
58840 -       BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
58841 +       ASSERT(src->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
58842 +       ASSERT(dest->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
58844         last_snapshot = btrfs_root_last_snapshot(&src->root_item);
58845  again:
58846 @@ -1217,7 +1237,7 @@ int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
58847         parent = eb;
58848         while (1) {
58849                 level = btrfs_header_level(parent);
58850 -               BUG_ON(level < lowest_level);
58851 +               ASSERT(level >= lowest_level);
58853                 ret = btrfs_bin_search(parent, &key, &slot);
58854                 if (ret < 0)
58855 @@ -2578,7 +2598,7 @@ static noinline_for_stack int prealloc_file_extent_cluster(
58856                 return btrfs_end_transaction(trans);
58857         }
58859 -       inode_lock(&inode->vfs_inode);
58860 +       btrfs_inode_lock(&inode->vfs_inode, 0);
58861         for (nr = 0; nr < cluster->nr; nr++) {
58862                 start = cluster->boundary[nr] - offset;
58863                 if (nr + 1 < cluster->nr)
58864 @@ -2596,7 +2616,7 @@ static noinline_for_stack int prealloc_file_extent_cluster(
58865                 if (ret)
58866                         break;
58867         }
58868 -       inode_unlock(&inode->vfs_inode);
58869 +       btrfs_inode_unlock(&inode->vfs_inode, 0);
58871         if (cur_offset < prealloc_end)
58872                 btrfs_free_reserved_data_space_noquota(inode->root->fs_info,
58873 diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
58874 index 3d9088eab2fc..b9202a1f1af1 100644
58875 --- a/fs/btrfs/scrub.c
58876 +++ b/fs/btrfs/scrub.c
58877 @@ -3682,8 +3682,8 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
58878                         spin_lock(&cache->lock);
58879                         if (!cache->to_copy) {
58880                                 spin_unlock(&cache->lock);
58881 -                               ro_set = 0;
58882 -                               goto done;
58883 +                               btrfs_put_block_group(cache);
58884 +                               goto skip;
58885                         }
58886                         spin_unlock(&cache->lock);
58887                 }
58888 @@ -3841,7 +3841,6 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
58889                                                       cache, found_key.offset))
58890                         ro_set = 0;
58892 -done:
58893                 down_write(&dev_replace->rwsem);
58894                 dev_replace->cursor_left = dev_replace->cursor_right;
58895                 dev_replace->item_needs_writeback = 1;
58896 diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
58897 index 8f323859156b..8ae8f1732fd2 100644
58898 --- a/fs/btrfs/send.c
58899 +++ b/fs/btrfs/send.c
58900 @@ -7139,7 +7139,7 @@ static int flush_delalloc_roots(struct send_ctx *sctx)
58901         int i;
58903         if (root) {
58904 -               ret = btrfs_start_delalloc_snapshot(root);
58905 +               ret = btrfs_start_delalloc_snapshot(root, false);
58906                 if (ret)
58907                         return ret;
58908                 btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
58909 @@ -7147,7 +7147,7 @@ static int flush_delalloc_roots(struct send_ctx *sctx)
58911         for (i = 0; i < sctx->clone_roots_cnt; i++) {
58912                 root = sctx->clone_roots[i].root;
58913 -               ret = btrfs_start_delalloc_snapshot(root);
58914 +               ret = btrfs_start_delalloc_snapshot(root, false);
58915                 if (ret)
58916                         return ret;
58917                 btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
58918 diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
58919 index acff6bb49a97..d56d3e7ca324 100644
58920 --- a/fs/btrfs/transaction.c
58921 +++ b/fs/btrfs/transaction.c
58922 @@ -260,6 +260,7 @@ static inline int extwriter_counter_read(struct btrfs_transaction *trans)
58923  void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
58925         struct btrfs_fs_info *fs_info = trans->fs_info;
58926 +       struct btrfs_transaction *cur_trans = trans->transaction;
58928         if (!trans->chunk_bytes_reserved)
58929                 return;
58930 @@ -268,6 +269,8 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
58932         btrfs_block_rsv_release(fs_info, &fs_info->chunk_block_rsv,
58933                                 trans->chunk_bytes_reserved, NULL);
58934 +       atomic64_sub(trans->chunk_bytes_reserved, &cur_trans->chunk_bytes_reserved);
58935 +       cond_wake_up(&cur_trans->chunk_reserve_wait);
58936         trans->chunk_bytes_reserved = 0;
58939 @@ -383,6 +386,8 @@ static noinline int join_transaction(struct btrfs_fs_info *fs_info,
58940         spin_lock_init(&cur_trans->dropped_roots_lock);
58941         INIT_LIST_HEAD(&cur_trans->releasing_ebs);
58942         spin_lock_init(&cur_trans->releasing_ebs_lock);
58943 +       atomic64_set(&cur_trans->chunk_bytes_reserved, 0);
58944 +       init_waitqueue_head(&cur_trans->chunk_reserve_wait);
58945         list_add_tail(&cur_trans->list, &fs_info->trans_list);
58946         extent_io_tree_init(fs_info, &cur_trans->dirty_pages,
58947                         IO_TREE_TRANS_DIRTY_PAGES, fs_info->btree_inode);
58948 @@ -1961,7 +1966,6 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
58949          */
58950         BUG_ON(list_empty(&cur_trans->list));
58952 -       list_del_init(&cur_trans->list);
58953         if (cur_trans == fs_info->running_transaction) {
58954                 cur_trans->state = TRANS_STATE_COMMIT_DOING;
58955                 spin_unlock(&fs_info->trans_lock);
58956 @@ -1970,6 +1974,17 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
58958                 spin_lock(&fs_info->trans_lock);
58959         }
58961 +       /*
58962 +        * Now that we know no one else is still using the transaction we can
58963 +        * remove the transaction from the list of transactions. This avoids
58964 +        * the transaction kthread from cleaning up the transaction while some
58965 +        * other task is still using it, which could result in a use-after-free
58966 +        * on things like log trees, as it forces the transaction kthread to
58967 +        * wait for this transaction to be cleaned up by us.
58968 +        */
58969 +       list_del_init(&cur_trans->list);
58971         spin_unlock(&fs_info->trans_lock);
58973         btrfs_cleanup_one_transaction(trans->transaction, fs_info);
58974 diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
58975 index 6335716e513f..364cfbb4c5c5 100644
58976 --- a/fs/btrfs/transaction.h
58977 +++ b/fs/btrfs/transaction.h
58978 @@ -96,6 +96,13 @@ struct btrfs_transaction {
58980         spinlock_t releasing_ebs_lock;
58981         struct list_head releasing_ebs;
58983 +       /*
58984 +        * The number of bytes currently reserved, by all transaction handles
58985 +        * attached to this transaction, for metadata extents of the chunk tree.
58986 +        */
58987 +       atomic64_t chunk_bytes_reserved;
58988 +       wait_queue_head_t chunk_reserve_wait;
58989  };
58991  #define __TRANS_FREEZABLE      (1U << 0)
58992 @@ -175,7 +182,7 @@ static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
58993         spin_lock(&inode->lock);
58994         inode->last_trans = trans->transaction->transid;
58995         inode->last_sub_trans = inode->root->log_transid;
58996 -       inode->last_log_commit = inode->root->last_log_commit;
58997 +       inode->last_log_commit = inode->last_sub_trans - 1;
58998         spin_unlock(&inode->lock);
59001 diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
59002 index 92a368627791..47e76e79b3d6 100644
59003 --- a/fs/btrfs/tree-log.c
59004 +++ b/fs/btrfs/tree-log.c
59005 @@ -3165,20 +3165,22 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
59006          */
59007         mutex_unlock(&root->log_mutex);
59009 -       btrfs_init_log_ctx(&root_log_ctx, NULL);
59011 -       mutex_lock(&log_root_tree->log_mutex);
59013         if (btrfs_is_zoned(fs_info)) {
59014 +               mutex_lock(&fs_info->tree_root->log_mutex);
59015                 if (!log_root_tree->node) {
59016                         ret = btrfs_alloc_log_tree_node(trans, log_root_tree);
59017                         if (ret) {
59018 -                               mutex_unlock(&log_root_tree->log_mutex);
59019 +                               mutex_unlock(&fs_info->tree_log_mutex);
59020                                 goto out;
59021                         }
59022                 }
59023 +               mutex_unlock(&fs_info->tree_root->log_mutex);
59024         }
59026 +       btrfs_init_log_ctx(&root_log_ctx, NULL);
59028 +       mutex_lock(&log_root_tree->log_mutex);
59030         index2 = log_root_tree->log_transid % 2;
59031         list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
59032         root_log_ctx.log_transid = log_root_tree->log_transid;
59033 @@ -6058,7 +6060,8 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
59034          * (since logging them is pointless, a link count of 0 means they
59035          * will never be accessible).
59036          */
59037 -       if (btrfs_inode_in_log(inode, trans->transid) ||
59038 +       if ((btrfs_inode_in_log(inode, trans->transid) &&
59039 +            list_empty(&ctx->ordered_extents)) ||
59040             inode->vfs_inode.i_nlink == 0) {
59041                 ret = BTRFS_NO_LOG_SYNC;
59042                 goto end_no_trans;
59043 diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
59044 index 1c6810bbaf8b..3912eda7905f 100644
59045 --- a/fs/btrfs/volumes.c
59046 +++ b/fs/btrfs/volumes.c
59047 @@ -4989,6 +4989,8 @@ static void init_alloc_chunk_ctl_policy_zoned(
59048                 ctl->max_chunk_size = 2 * ctl->max_stripe_size;
59049                 ctl->devs_max = min_t(int, ctl->devs_max,
59050                                       BTRFS_MAX_DEVS_SYS_CHUNK);
59051 +       } else {
59052 +               BUG();
59053         }
59055         /* We don't want a chunk larger than 10% of writable space */
59056 diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
59057 index eeb3ebe11d7a..304ce64c70a4 100644
59058 --- a/fs/btrfs/zoned.c
59059 +++ b/fs/btrfs/zoned.c
59060 @@ -342,6 +342,13 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
59061         if (!IS_ALIGNED(nr_sectors, zone_sectors))
59062                 zone_info->nr_zones++;
59064 +       if (bdev_is_zoned(bdev) && zone_info->max_zone_append_size == 0) {
59065 +               btrfs_err(fs_info, "zoned: device %pg does not support zone append",
59066 +                         bdev);
59067 +               ret = -EINVAL;
59068 +               goto out;
59069 +       }
59071         zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
59072         if (!zone_info->seq_zones) {
59073                 ret = -ENOMEM;
59074 @@ -1119,6 +1126,11 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
59075                         goto out;
59076                 }
59078 +               if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
59079 +                       ret = -EIO;
59080 +                       goto out;
59081 +               }
59083                 switch (zone.cond) {
59084                 case BLK_ZONE_COND_OFFLINE:
59085                 case BLK_ZONE_COND_READONLY:
59086 diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
59087 index 8e9626d63976..14418b02c189 100644
59088 --- a/fs/btrfs/zstd.c
59089 +++ b/fs/btrfs/zstd.c
59090 @@ -28,10 +28,10 @@
59091  /* 307s to avoid pathologically clashing with transaction commit */
59092  #define ZSTD_BTRFS_RECLAIM_JIFFIES (307 * HZ)
59094 -static ZSTD_parameters zstd_get_btrfs_parameters(unsigned int level,
59095 +static zstd_parameters zstd_get_btrfs_parameters(unsigned int level,
59096                                                  size_t src_len)
59098 -       ZSTD_parameters params = ZSTD_getParams(level, src_len, 0);
59099 +       zstd_parameters params = zstd_get_params(level, src_len);
59101         if (params.cParams.windowLog > ZSTD_BTRFS_MAX_WINDOWLOG)
59102                 params.cParams.windowLog = ZSTD_BTRFS_MAX_WINDOWLOG;
59103 @@ -48,8 +48,8 @@ struct workspace {
59104         unsigned long last_used; /* jiffies */
59105         struct list_head list;
59106         struct list_head lru_list;
59107 -       ZSTD_inBuffer in_buf;
59108 -       ZSTD_outBuffer out_buf;
59109 +       zstd_in_buffer in_buf;
59110 +       zstd_out_buffer out_buf;
59111  };
59113  /*
59114 @@ -155,12 +155,12 @@ static void zstd_calc_ws_mem_sizes(void)
59115         unsigned int level;
59117         for (level = 1; level <= ZSTD_BTRFS_MAX_LEVEL; level++) {
59118 -               ZSTD_parameters params =
59119 +               zstd_parameters params =
59120                         zstd_get_btrfs_parameters(level, ZSTD_BTRFS_MAX_INPUT);
59121                 size_t level_size =
59122                         max_t(size_t,
59123 -                             ZSTD_CStreamWorkspaceBound(params.cParams),
59124 -                             ZSTD_DStreamWorkspaceBound(ZSTD_BTRFS_MAX_INPUT));
59125 +                             zstd_cstream_workspace_bound(&params.cParams),
59126 +                             zstd_dstream_workspace_bound(ZSTD_BTRFS_MAX_INPUT));
59128                 max_size = max_t(size_t, max_size, level_size);
59129                 zstd_ws_mem_sizes[level - 1] = max_size;
59130 @@ -371,7 +371,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
59131                 unsigned long *total_in, unsigned long *total_out)
59133         struct workspace *workspace = list_entry(ws, struct workspace, list);
59134 -       ZSTD_CStream *stream;
59135 +       zstd_cstream *stream;
59136         int ret = 0;
59137         int nr_pages = 0;
59138         struct page *in_page = NULL;  /* The current page to read */
59139 @@ -381,7 +381,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
59140         unsigned long len = *total_out;
59141         const unsigned long nr_dest_pages = *out_pages;
59142         unsigned long max_out = nr_dest_pages * PAGE_SIZE;
59143 -       ZSTD_parameters params = zstd_get_btrfs_parameters(workspace->req_level,
59144 +       zstd_parameters params = zstd_get_btrfs_parameters(workspace->req_level,
59145                                                            len);
59147         *out_pages = 0;
59148 @@ -389,10 +389,10 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
59149         *total_in = 0;
59151         /* Initialize the stream */
59152 -       stream = ZSTD_initCStream(params, len, workspace->mem,
59153 +       stream = zstd_init_cstream(&params, len, workspace->mem,
59154                         workspace->size);
59155         if (!stream) {
59156 -               pr_warn("BTRFS: ZSTD_initCStream failed\n");
59157 +               pr_warn("BTRFS: zstd_init_cstream failed\n");
59158                 ret = -EIO;
59159                 goto out;
59160         }
59161 @@ -418,11 +418,11 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
59162         while (1) {
59163                 size_t ret2;
59165 -               ret2 = ZSTD_compressStream(stream, &workspace->out_buf,
59166 +               ret2 = zstd_compress_stream(stream, &workspace->out_buf,
59167                                 &workspace->in_buf);
59168 -               if (ZSTD_isError(ret2)) {
59169 -                       pr_debug("BTRFS: ZSTD_compressStream returned %d\n",
59170 -                                       ZSTD_getErrorCode(ret2));
59171 +               if (zstd_is_error(ret2)) {
59172 +                       pr_debug("BTRFS: zstd_compress_stream returned %d\n",
59173 +                                       zstd_get_error_code(ret2));
59174                         ret = -EIO;
59175                         goto out;
59176                 }
59177 @@ -487,10 +487,10 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
59178         while (1) {
59179                 size_t ret2;
59181 -               ret2 = ZSTD_endStream(stream, &workspace->out_buf);
59182 -               if (ZSTD_isError(ret2)) {
59183 -                       pr_debug("BTRFS: ZSTD_endStream returned %d\n",
59184 -                                       ZSTD_getErrorCode(ret2));
59185 +               ret2 = zstd_end_stream(stream, &workspace->out_buf);
59186 +               if (zstd_is_error(ret2)) {
59187 +                       pr_debug("BTRFS: zstd_end_stream returned %d\n",
59188 +                                       zstd_get_error_code(ret2));
59189                         ret = -EIO;
59190                         goto out;
59191                 }
59192 @@ -550,17 +550,17 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
59193         u64 disk_start = cb->start;
59194         struct bio *orig_bio = cb->orig_bio;
59195         size_t srclen = cb->compressed_len;
59196 -       ZSTD_DStream *stream;
59197 +       zstd_dstream *stream;
59198         int ret = 0;
59199         unsigned long page_in_index = 0;
59200         unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
59201         unsigned long buf_start;
59202         unsigned long total_out = 0;
59204 -       stream = ZSTD_initDStream(
59205 +       stream = zstd_init_dstream(
59206                         ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
59207         if (!stream) {
59208 -               pr_debug("BTRFS: ZSTD_initDStream failed\n");
59209 +               pr_debug("BTRFS: zstd_init_dstream failed\n");
59210                 ret = -EIO;
59211                 goto done;
59212         }
59213 @@ -576,11 +576,11 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
59214         while (1) {
59215                 size_t ret2;
59217 -               ret2 = ZSTD_decompressStream(stream, &workspace->out_buf,
59218 +               ret2 = zstd_decompress_stream(stream, &workspace->out_buf,
59219                                 &workspace->in_buf);
59220 -               if (ZSTD_isError(ret2)) {
59221 -                       pr_debug("BTRFS: ZSTD_decompressStream returned %d\n",
59222 -                                       ZSTD_getErrorCode(ret2));
59223 +               if (zstd_is_error(ret2)) {
59224 +                       pr_debug("BTRFS: zstd_decompress_stream returned %d\n",
59225 +                                       zstd_get_error_code(ret2));
59226                         ret = -EIO;
59227                         goto done;
59228                 }
59229 @@ -626,17 +626,17 @@ int zstd_decompress(struct list_head *ws, unsigned char *data_in,
59230                 size_t destlen)
59232         struct workspace *workspace = list_entry(ws, struct workspace, list);
59233 -       ZSTD_DStream *stream;
59234 +       zstd_dstream *stream;
59235         int ret = 0;
59236         size_t ret2;
59237         unsigned long total_out = 0;
59238         unsigned long pg_offset = 0;
59239         char *kaddr;
59241 -       stream = ZSTD_initDStream(
59242 +       stream = zstd_init_dstream(
59243                         ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
59244         if (!stream) {
59245 -               pr_warn("BTRFS: ZSTD_initDStream failed\n");
59246 +               pr_warn("BTRFS: zstd_init_dstream failed\n");
59247                 ret = -EIO;
59248                 goto finish;
59249         }
59250 @@ -660,15 +660,15 @@ int zstd_decompress(struct list_head *ws, unsigned char *data_in,
59252                 /* Check if the frame is over and we still need more input */
59253                 if (ret2 == 0) {
59254 -                       pr_debug("BTRFS: ZSTD_decompressStream ended early\n");
59255 +                       pr_debug("BTRFS: zstd_decompress_stream ended early\n");
59256                         ret = -EIO;
59257                         goto finish;
59258                 }
59259 -               ret2 = ZSTD_decompressStream(stream, &workspace->out_buf,
59260 +               ret2 = zstd_decompress_stream(stream, &workspace->out_buf,
59261                                 &workspace->in_buf);
59262 -               if (ZSTD_isError(ret2)) {
59263 -                       pr_debug("BTRFS: ZSTD_decompressStream returned %d\n",
59264 -                                       ZSTD_getErrorCode(ret2));
59265 +               if (zstd_is_error(ret2)) {
59266 +                       pr_debug("BTRFS: zstd_decompress_stream returned %d\n",
59267 +                                       zstd_get_error_code(ret2));
59268                         ret = -EIO;
59269                         goto finish;
59270                 }
59271 diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
59272 index 570731c4d019..d405ba801492 100644
59273 --- a/fs/ceph/caps.c
59274 +++ b/fs/ceph/caps.c
59275 @@ -1867,6 +1867,7 @@ static int try_nonblocking_invalidate(struct inode *inode)
59276         u32 invalidating_gen = ci->i_rdcache_gen;
59278         spin_unlock(&ci->i_ceph_lock);
59279 +       ceph_fscache_invalidate(inode);
59280         invalidate_mapping_pages(&inode->i_data, 0, -1);
59281         spin_lock(&ci->i_ceph_lock);
59283 diff --git a/fs/ceph/export.c b/fs/ceph/export.c
59284 index e088843a7734..042bb4a02c0a 100644
59285 --- a/fs/ceph/export.c
59286 +++ b/fs/ceph/export.c
59287 @@ -129,6 +129,10 @@ static struct inode *__lookup_inode(struct super_block *sb, u64 ino)
59289         vino.ino = ino;
59290         vino.snap = CEPH_NOSNAP;
59292 +       if (ceph_vino_is_reserved(vino))
59293 +               return ERR_PTR(-ESTALE);
59295         inode = ceph_find_inode(sb, vino);
59296         if (!inode) {
59297                 struct ceph_mds_request *req;
59298 @@ -178,8 +182,10 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino)
59299                 return ERR_CAST(inode);
59300         /* We need LINK caps to reliably check i_nlink */
59301         err = ceph_do_getattr(inode, CEPH_CAP_LINK_SHARED, false);
59302 -       if (err)
59303 +       if (err) {
59304 +               iput(inode);
59305                 return ERR_PTR(err);
59306 +       }
59307         /* -ESTALE if inode as been unlinked and no file is open */
59308         if ((inode->i_nlink == 0) && (atomic_read(&inode->i_count) == 1)) {
59309                 iput(inode);
59310 @@ -212,6 +218,10 @@ static struct dentry *__snapfh_to_dentry(struct super_block *sb,
59311                 vino.ino = sfh->ino;
59312                 vino.snap = sfh->snapid;
59313         }
59315 +       if (ceph_vino_is_reserved(vino))
59316 +               return ERR_PTR(-ESTALE);
59318         inode = ceph_find_inode(sb, vino);
59319         if (inode)
59320                 return d_obtain_alias(inode);
59321 diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
59322 index 156f849f5385..179d2ef69a24 100644
59323 --- a/fs/ceph/inode.c
59324 +++ b/fs/ceph/inode.c
59325 @@ -56,6 +56,9 @@ struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
59327         struct inode *inode;
59329 +       if (ceph_vino_is_reserved(vino))
59330 +               return ERR_PTR(-EREMOTEIO);
59332         inode = iget5_locked(sb, (unsigned long)vino.ino, ceph_ino_compare,
59333                              ceph_set_ino_cb, &vino);
59334         if (!inode)
59335 @@ -87,14 +90,15 @@ struct inode *ceph_get_snapdir(struct inode *parent)
59336         inode->i_mtime = parent->i_mtime;
59337         inode->i_ctime = parent->i_ctime;
59338         inode->i_atime = parent->i_atime;
59339 -       inode->i_op = &ceph_snapdir_iops;
59340 -       inode->i_fop = &ceph_snapdir_fops;
59341 -       ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
59342         ci->i_rbytes = 0;
59343         ci->i_btime = ceph_inode(parent)->i_btime;
59345 -       if (inode->i_state & I_NEW)
59346 +       if (inode->i_state & I_NEW) {
59347 +               inode->i_op = &ceph_snapdir_iops;
59348 +               inode->i_fop = &ceph_snapdir_fops;
59349 +               ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
59350                 unlock_new_inode(inode);
59351 +       }
59353         return inode;
59355 @@ -1863,6 +1867,7 @@ static void ceph_do_invalidate_pages(struct inode *inode)
59356         orig_gen = ci->i_rdcache_gen;
59357         spin_unlock(&ci->i_ceph_lock);
59359 +       ceph_fscache_invalidate(inode);
59360         if (invalidate_inode_pages2(inode->i_mapping) < 0) {
59361                 pr_err("invalidate_pages %p fails\n", inode);
59362         }
59363 diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
59364 index d87bd852ed96..298cb0b3d28c 100644
59365 --- a/fs/ceph/mds_client.c
59366 +++ b/fs/ceph/mds_client.c
59367 @@ -433,6 +433,13 @@ static int ceph_parse_deleg_inos(void **p, void *end,
59369                 ceph_decode_64_safe(p, end, start, bad);
59370                 ceph_decode_64_safe(p, end, len, bad);
59372 +               /* Don't accept a delegation of system inodes */
59373 +               if (start < CEPH_INO_SYSTEM_BASE) {
59374 +                       pr_warn_ratelimited("ceph: ignoring reserved inode range delegation (start=0x%llx len=0x%llx)\n",
59375 +                                       start, len);
59376 +                       continue;
59377 +               }
59378                 while (len--) {
59379                         int err = xa_insert(&s->s_delegated_inos, ino = start++,
59380                                             DELEGATED_INO_AVAILABLE,
59381 diff --git a/fs/ceph/super.h b/fs/ceph/super.h
59382 index c48bb30c8d70..1d2fe70439bd 100644
59383 --- a/fs/ceph/super.h
59384 +++ b/fs/ceph/super.h
59385 @@ -529,10 +529,34 @@ static inline int ceph_ino_compare(struct inode *inode, void *data)
59386                 ci->i_vino.snap == pvino->snap;
59390 + * The MDS reserves a set of inodes for its own usage. These should never
59391 + * be accessible by clients, and so the MDS has no reason to ever hand these
59392 + * out. The range is CEPH_MDS_INO_MDSDIR_OFFSET..CEPH_INO_SYSTEM_BASE.
59393 + *
59394 + * These come from src/mds/mdstypes.h in the ceph sources.
59395 + */
59396 +#define CEPH_MAX_MDS           0x100
59397 +#define CEPH_NUM_STRAY         10
59398 +#define CEPH_MDS_INO_MDSDIR_OFFSET     (1 * CEPH_MAX_MDS)
59399 +#define CEPH_INO_SYSTEM_BASE           ((6*CEPH_MAX_MDS) + (CEPH_MAX_MDS * CEPH_NUM_STRAY))
59401 +static inline bool ceph_vino_is_reserved(const struct ceph_vino vino)
59403 +       if (vino.ino < CEPH_INO_SYSTEM_BASE &&
59404 +           vino.ino >= CEPH_MDS_INO_MDSDIR_OFFSET) {
59405 +               WARN_RATELIMIT(1, "Attempt to access reserved inode number 0x%llx", vino.ino);
59406 +               return true;
59407 +       }
59408 +       return false;
59411  static inline struct inode *ceph_find_inode(struct super_block *sb,
59412                                             struct ceph_vino vino)
59414 +       if (ceph_vino_is_reserved(vino))
59415 +               return NULL;
59417         /*
59418          * NB: The hashval will be run through the fs/inode.c hash function
59419          * anyway, so there is no need to squash the inode number down to
59420 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
59421 index 5ddd20b62484..fa896a1c8b07 100644
59422 --- a/fs/cifs/cifsfs.c
59423 +++ b/fs/cifs/cifsfs.c
59424 @@ -834,7 +834,7 @@ cifs_smb3_do_mount(struct file_system_type *fs_type,
59425                 goto out;
59426         }
59428 -       rc = cifs_setup_volume_info(cifs_sb->ctx, NULL, old_ctx->UNC);
59429 +       rc = cifs_setup_volume_info(cifs_sb->ctx, NULL, NULL);
59430         if (rc) {
59431                 root = ERR_PTR(rc);
59432                 goto out;
59433 diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
59434 index 24668eb006c6..3d62d52d730b 100644
59435 --- a/fs/cifs/connect.c
59436 +++ b/fs/cifs/connect.c
59437 @@ -488,6 +488,7 @@ server_unresponsive(struct TCP_Server_Info *server)
59438          */
59439         if ((server->tcpStatus == CifsGood ||
59440             server->tcpStatus == CifsNeedNegotiate) &&
59441 +           (!server->ops->can_echo || server->ops->can_echo(server)) &&
59442             time_after(jiffies, server->lstrp + 3 * server->echo_interval)) {
59443                 cifs_server_dbg(VFS, "has not responded in %lu seconds. Reconnecting...\n",
59444                          (3 * server->echo_interval) / HZ);
59445 @@ -3175,17 +3176,29 @@ static int do_dfs_failover(const char *path, const char *full_path, struct cifs_
59446  int
59447  cifs_setup_volume_info(struct smb3_fs_context *ctx, const char *mntopts, const char *devname)
59449 -       int rc = 0;
59450 +       int rc;
59452 -       smb3_parse_devname(devname, ctx);
59453 +       if (devname) {
59454 +               cifs_dbg(FYI, "%s: devname=%s\n", __func__, devname);
59455 +               rc = smb3_parse_devname(devname, ctx);
59456 +               if (rc) {
59457 +                       cifs_dbg(VFS, "%s: failed to parse %s: %d\n", __func__, devname, rc);
59458 +                       return rc;
59459 +               }
59460 +       }
59462         if (mntopts) {
59463                 char *ip;
59465 -               cifs_dbg(FYI, "%s: mntopts=%s\n", __func__, mntopts);
59466                 rc = smb3_parse_opt(mntopts, "ip", &ip);
59467 -               if (!rc && !cifs_convert_address((struct sockaddr *)&ctx->dstaddr, ip,
59468 -                                                strlen(ip))) {
59469 +               if (rc) {
59470 +                       cifs_dbg(VFS, "%s: failed to parse ip options: %d\n", __func__, rc);
59471 +                       return rc;
59472 +               }
59474 +               rc = cifs_convert_address((struct sockaddr *)&ctx->dstaddr, ip, strlen(ip));
59475 +               kfree(ip);
59476 +               if (!rc) {
59477                         cifs_dbg(VFS, "%s: failed to convert ip address\n", __func__);
59478                         return -EINVAL;
59479                 }
59480 @@ -3205,7 +3218,7 @@ cifs_setup_volume_info(struct smb3_fs_context *ctx, const char *mntopts, const c
59481                 return -EINVAL;
59482         }
59484 -       return rc;
59485 +       return 0;
59488  static int
59489 diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
59490 index 78889024a7ed..a7253eb2e955 100644
59491 --- a/fs/cifs/fs_context.c
59492 +++ b/fs/cifs/fs_context.c
59493 @@ -475,6 +475,7 @@ smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx)
59495         /* move "pos" up to delimiter or NULL */
59496         pos += len;
59497 +       kfree(ctx->UNC);
59498         ctx->UNC = kstrndup(devname, pos - devname, GFP_KERNEL);
59499         if (!ctx->UNC)
59500                 return -ENOMEM;
59501 @@ -485,6 +486,9 @@ smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx)
59502         if (*pos == '/' || *pos == '\\')
59503                 pos++;
59505 +       kfree(ctx->prepath);
59506 +       ctx->prepath = NULL;
59508         /* If pos is NULL then no prepath */
59509         if (!*pos)
59510                 return 0;
59511 @@ -995,6 +999,9 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
59512                         goto cifs_parse_mount_err;
59513                 }
59514                 ctx->max_channels = result.uint_32;
59515 +               /* If more than one channel requested ... they want multichan */
59516 +               if (result.uint_32 > 1)
59517 +                       ctx->multichannel = true;
59518                 break;
59519         case Opt_handletimeout:
59520                 ctx->handle_timeout = result.uint_32;
59521 diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
59522 index 63d517b9f2ff..a92a1fb7cb52 100644
59523 --- a/fs/cifs/sess.c
59524 +++ b/fs/cifs/sess.c
59525 @@ -97,6 +97,12 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
59526                 return 0;
59527         }
59529 +       if (!(ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
59530 +               cifs_dbg(VFS, "server %s does not support multichannel\n", ses->server->hostname);
59531 +               ses->chan_max = 1;
59532 +               return 0;
59533 +       }
59535         /*
59536          * Make a copy of the iface list at the time and use that
59537          * instead so as to not hold the iface spinlock for opening
59538 diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
59539 index f703204fb185..5df6daacc230 100644
59540 --- a/fs/cifs/smb2ops.c
59541 +++ b/fs/cifs/smb2ops.c
59542 @@ -1763,18 +1763,14 @@ smb2_ioctl_query_info(const unsigned int xid,
59543         }
59545   iqinf_exit:
59546 -       kfree(vars);
59547 -       kfree(buffer);
59548 -       SMB2_open_free(&rqst[0]);
59549 -       if (qi.flags & PASSTHRU_FSCTL)
59550 -               SMB2_ioctl_free(&rqst[1]);
59551 -       else
59552 -               SMB2_query_info_free(&rqst[1]);
59554 -       SMB2_close_free(&rqst[2]);
59555 +       cifs_small_buf_release(rqst[0].rq_iov[0].iov_base);
59556 +       cifs_small_buf_release(rqst[1].rq_iov[0].iov_base);
59557 +       cifs_small_buf_release(rqst[2].rq_iov[0].iov_base);
59558         free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
59559         free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
59560         free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
59561 +       kfree(vars);
59562 +       kfree(buffer);
59563         return rc;
59565  e_fault:
59566 @@ -2232,7 +2228,7 @@ smb3_notify(const unsigned int xid, struct file *pfile,
59568         cifs_sb = CIFS_SB(inode->i_sb);
59570 -       utf16_path = cifs_convert_path_to_utf16(path + 1, cifs_sb);
59571 +       utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
59572         if (utf16_path == NULL) {
59573                 rc = -ENOMEM;
59574                 goto notify_exit;
59575 @@ -4178,7 +4174,7 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
59576         }
59577         spin_unlock(&cifs_tcp_ses_lock);
59579 -       return 1;
59580 +       return -EAGAIN;
59582  /*
59583   * Encrypt or decrypt @rqst message. @rqst[0] has the following format:
59584 diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
59585 index 2199a9bfae8f..29272d99102c 100644
59586 --- a/fs/cifs/smb2pdu.c
59587 +++ b/fs/cifs/smb2pdu.c
59588 @@ -841,6 +841,8 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
59589                 req->SecurityMode = 0;
59591         req->Capabilities = cpu_to_le32(server->vals->req_capabilities);
59592 +       if (ses->chan_max > 1)
59593 +               req->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);
59595         /* ClientGUID must be zero for SMB2.02 dialect */
59596         if (server->vals->protocol_id == SMB20_PROT_ID)
59597 @@ -1032,6 +1034,9 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
59599         pneg_inbuf->Capabilities =
59600                         cpu_to_le32(server->vals->req_capabilities);
59601 +       if (tcon->ses->chan_max > 1)
59602 +               pneg_inbuf->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);
59604         memcpy(pneg_inbuf->Guid, server->client_guid,
59605                                         SMB2_CLIENT_GUID_SIZE);
59607 diff --git a/fs/dax.c b/fs/dax.c
59608 index b3d27fdc6775..df5485b4bddf 100644
59609 --- a/fs/dax.c
59610 +++ b/fs/dax.c
59611 @@ -144,6 +144,16 @@ struct wait_exceptional_entry_queue {
59612         struct exceptional_entry_key key;
59613  };
59615 +/**
59616 + * enum dax_wake_mode: waitqueue wakeup behaviour
59617 + * @WAKE_ALL: wake all waiters in the waitqueue
59618 + * @WAKE_NEXT: wake only the first waiter in the waitqueue
59619 + */
59620 +enum dax_wake_mode {
59621 +       WAKE_ALL,
59622 +       WAKE_NEXT,
59625  static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
59626                 void *entry, struct exceptional_entry_key *key)
59628 @@ -182,7 +192,8 @@ static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
59629   * The important information it's conveying is whether the entry at
59630   * this index used to be a PMD entry.
59631   */
59632 -static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
59633 +static void dax_wake_entry(struct xa_state *xas, void *entry,
59634 +                          enum dax_wake_mode mode)
59636         struct exceptional_entry_key key;
59637         wait_queue_head_t *wq;
59638 @@ -196,7 +207,7 @@ static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
59639          * must be in the waitqueue and the following check will see them.
59640          */
59641         if (waitqueue_active(wq))
59642 -               __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
59643 +               __wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);
59646  /*
59647 @@ -264,11 +275,11 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry)
59648         finish_wait(wq, &ewait.wait);
59651 -static void put_unlocked_entry(struct xa_state *xas, void *entry)
59652 +static void put_unlocked_entry(struct xa_state *xas, void *entry,
59653 +                              enum dax_wake_mode mode)
59655 -       /* If we were the only waiter woken, wake the next one */
59656         if (entry && !dax_is_conflict(entry))
59657 -               dax_wake_entry(xas, entry, false);
59658 +               dax_wake_entry(xas, entry, mode);
59661  /*
59662 @@ -286,7 +297,7 @@ static void dax_unlock_entry(struct xa_state *xas, void *entry)
59663         old = xas_store(xas, entry);
59664         xas_unlock_irq(xas);
59665         BUG_ON(!dax_is_locked(old));
59666 -       dax_wake_entry(xas, entry, false);
59667 +       dax_wake_entry(xas, entry, WAKE_NEXT);
59670  /*
59671 @@ -524,7 +535,7 @@ static void *grab_mapping_entry(struct xa_state *xas,
59673                 dax_disassociate_entry(entry, mapping, false);
59674                 xas_store(xas, NULL);   /* undo the PMD join */
59675 -               dax_wake_entry(xas, entry, true);
59676 +               dax_wake_entry(xas, entry, WAKE_ALL);
59677                 mapping->nrexceptional--;
59678                 entry = NULL;
59679                 xas_set(xas, index);
59680 @@ -622,7 +633,7 @@ struct page *dax_layout_busy_page_range(struct address_space *mapping,
59681                         entry = get_unlocked_entry(&xas, 0);
59682                 if (entry)
59683                         page = dax_busy_page(entry);
59684 -               put_unlocked_entry(&xas, entry);
59685 +               put_unlocked_entry(&xas, entry, WAKE_NEXT);
59686                 if (page)
59687                         break;
59688                 if (++scanned % XA_CHECK_SCHED)
59689 @@ -664,7 +675,7 @@ static int __dax_invalidate_entry(struct address_space *mapping,
59690         mapping->nrexceptional--;
59691         ret = 1;
59692  out:
59693 -       put_unlocked_entry(&xas, entry);
59694 +       put_unlocked_entry(&xas, entry, WAKE_ALL);
59695         xas_unlock_irq(&xas);
59696         return ret;
59698 @@ -937,13 +948,13 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
59699         xas_lock_irq(xas);
59700         xas_store(xas, entry);
59701         xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
59702 -       dax_wake_entry(xas, entry, false);
59703 +       dax_wake_entry(xas, entry, WAKE_NEXT);
59705         trace_dax_writeback_one(mapping->host, index, count);
59706         return ret;
59708   put_unlocked:
59709 -       put_unlocked_entry(xas, entry);
59710 +       put_unlocked_entry(xas, entry, WAKE_NEXT);
59711         return ret;
59714 @@ -1684,7 +1695,7 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
59715         /* Did we race with someone splitting entry or so? */
59716         if (!entry || dax_is_conflict(entry) ||
59717             (order == 0 && !dax_is_pte_entry(entry))) {
59718 -               put_unlocked_entry(&xas, entry);
59719 +               put_unlocked_entry(&xas, entry, WAKE_NEXT);
59720                 xas_unlock_irq(&xas);
59721                 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
59722                                                       VM_FAULT_NOPAGE);
59723 diff --git a/fs/dcache.c b/fs/dcache.c
59724 index 7d24ff7eb206..9deb97404201 100644
59725 --- a/fs/dcache.c
59726 +++ b/fs/dcache.c
59727 @@ -71,7 +71,7 @@
59728   * If no ancestor relationship:
59729   * arbitrary, since it's serialized on rename_lock
59730   */
59731 -int sysctl_vfs_cache_pressure __read_mostly = 100;
59732 +int sysctl_vfs_cache_pressure __read_mostly = 50;
59733  EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
59735  __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
59736 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
59737 index 22e86ae4dd5a..1d252164d97b 100644
59738 --- a/fs/debugfs/inode.c
59739 +++ b/fs/debugfs/inode.c
59740 @@ -35,7 +35,7 @@
59741  static struct vfsmount *debugfs_mount;
59742  static int debugfs_mount_count;
59743  static bool debugfs_registered;
59744 -static unsigned int debugfs_allow = DEFAULT_DEBUGFS_ALLOW_BITS;
59745 +static unsigned int debugfs_allow __ro_after_init = DEFAULT_DEBUGFS_ALLOW_BITS;
59747  /*
59748   * Don't allow access attributes to be changed whilst the kernel is locked down
59749 diff --git a/fs/dlm/config.c b/fs/dlm/config.c
59750 index 49c5f9407098..88d95d96e36c 100644
59751 --- a/fs/dlm/config.c
59752 +++ b/fs/dlm/config.c
59753 @@ -125,7 +125,7 @@ static ssize_t cluster_cluster_name_store(struct config_item *item,
59754  CONFIGFS_ATTR(cluster_, cluster_name);
59756  static ssize_t cluster_set(struct dlm_cluster *cl, unsigned int *cl_field,
59757 -                          int *info_field, bool (*check_cb)(unsigned int x),
59758 +                          int *info_field, int (*check_cb)(unsigned int x),
59759                            const char *buf, size_t len)
59761         unsigned int x;
59762 @@ -137,8 +137,11 @@ static ssize_t cluster_set(struct dlm_cluster *cl, unsigned int *cl_field,
59763         if (rc)
59764                 return rc;
59766 -       if (check_cb && check_cb(x))
59767 -               return -EINVAL;
59768 +       if (check_cb) {
59769 +               rc = check_cb(x);
59770 +               if (rc)
59771 +                       return rc;
59772 +       }
59774         *cl_field = x;
59775         *info_field = x;
59776 @@ -161,17 +164,53 @@ static ssize_t cluster_##name##_show(struct config_item *item, char *buf)     \
59777  }                                                                             \
59778  CONFIGFS_ATTR(cluster_, name);
59780 -static bool dlm_check_zero(unsigned int x)
59781 +static int dlm_check_protocol_and_dlm_running(unsigned int x)
59783 +       switch (x) {
59784 +       case 0:
59785 +               /* TCP */
59786 +               break;
59787 +       case 1:
59788 +               /* SCTP */
59789 +               break;
59790 +       default:
59791 +               return -EINVAL;
59792 +       }
59794 +       if (dlm_allow_conn)
59795 +               return -EBUSY;
59797 +       return 0;
59800 +static int dlm_check_zero_and_dlm_running(unsigned int x)
59802 +       if (!x)
59803 +               return -EINVAL;
59805 +       if (dlm_allow_conn)
59806 +               return -EBUSY;
59808 +       return 0;
59811 +static int dlm_check_zero(unsigned int x)
59813 -       return !x;
59814 +       if (!x)
59815 +               return -EINVAL;
59817 +       return 0;
59820 -static bool dlm_check_buffer_size(unsigned int x)
59821 +static int dlm_check_buffer_size(unsigned int x)
59823 -       return (x < DEFAULT_BUFFER_SIZE);
59824 +       if (x < DEFAULT_BUFFER_SIZE)
59825 +               return -EINVAL;
59827 +       return 0;
59830 -CLUSTER_ATTR(tcp_port, dlm_check_zero);
59831 +CLUSTER_ATTR(tcp_port, dlm_check_zero_and_dlm_running);
59832  CLUSTER_ATTR(buffer_size, dlm_check_buffer_size);
59833  CLUSTER_ATTR(rsbtbl_size, dlm_check_zero);
59834  CLUSTER_ATTR(recover_timer, dlm_check_zero);
59835 @@ -179,7 +218,7 @@ CLUSTER_ATTR(toss_secs, dlm_check_zero);
59836  CLUSTER_ATTR(scan_secs, dlm_check_zero);
59837  CLUSTER_ATTR(log_debug, NULL);
59838  CLUSTER_ATTR(log_info, NULL);
59839 -CLUSTER_ATTR(protocol, NULL);
59840 +CLUSTER_ATTR(protocol, dlm_check_protocol_and_dlm_running);
59841  CLUSTER_ATTR(mark, NULL);
59842  CLUSTER_ATTR(timewarn_cs, dlm_check_zero);
59843  CLUSTER_ATTR(waitwarn_us, NULL);
59844 @@ -688,6 +727,7 @@ static ssize_t comm_mark_show(struct config_item *item, char *buf)
59845  static ssize_t comm_mark_store(struct config_item *item, const char *buf,
59846                                size_t len)
59848 +       struct dlm_comm *comm;
59849         unsigned int mark;
59850         int rc;
59852 @@ -695,7 +735,15 @@ static ssize_t comm_mark_store(struct config_item *item, const char *buf,
59853         if (rc)
59854                 return rc;
59856 -       config_item_to_comm(item)->mark = mark;
59857 +       if (mark == 0)
59858 +               mark = dlm_config.ci_mark;
59860 +       comm = config_item_to_comm(item);
59861 +       rc = dlm_lowcomms_nodes_set_mark(comm->nodeid, mark);
59862 +       if (rc)
59863 +               return rc;
59865 +       comm->mark = mark;
59866         return len;
59869 @@ -870,24 +918,6 @@ int dlm_comm_seq(int nodeid, uint32_t *seq)
59870         return 0;
59873 -void dlm_comm_mark(int nodeid, unsigned int *mark)
59875 -       struct dlm_comm *cm;
59877 -       cm = get_comm(nodeid);
59878 -       if (!cm) {
59879 -               *mark = dlm_config.ci_mark;
59880 -               return;
59881 -       }
59883 -       if (cm->mark)
59884 -               *mark = cm->mark;
59885 -       else
59886 -               *mark = dlm_config.ci_mark;
59888 -       put_comm(cm);
59891  int dlm_our_nodeid(void)
59893         return local_comm ? local_comm->nodeid : 0;
59894 diff --git a/fs/dlm/config.h b/fs/dlm/config.h
59895 index c210250a2581..d2cd4bd20313 100644
59896 --- a/fs/dlm/config.h
59897 +++ b/fs/dlm/config.h
59898 @@ -48,7 +48,6 @@ void dlm_config_exit(void);
59899  int dlm_config_nodes(char *lsname, struct dlm_config_node **nodes_out,
59900                      int *count_out);
59901  int dlm_comm_seq(int nodeid, uint32_t *seq);
59902 -void dlm_comm_mark(int nodeid, unsigned int *mark);
59903  int dlm_our_nodeid(void);
59904  int dlm_our_addr(struct sockaddr_storage *addr, int num);
59906 diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
59907 index d6bbccb0ed15..d5bd990bcab8 100644
59908 --- a/fs/dlm/debug_fs.c
59909 +++ b/fs/dlm/debug_fs.c
59910 @@ -542,6 +542,7 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
59912                 if (bucket >= ls->ls_rsbtbl_size) {
59913                         kfree(ri);
59914 +                       ++*pos;
59915                         return NULL;
59916                 }
59917                 tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep;
59918 diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
59919 index 561dcad08ad6..c14cf2b7faab 100644
59920 --- a/fs/dlm/lockspace.c
59921 +++ b/fs/dlm/lockspace.c
59922 @@ -404,12 +404,6 @@ static int threads_start(void)
59923         return error;
59926 -static void threads_stop(void)
59928 -       dlm_scand_stop();
59929 -       dlm_lowcomms_stop();
59932  static int new_lockspace(const char *name, const char *cluster,
59933                          uint32_t flags, int lvblen,
59934                          const struct dlm_lockspace_ops *ops, void *ops_arg,
59935 @@ -702,8 +696,11 @@ int dlm_new_lockspace(const char *name, const char *cluster,
59936                 ls_count++;
59937         if (error > 0)
59938                 error = 0;
59939 -       if (!ls_count)
59940 -               threads_stop();
59941 +       if (!ls_count) {
59942 +               dlm_scand_stop();
59943 +               dlm_lowcomms_shutdown();
59944 +               dlm_lowcomms_stop();
59945 +       }
59946   out:
59947         mutex_unlock(&ls_lock);
59948         return error;
59949 @@ -788,6 +785,11 @@ static int release_lockspace(struct dlm_ls *ls, int force)
59951         dlm_recoverd_stop(ls);
59953 +       if (ls_count == 1) {
59954 +               dlm_scand_stop();
59955 +               dlm_lowcomms_shutdown();
59956 +       }
59958         dlm_callback_stop(ls);
59960         remove_lockspace(ls);
59961 @@ -880,7 +882,7 @@ int dlm_release_lockspace(void *lockspace, int force)
59962         if (!error)
59963                 ls_count--;
59964         if (!ls_count)
59965 -               threads_stop();
59966 +               dlm_lowcomms_stop();
59967         mutex_unlock(&ls_lock);
59969         return error;
59970 diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
59971 index 372c34ff8594..45c2fdaf34c4 100644
59972 --- a/fs/dlm/lowcomms.c
59973 +++ b/fs/dlm/lowcomms.c
59974 @@ -116,6 +116,7 @@ struct writequeue_entry {
59975  struct dlm_node_addr {
59976         struct list_head list;
59977         int nodeid;
59978 +       int mark;
59979         int addr_count;
59980         int curr_addr_index;
59981         struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
59982 @@ -134,7 +135,7 @@ static DEFINE_SPINLOCK(dlm_node_addrs_spin);
59983  static struct listen_connection listen_con;
59984  static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT];
59985  static int dlm_local_count;
59986 -static int dlm_allow_conn;
59987 +int dlm_allow_conn;
59989  /* Work queues */
59990  static struct workqueue_struct *recv_workqueue;
59991 @@ -303,7 +304,8 @@ static int addr_compare(const struct sockaddr_storage *x,
59994  static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
59995 -                         struct sockaddr *sa_out, bool try_new_addr)
59996 +                         struct sockaddr *sa_out, bool try_new_addr,
59997 +                         unsigned int *mark)
59999         struct sockaddr_storage sas;
60000         struct dlm_node_addr *na;
60001 @@ -331,6 +333,8 @@ static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
60002         if (!na->addr_count)
60003                 return -ENOENT;
60005 +       *mark = na->mark;
60007         if (sas_out)
60008                 memcpy(sas_out, &sas, sizeof(struct sockaddr_storage));
60010 @@ -350,7 +354,8 @@ static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
60011         return 0;
60014 -static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid)
60015 +static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid,
60016 +                         unsigned int *mark)
60018         struct dlm_node_addr *na;
60019         int rv = -EEXIST;
60020 @@ -364,6 +369,7 @@ static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid)
60021                 for (addr_i = 0; addr_i < na->addr_count; addr_i++) {
60022                         if (addr_compare(na->addr[addr_i], addr)) {
60023                                 *nodeid = na->nodeid;
60024 +                               *mark = na->mark;
60025                                 rv = 0;
60026                                 goto unlock;
60027                         }
60028 @@ -412,6 +418,7 @@ int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
60029                 new_node->nodeid = nodeid;
60030                 new_node->addr[0] = new_addr;
60031                 new_node->addr_count = 1;
60032 +               new_node->mark = dlm_config.ci_mark;
60033                 list_add(&new_node->list, &dlm_node_addrs);
60034                 spin_unlock(&dlm_node_addrs_spin);
60035                 return 0;
60036 @@ -519,6 +526,23 @@ int dlm_lowcomms_connect_node(int nodeid)
60037         return 0;
60040 +int dlm_lowcomms_nodes_set_mark(int nodeid, unsigned int mark)
60042 +       struct dlm_node_addr *na;
60044 +       spin_lock(&dlm_node_addrs_spin);
60045 +       na = find_node_addr(nodeid);
60046 +       if (!na) {
60047 +               spin_unlock(&dlm_node_addrs_spin);
60048 +               return -ENOENT;
60049 +       }
60051 +       na->mark = mark;
60052 +       spin_unlock(&dlm_node_addrs_spin);
60054 +       return 0;
60057  static void lowcomms_error_report(struct sock *sk)
60059         struct connection *con;
60060 @@ -685,10 +709,7 @@ static void shutdown_connection(struct connection *con)
60062         int ret;
60064 -       if (cancel_work_sync(&con->swork)) {
60065 -               log_print("canceled swork for node %d", con->nodeid);
60066 -               clear_bit(CF_WRITE_PENDING, &con->flags);
60067 -       }
60068 +       flush_work(&con->swork);
60070         mutex_lock(&con->sock_mutex);
60071         /* nothing to shutdown */
60072 @@ -867,7 +888,7 @@ static int accept_from_sock(struct listen_connection *con)
60074         /* Get the new node's NODEID */
60075         make_sockaddr(&peeraddr, 0, &len);
60076 -       if (addr_to_nodeid(&peeraddr, &nodeid)) {
60077 +       if (addr_to_nodeid(&peeraddr, &nodeid, &mark)) {
60078                 unsigned char *b=(unsigned char *)&peeraddr;
60079                 log_print("connect from non cluster node");
60080                 print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE, 
60081 @@ -876,9 +897,6 @@ static int accept_from_sock(struct listen_connection *con)
60082                 return -1;
60083         }
60085 -       dlm_comm_mark(nodeid, &mark);
60086 -       sock_set_mark(newsock->sk, mark);
60088         log_print("got connection from %d", nodeid);
60090         /*  Check to see if we already have a connection to this node. This
60091 @@ -892,6 +910,8 @@ static int accept_from_sock(struct listen_connection *con)
60092                 goto accept_err;
60093         }
60095 +       sock_set_mark(newsock->sk, mark);
60097         mutex_lock(&newcon->sock_mutex);
60098         if (newcon->sock) {
60099                 struct connection *othercon = newcon->othercon;
60100 @@ -908,6 +928,7 @@ static int accept_from_sock(struct listen_connection *con)
60101                         result = dlm_con_init(othercon, nodeid);
60102                         if (result < 0) {
60103                                 kfree(othercon);
60104 +                               mutex_unlock(&newcon->sock_mutex);
60105                                 goto accept_err;
60106                         }
60108 @@ -1015,8 +1036,6 @@ static void sctp_connect_to_sock(struct connection *con)
60109         struct socket *sock;
60110         unsigned int mark;
60112 -       dlm_comm_mark(con->nodeid, &mark);
60114         mutex_lock(&con->sock_mutex);
60116         /* Some odd races can cause double-connects, ignore them */
60117 @@ -1029,7 +1048,7 @@ static void sctp_connect_to_sock(struct connection *con)
60118         }
60120         memset(&daddr, 0, sizeof(daddr));
60121 -       result = nodeid_to_addr(con->nodeid, &daddr, NULL, true);
60122 +       result = nodeid_to_addr(con->nodeid, &daddr, NULL, true, &mark);
60123         if (result < 0) {
60124                 log_print("no address for nodeid %d", con->nodeid);
60125                 goto out;
60126 @@ -1104,13 +1123,11 @@ static void sctp_connect_to_sock(struct connection *con)
60127  static void tcp_connect_to_sock(struct connection *con)
60129         struct sockaddr_storage saddr, src_addr;
60130 +       unsigned int mark;
60131         int addr_len;
60132         struct socket *sock = NULL;
60133 -       unsigned int mark;
60134         int result;
60136 -       dlm_comm_mark(con->nodeid, &mark);
60138         mutex_lock(&con->sock_mutex);
60139         if (con->retries++ > MAX_CONNECT_RETRIES)
60140                 goto out;
60141 @@ -1125,15 +1142,15 @@ static void tcp_connect_to_sock(struct connection *con)
60142         if (result < 0)
60143                 goto out_err;
60145 -       sock_set_mark(sock->sk, mark);
60147         memset(&saddr, 0, sizeof(saddr));
60148 -       result = nodeid_to_addr(con->nodeid, &saddr, NULL, false);
60149 +       result = nodeid_to_addr(con->nodeid, &saddr, NULL, false, &mark);
60150         if (result < 0) {
60151                 log_print("no address for nodeid %d", con->nodeid);
60152                 goto out_err;
60153         }
60155 +       sock_set_mark(sock->sk, mark);
60157         add_sock(sock, con);
60159         /* Bind to our cluster-known address connecting to avoid
60160 @@ -1355,9 +1372,11 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
60161         struct writequeue_entry *e;
60162         int offset = 0;
60164 -       if (len > LOWCOMMS_MAX_TX_BUFFER_LEN) {
60165 -               BUILD_BUG_ON(PAGE_SIZE < LOWCOMMS_MAX_TX_BUFFER_LEN);
60166 +       if (len > DEFAULT_BUFFER_SIZE ||
60167 +           len < sizeof(struct dlm_header)) {
60168 +               BUILD_BUG_ON(PAGE_SIZE < DEFAULT_BUFFER_SIZE);
60169                 log_print("failed to allocate a buffer of size %d", len);
60170 +               WARN_ON(1);
60171                 return NULL;
60172         }
60174 @@ -1589,6 +1608,29 @@ static int work_start(void)
60175         return 0;
60178 +static void shutdown_conn(struct connection *con)
60180 +       if (con->shutdown_action)
60181 +               con->shutdown_action(con);
60184 +void dlm_lowcomms_shutdown(void)
60186 +       /* Set all the flags to prevent any
60187 +        * socket activity.
60188 +        */
60189 +       dlm_allow_conn = 0;
60191 +       if (recv_workqueue)
60192 +               flush_workqueue(recv_workqueue);
60193 +       if (send_workqueue)
60194 +               flush_workqueue(send_workqueue);
60196 +       dlm_close_sock(&listen_con.sock);
60198 +       foreach_conn(shutdown_conn);
60201  static void _stop_conn(struct connection *con, bool and_other)
60203         mutex_lock(&con->sock_mutex);
60204 @@ -1610,12 +1652,6 @@ static void stop_conn(struct connection *con)
60205         _stop_conn(con, true);
60208 -static void shutdown_conn(struct connection *con)
60210 -       if (con->shutdown_action)
60211 -               con->shutdown_action(con);
60214  static void connection_release(struct rcu_head *rcu)
60216         struct connection *con = container_of(rcu, struct connection, rcu);
60217 @@ -1672,19 +1708,6 @@ static void work_flush(void)
60219  void dlm_lowcomms_stop(void)
60221 -       /* Set all the flags to prevent any
60222 -          socket activity.
60223 -       */
60224 -       dlm_allow_conn = 0;
60226 -       if (recv_workqueue)
60227 -               flush_workqueue(recv_workqueue);
60228 -       if (send_workqueue)
60229 -               flush_workqueue(send_workqueue);
60231 -       dlm_close_sock(&listen_con.sock);
60233 -       foreach_conn(shutdown_conn);
60234         work_flush();
60235         foreach_conn(free_conn);
60236         work_stop();
60237 diff --git a/fs/dlm/lowcomms.h b/fs/dlm/lowcomms.h
60238 index 0918f9376489..48bbc4e18761 100644
60239 --- a/fs/dlm/lowcomms.h
60240 +++ b/fs/dlm/lowcomms.h
60241 @@ -14,13 +14,18 @@
60243  #define LOWCOMMS_MAX_TX_BUFFER_LEN     4096
60245 +/* switch to check if dlm is running */
60246 +extern int dlm_allow_conn;
60248  int dlm_lowcomms_start(void);
60249 +void dlm_lowcomms_shutdown(void);
60250  void dlm_lowcomms_stop(void);
60251  void dlm_lowcomms_exit(void);
60252  int dlm_lowcomms_close(int nodeid);
60253  void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc);
60254  void dlm_lowcomms_commit_buffer(void *mh);
60255  int dlm_lowcomms_connect_node(int nodeid);
60256 +int dlm_lowcomms_nodes_set_mark(int nodeid, unsigned int mark);
60257  int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len);
60259  #endif                         /* __LOWCOMMS_DOT_H__ */
60260 diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c
60261 index fde3a6afe4be..0bedfa8606a2 100644
60262 --- a/fs/dlm/midcomms.c
60263 +++ b/fs/dlm/midcomms.c
60264 @@ -49,9 +49,10 @@ int dlm_process_incoming_buffer(int nodeid, unsigned char *buf, int len)
60265                  * cannot deliver this message to upper layers
60266                  */
60267                 msglen = get_unaligned_le16(&hd->h_length);
60268 -               if (msglen > DEFAULT_BUFFER_SIZE) {
60269 -                       log_print("received invalid length header: %u, will abort message parsing",
60270 -                                 msglen);
60271 +               if (msglen > DEFAULT_BUFFER_SIZE ||
60272 +                   msglen < sizeof(struct dlm_header)) {
60273 +                       log_print("received invalid length header: %u from node %d, will abort message parsing",
60274 +                                 msglen, nodeid);
60275                         return -EBADMSG;
60276                 }
60278 diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
60279 index cdf40a54a35d..cf772c72ab2b 100644
60280 --- a/fs/ecryptfs/main.c
60281 +++ b/fs/ecryptfs/main.c
60282 @@ -492,6 +492,12 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
60283                 goto out;
60284         }
60286 +       if (!dev_name) {
60287 +               rc = -EINVAL;
60288 +               err = "Device name cannot be null";
60289 +               goto out;
60290 +       }
60292         rc = ecryptfs_parse_options(sbi, raw_data, &check_ruid);
60293         if (rc) {
60294                 err = "Error parsing options";
60295 diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h
60296 index 9ad1615f4474..e8d04d808fa6 100644
60297 --- a/fs/erofs/erofs_fs.h
60298 +++ b/fs/erofs/erofs_fs.h
60299 @@ -75,6 +75,9 @@ static inline bool erofs_inode_is_data_compressed(unsigned int datamode)
60300  #define EROFS_I_VERSION_BIT             0
60301  #define EROFS_I_DATALAYOUT_BIT          1
60303 +#define EROFS_I_ALL    \
60304 +       ((1 << (EROFS_I_DATALAYOUT_BIT + EROFS_I_DATALAYOUT_BITS)) - 1)
60306  /* 32-byte reduced form of an ondisk inode */
60307  struct erofs_inode_compact {
60308         __le16 i_format;        /* inode format hints */
60309 diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
60310 index 119fdce1b520..7ed2d7391692 100644
60311 --- a/fs/erofs/inode.c
60312 +++ b/fs/erofs/inode.c
60313 @@ -44,6 +44,13 @@ static struct page *erofs_read_inode(struct inode *inode,
60314         dic = page_address(page) + *ofs;
60315         ifmt = le16_to_cpu(dic->i_format);
60317 +       if (ifmt & ~EROFS_I_ALL) {
60318 +               erofs_err(inode->i_sb, "unsupported i_format %u of nid %llu",
60319 +                         ifmt, vi->nid);
60320 +               err = -EOPNOTSUPP;
60321 +               goto err_out;
60322 +       }
60324         vi->datalayout = erofs_inode_datalayout(ifmt);
60325         if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
60326                 erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu",
60327 diff --git a/fs/eventpoll.c b/fs/eventpoll.c
60328 index 3196474cbe24..e42477fcbfa0 100644
60329 --- a/fs/eventpoll.c
60330 +++ b/fs/eventpoll.c
60331 @@ -657,6 +657,12 @@ static void ep_done_scan(struct eventpoll *ep,
60332          */
60333         list_splice(txlist, &ep->rdllist);
60334         __pm_relax(ep->ws);
60336 +       if (!list_empty(&ep->rdllist)) {
60337 +               if (waitqueue_active(&ep->wq))
60338 +                       wake_up(&ep->wq);
60339 +       }
60341         write_unlock_irq(&ep->lock);
60344 diff --git a/fs/exec.c b/fs/exec.c
60345 index 18594f11c31f..c691d4d7720c 100644
60346 --- a/fs/exec.c
60347 +++ b/fs/exec.c
60348 @@ -1008,6 +1008,7 @@ static int exec_mmap(struct mm_struct *mm)
60349         active_mm = tsk->active_mm;
60350         tsk->active_mm = mm;
60351         tsk->mm = mm;
60352 +       lru_gen_add_mm(mm);
60353         /*
60354          * This prevents preemption while active_mm is being loaded and
60355          * it and mm are being updated, which could cause problems for
60356 @@ -1018,6 +1019,7 @@ static int exec_mmap(struct mm_struct *mm)
60357         if (!IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
60358                 local_irq_enable();
60359         activate_mm(active_mm, mm);
60360 +       lru_gen_switch_mm(active_mm, mm);
60361         if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
60362                 local_irq_enable();
60363         tsk->mm->vmacache_seqnum = 0;
60364 diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c
60365 index 761c79c3a4ba..411fb0a8da10 100644
60366 --- a/fs/exfat/balloc.c
60367 +++ b/fs/exfat/balloc.c
60368 @@ -141,10 +141,6 @@ void exfat_free_bitmap(struct exfat_sb_info *sbi)
60369         kfree(sbi->vol_amap);
60373 - * If the value of "clu" is 0, it means cluster 2 which is the first cluster of
60374 - * the cluster heap.
60375 - */
60376  int exfat_set_bitmap(struct inode *inode, unsigned int clu)
60378         int i, b;
60379 @@ -162,10 +158,6 @@ int exfat_set_bitmap(struct inode *inode, unsigned int clu)
60380         return 0;
60384 - * If the value of "clu" is 0, it means cluster 2 which is the first cluster of
60385 - * the cluster heap.
60386 - */
60387  void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync)
60389         int i, b;
60390 @@ -186,8 +178,7 @@ void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync)
60391                 int ret_discard;
60393                 ret_discard = sb_issue_discard(sb,
60394 -                       exfat_cluster_to_sector(sbi, clu +
60395 -                                               EXFAT_RESERVED_CLUSTERS),
60396 +                       exfat_cluster_to_sector(sbi, clu),
60397                         (1 << sbi->sect_per_clus_bits), GFP_NOFS, 0);
60399                 if (ret_discard == -EOPNOTSUPP) {
60400 diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
60401 index 7541d0b5d706..eda14f630def 100644
60402 --- a/fs/ext4/fast_commit.c
60403 +++ b/fs/ext4/fast_commit.c
60404 @@ -1088,8 +1088,10 @@ static int ext4_fc_perform_commit(journal_t *journal)
60405                 head.fc_tid = cpu_to_le32(
60406                         sbi->s_journal->j_running_transaction->t_tid);
60407                 if (!ext4_fc_add_tlv(sb, EXT4_FC_TAG_HEAD, sizeof(head),
60408 -                       (u8 *)&head, &crc))
60409 +                       (u8 *)&head, &crc)) {
60410 +                       ret = -ENOSPC;
60411                         goto out;
60412 +               }
60413         }
60415         spin_lock(&sbi->s_fc_lock);
60416 @@ -1734,7 +1736,7 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
60417                 }
60419                 /* Range is mapped and needs a state change */
60420 -               jbd_debug(1, "Converting from %d to %d %lld",
60421 +               jbd_debug(1, "Converting from %ld to %d %lld",
60422                                 map.m_flags & EXT4_MAP_UNWRITTEN,
60423                         ext4_ext_is_unwritten(ex), map.m_pblk);
60424                 ret = ext4_ext_replay_update_ex(inode, cur, map.m_len,
60425 diff --git a/fs/ext4/file.c b/fs/ext4/file.c
60426 index 194f5d00fa32..7924634ab0bf 100644
60427 --- a/fs/ext4/file.c
60428 +++ b/fs/ext4/file.c
60429 @@ -371,15 +371,32 @@ static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
60430  static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
60431                                  int error, unsigned int flags)
60433 -       loff_t offset = iocb->ki_pos;
60434 +       loff_t pos = iocb->ki_pos;
60435         struct inode *inode = file_inode(iocb->ki_filp);
60437         if (error)
60438                 return error;
60440 -       if (size && flags & IOMAP_DIO_UNWRITTEN)
60441 -               return ext4_convert_unwritten_extents(NULL, inode,
60442 -                                                     offset, size);
60443 +       if (size && flags & IOMAP_DIO_UNWRITTEN) {
60444 +               error = ext4_convert_unwritten_extents(NULL, inode, pos, size);
60445 +               if (error < 0)
60446 +                       return error;
60447 +       }
60448 +       /*
60449 +        * If we are extending the file, we have to update i_size here before
60450 +        * page cache gets invalidated in iomap_dio_rw(). Otherwise racing
60451 +        * buffered reads could zero out too much from page cache pages. Update
60452 +        * of on-disk size will happen later in ext4_dio_write_iter() where
60453 +        * we have enough information to also perform orphan list handling etc.
60454 +        * Note that we perform all extending writes synchronously under
60455 +        * i_rwsem held exclusively so i_size update is safe here in that case.
60456 +        * If the write was not extending, we cannot see pos > i_size here
60457 +        * because operations reducing i_size like truncate wait for all
60458 +        * outstanding DIO before updating i_size.
60459 +        */
60460 +       pos += size;
60461 +       if (pos > i_size_read(inode))
60462 +               i_size_write(inode, pos);
60464         return 0;
60466 diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
60467 index 633ae7becd61..71d321b3b984 100644
60468 --- a/fs/ext4/ialloc.c
60469 +++ b/fs/ext4/ialloc.c
60470 @@ -1292,7 +1292,8 @@ struct inode *__ext4_new_inode(struct user_namespace *mnt_userns,
60472         ei->i_extra_isize = sbi->s_want_extra_isize;
60473         ei->i_inline_off = 0;
60474 -       if (ext4_has_feature_inline_data(sb))
60475 +       if (ext4_has_feature_inline_data(sb) &&
60476 +           (!(ei->i_flags & EXT4_DAX_FL) || S_ISDIR(mode)))
60477                 ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
60478         ret = inode;
60479         err = dquot_alloc_inode(inode);
60480 @@ -1513,6 +1514,7 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
60481         handle_t *handle;
60482         ext4_fsblk_t blk;
60483         int num, ret = 0, used_blks = 0;
60484 +       unsigned long used_inos = 0;
60486         /* This should not happen, but just to be sure check this */
60487         if (sb_rdonly(sb)) {
60488 @@ -1543,22 +1545,37 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
60489          * used inodes so we need to skip blocks with used inodes in
60490          * inode table.
60491          */
60492 -       if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)))
60493 -               used_blks = DIV_ROUND_UP((EXT4_INODES_PER_GROUP(sb) -
60494 -                           ext4_itable_unused_count(sb, gdp)),
60495 -                           sbi->s_inodes_per_block);
60497 -       if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) ||
60498 -           ((group == 0) && ((EXT4_INODES_PER_GROUP(sb) -
60499 -                              ext4_itable_unused_count(sb, gdp)) <
60500 -                             EXT4_FIRST_INO(sb)))) {
60501 -               ext4_error(sb, "Something is wrong with group %u: "
60502 -                          "used itable blocks: %d; "
60503 -                          "itable unused count: %u",
60504 -                          group, used_blks,
60505 -                          ext4_itable_unused_count(sb, gdp));
60506 -               ret = 1;
60507 -               goto err_out;
60508 +       if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) {
60509 +               used_inos = EXT4_INODES_PER_GROUP(sb) -
60510 +                           ext4_itable_unused_count(sb, gdp);
60511 +               used_blks = DIV_ROUND_UP(used_inos, sbi->s_inodes_per_block);
60513 +               /* Bogus inode unused count? */
60514 +               if (used_blks < 0 || used_blks > sbi->s_itb_per_group) {
60515 +                       ext4_error(sb, "Something is wrong with group %u: "
60516 +                                  "used itable blocks: %d; "
60517 +                                  "itable unused count: %u",
60518 +                                  group, used_blks,
60519 +                                  ext4_itable_unused_count(sb, gdp));
60520 +                       ret = 1;
60521 +                       goto err_out;
60522 +               }
60524 +               used_inos += group * EXT4_INODES_PER_GROUP(sb);
60525 +               /*
60526 +                * Are there some uninitialized inodes in the inode table
60527 +                * before the first normal inode?
60528 +                */
60529 +               if ((used_blks != sbi->s_itb_per_group) &&
60530 +                    (used_inos < EXT4_FIRST_INO(sb))) {
60531 +                       ext4_error(sb, "Something is wrong with group %u: "
60532 +                                  "itable unused count: %u; "
60533 +                                  "itables initialized count: %ld",
60534 +                                  group, ext4_itable_unused_count(sb, gdp),
60535 +                                  used_inos);
60536 +                       ret = 1;
60537 +                       goto err_out;
60538 +               }
60539         }
60541         blk = ext4_inode_table(sb, gdp) + used_blks;
60542 diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
60543 index a2cf35066f46..0796bfa72829 100644
60544 --- a/fs/ext4/ioctl.c
60545 +++ b/fs/ext4/ioctl.c
60546 @@ -315,6 +315,12 @@ static void ext4_dax_dontcache(struct inode *inode, unsigned int flags)
60547  static bool dax_compatible(struct inode *inode, unsigned int oldflags,
60548                            unsigned int flags)
60550 +       /* Allow the DAX flag to be changed on inline directories */
60551 +       if (S_ISDIR(inode->i_mode)) {
60552 +               flags &= ~EXT4_INLINE_DATA_FL;
60553 +               oldflags &= ~EXT4_INLINE_DATA_FL;
60554 +       }
60556         if (flags & EXT4_DAX_FL) {
60557                 if ((oldflags & EXT4_DAX_MUT_EXCL) ||
60558                      ext4_test_inode_state(inode,
60559 diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
60560 index 795c3ff2907c..68fbeedd627b 100644
60561 --- a/fs/ext4/mmp.c
60562 +++ b/fs/ext4/mmp.c
60563 @@ -56,7 +56,7 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
60564         wait_on_buffer(bh);
60565         sb_end_write(sb);
60566         if (unlikely(!buffer_uptodate(bh)))
60567 -               return 1;
60568 +               return -EIO;
60570         return 0;
60572 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
60573 index b9693680463a..77c1cb258262 100644
60574 --- a/fs/ext4/super.c
60575 +++ b/fs/ext4/super.c
60576 @@ -667,9 +667,6 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
60577                         ext4_commit_super(sb);
60578         }
60580 -       if (sb_rdonly(sb) || continue_fs)
60581 -               return;
60583         /*
60584          * We force ERRORS_RO behavior when system is rebooting. Otherwise we
60585          * could panic during 'reboot -f' as the underlying device got already
60586 @@ -679,6 +676,10 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
60587                 panic("EXT4-fs (device %s): panic forced after error\n",
60588                         sb->s_id);
60589         }
60591 +       if (sb_rdonly(sb) || continue_fs)
60592 +               return;
60594         ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
60595         /*
60596          * Make sure updated value of ->s_mount_flags will be visible before
60597 @@ -3023,9 +3024,6 @@ static void ext4_orphan_cleanup(struct super_block *sb,
60598                 sb->s_flags &= ~SB_RDONLY;
60599         }
60600  #ifdef CONFIG_QUOTA
60601 -       /* Needed for iput() to work correctly and not trash data */
60602 -       sb->s_flags |= SB_ACTIVE;
60604         /*
60605          * Turn on quotas which were not enabled for read-only mounts if
60606          * filesystem has quota feature, so that they are updated correctly.
60607 @@ -5561,8 +5559,10 @@ static int ext4_commit_super(struct super_block *sb)
60608         struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
60609         int error = 0;
60611 -       if (!sbh || block_device_ejected(sb))
60612 -               return error;
60613 +       if (!sbh)
60614 +               return -EINVAL;
60615 +       if (block_device_ejected(sb))
60616 +               return -ENODEV;
60618         ext4_update_super(sb);
60620 diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
60621 index 77fa342de38f..582b11afb0d5 100644
60622 --- a/fs/f2fs/compress.c
60623 +++ b/fs/f2fs/compress.c
60624 @@ -123,19 +123,6 @@ static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
60625         f2fs_drop_rpages(cc, len, true);
60628 -static void f2fs_put_rpages_mapping(struct address_space *mapping,
60629 -                               pgoff_t start, int len)
60631 -       int i;
60633 -       for (i = 0; i < len; i++) {
60634 -               struct page *page = find_get_page(mapping, start + i);
60636 -               put_page(page);
60637 -               put_page(page);
60638 -       }
60641  static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
60642                 struct writeback_control *wbc, bool redirty, int unlock)
60644 @@ -164,13 +151,14 @@ int f2fs_init_compress_ctx(struct compress_ctx *cc)
60645         return cc->rpages ? 0 : -ENOMEM;
60648 -void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
60649 +void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
60651         page_array_free(cc->inode, cc->rpages, cc->cluster_size);
60652         cc->rpages = NULL;
60653         cc->nr_rpages = 0;
60654         cc->nr_cpages = 0;
60655 -       cc->cluster_idx = NULL_CLUSTER;
60656 +       if (!reuse)
60657 +               cc->cluster_idx = NULL_CLUSTER;
60660  void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
60661 @@ -351,8 +339,8 @@ static const struct f2fs_compress_ops f2fs_lz4_ops = {
60663  static int zstd_init_compress_ctx(struct compress_ctx *cc)
60665 -       ZSTD_parameters params;
60666 -       ZSTD_CStream *stream;
60667 +       zstd_parameters params;
60668 +       zstd_cstream *stream;
60669         void *workspace;
60670         unsigned int workspace_size;
60671         unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
60672 @@ -361,17 +349,17 @@ static int zstd_init_compress_ctx(struct compress_ctx *cc)
60673         if (!level)
60674                 level = F2FS_ZSTD_DEFAULT_CLEVEL;
60676 -       params = ZSTD_getParams(level, cc->rlen, 0);
60677 -       workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);
60678 +       params = zstd_get_params(F2FS_ZSTD_DEFAULT_CLEVEL, cc->rlen);
60679 +       workspace_size = zstd_cstream_workspace_bound(&params.cParams);
60681         workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
60682                                         workspace_size, GFP_NOFS);
60683         if (!workspace)
60684                 return -ENOMEM;
60686 -       stream = ZSTD_initCStream(params, 0, workspace, workspace_size);
60687 +       stream = zstd_init_cstream(&params, 0, workspace, workspace_size);
60688         if (!stream) {
60689 -               printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initCStream failed\n",
60690 +               printk_ratelimited("%sF2FS-fs (%s): %s zstd_init_cstream failed\n",
60691                                 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
60692                                 __func__);
60693                 kvfree(workspace);
60694 @@ -394,9 +382,9 @@ static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
60696  static int zstd_compress_pages(struct compress_ctx *cc)
60698 -       ZSTD_CStream *stream = cc->private2;
60699 -       ZSTD_inBuffer inbuf;
60700 -       ZSTD_outBuffer outbuf;
60701 +       zstd_cstream *stream = cc->private2;
60702 +       zstd_in_buffer inbuf;
60703 +       zstd_out_buffer outbuf;
60704         int src_size = cc->rlen;
60705         int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
60706         int ret;
60707 @@ -409,19 +397,19 @@ static int zstd_compress_pages(struct compress_ctx *cc)
60708         outbuf.dst = cc->cbuf->cdata;
60709         outbuf.size = dst_size;
60711 -       ret = ZSTD_compressStream(stream, &outbuf, &inbuf);
60712 -       if (ZSTD_isError(ret)) {
60713 -               printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
60714 +       ret = zstd_compress_stream(stream, &outbuf, &inbuf);
60715 +       if (zstd_is_error(ret)) {
60716 +               printk_ratelimited("%sF2FS-fs (%s): %s zstd_compress_stream failed, ret: %d\n",
60717                                 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
60718 -                               __func__, ZSTD_getErrorCode(ret));
60719 +                               __func__, zstd_get_error_code(ret));
60720                 return -EIO;
60721         }
60723 -       ret = ZSTD_endStream(stream, &outbuf);
60724 -       if (ZSTD_isError(ret)) {
60725 -               printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_endStream returned %d\n",
60726 +       ret = zstd_end_stream(stream, &outbuf);
60727 +       if (zstd_is_error(ret)) {
60728 +               printk_ratelimited("%sF2FS-fs (%s): %s zstd_end_stream returned %d\n",
60729                                 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
60730 -                               __func__, ZSTD_getErrorCode(ret));
60731 +                               __func__, zstd_get_error_code(ret));
60732                 return -EIO;
60733         }
60735 @@ -438,22 +426,22 @@ static int zstd_compress_pages(struct compress_ctx *cc)
60737  static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
60739 -       ZSTD_DStream *stream;
60740 +       zstd_dstream *stream;
60741         void *workspace;
60742         unsigned int workspace_size;
60743         unsigned int max_window_size =
60744                         MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);
60746 -       workspace_size = ZSTD_DStreamWorkspaceBound(max_window_size);
60747 +       workspace_size = zstd_dstream_workspace_bound(max_window_size);
60749         workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
60750                                         workspace_size, GFP_NOFS);
60751         if (!workspace)
60752                 return -ENOMEM;
60754 -       stream = ZSTD_initDStream(max_window_size, workspace, workspace_size);
60755 +       stream = zstd_init_dstream(max_window_size, workspace, workspace_size);
60756         if (!stream) {
60757 -               printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
60758 +               printk_ratelimited("%sF2FS-fs (%s): %s zstd_init_dstream failed\n",
60759                                 KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
60760                                 __func__);
60761                 kvfree(workspace);
60762 @@ -475,9 +463,9 @@ static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
60764  static int zstd_decompress_pages(struct decompress_io_ctx *dic)
60766 -       ZSTD_DStream *stream = dic->private2;
60767 -       ZSTD_inBuffer inbuf;
60768 -       ZSTD_outBuffer outbuf;
60769 +       zstd_dstream *stream = dic->private2;
60770 +       zstd_in_buffer inbuf;
60771 +       zstd_out_buffer outbuf;
60772         int ret;
60774         inbuf.pos = 0;
60775 @@ -488,11 +476,11 @@ static int zstd_decompress_pages(struct decompress_io_ctx *dic)
60776         outbuf.dst = dic->rbuf;
60777         outbuf.size = dic->rlen;
60779 -       ret = ZSTD_decompressStream(stream, &outbuf, &inbuf);
60780 -       if (ZSTD_isError(ret)) {
60781 -               printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
60782 +       ret = zstd_decompress_stream(stream, &outbuf, &inbuf);
60783 +       if (zstd_is_error(ret)) {
60784 +               printk_ratelimited("%sF2FS-fs (%s): %s zstd_decompress_stream failed, ret: %d\n",
60785                                 KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
60786 -                               __func__, ZSTD_getErrorCode(ret));
60787 +                               __func__, zstd_get_error_code(ret));
60788                 return -EIO;
60789         }
60791 @@ -1048,7 +1036,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
60792                 }
60794                 if (PageUptodate(page))
60795 -                       unlock_page(page);
60796 +                       f2fs_put_page(page, 1);
60797                 else
60798                         f2fs_compress_ctx_add_page(cc, page);
60799         }
60800 @@ -1058,33 +1046,35 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
60802                 ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
60803                                         &last_block_in_bio, false, true);
60804 -               f2fs_destroy_compress_ctx(cc);
60805 +               f2fs_put_rpages(cc);
60806 +               f2fs_destroy_compress_ctx(cc, true);
60807                 if (ret)
60808 -                       goto release_pages;
60809 +                       goto out;
60810                 if (bio)
60811                         f2fs_submit_bio(sbi, bio, DATA);
60813                 ret = f2fs_init_compress_ctx(cc);
60814                 if (ret)
60815 -                       goto release_pages;
60816 +                       goto out;
60817         }
60819         for (i = 0; i < cc->cluster_size; i++) {
60820                 f2fs_bug_on(sbi, cc->rpages[i]);
60822                 page = find_lock_page(mapping, start_idx + i);
60823 -               f2fs_bug_on(sbi, !page);
60824 +               if (!page) {
60825 +                       /* page can be truncated */
60826 +                       goto release_and_retry;
60827 +               }
60829                 f2fs_wait_on_page_writeback(page, DATA, true, true);
60831                 f2fs_compress_ctx_add_page(cc, page);
60832 -               f2fs_put_page(page, 0);
60834                 if (!PageUptodate(page)) {
60835 +release_and_retry:
60836 +                       f2fs_put_rpages(cc);
60837                         f2fs_unlock_rpages(cc, i + 1);
60838 -                       f2fs_put_rpages_mapping(mapping, start_idx,
60839 -                                       cc->cluster_size);
60840 -                       f2fs_destroy_compress_ctx(cc);
60841 +                       f2fs_destroy_compress_ctx(cc, true);
60842                         goto retry;
60843                 }
60844         }
60845 @@ -1115,10 +1105,10 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
60846         }
60848  unlock_pages:
60849 +       f2fs_put_rpages(cc);
60850         f2fs_unlock_rpages(cc, i);
60851 -release_pages:
60852 -       f2fs_put_rpages_mapping(mapping, start_idx, i);
60853 -       f2fs_destroy_compress_ctx(cc);
60854 +       f2fs_destroy_compress_ctx(cc, true);
60855 +out:
60856         return ret;
60859 @@ -1153,7 +1143,7 @@ bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
60860                 set_cluster_dirty(&cc);
60862         f2fs_put_rpages_wbc(&cc, NULL, false, 1);
60863 -       f2fs_destroy_compress_ctx(&cc);
60864 +       f2fs_destroy_compress_ctx(&cc, false);
60866         return first_index;
60868 @@ -1372,7 +1362,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
60869         f2fs_put_rpages(cc);
60870         page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
60871         cc->cpages = NULL;
60872 -       f2fs_destroy_compress_ctx(cc);
60873 +       f2fs_destroy_compress_ctx(cc, false);
60874         return 0;
60876  out_destroy_crypt:
60877 @@ -1383,7 +1373,8 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
60878         for (i = 0; i < cc->nr_cpages; i++) {
60879                 if (!cc->cpages[i])
60880                         continue;
60881 -               f2fs_put_page(cc->cpages[i], 1);
60882 +               f2fs_compress_free_page(cc->cpages[i]);
60883 +               cc->cpages[i] = NULL;
60884         }
60885  out_put_cic:
60886         kmem_cache_free(cic_entry_slab, cic);
60887 @@ -1533,7 +1524,7 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,
60888         err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
60889         f2fs_put_rpages_wbc(cc, wbc, false, 0);
60890  destroy_out:
60891 -       f2fs_destroy_compress_ctx(cc);
60892 +       f2fs_destroy_compress_ctx(cc, false);
60893         return err;
60896 diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
60897 index 4e5257c763d0..8804a5d51380 100644
60898 --- a/fs/f2fs/data.c
60899 +++ b/fs/f2fs/data.c
60900 @@ -2276,7 +2276,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
60901                                                         max_nr_pages,
60902                                                         &last_block_in_bio,
60903                                                         rac != NULL, false);
60904 -                               f2fs_destroy_compress_ctx(&cc);
60905 +                               f2fs_destroy_compress_ctx(&cc, false);
60906                                 if (ret)
60907                                         goto set_error_page;
60908                         }
60909 @@ -2321,7 +2321,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
60910                                                         max_nr_pages,
60911                                                         &last_block_in_bio,
60912                                                         rac != NULL, false);
60913 -                               f2fs_destroy_compress_ctx(&cc);
60914 +                               f2fs_destroy_compress_ctx(&cc, false);
60915                         }
60916                 }
60917  #endif
60918 @@ -3022,7 +3022,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
60919                 }
60920         }
60921         if (f2fs_compressed_file(inode))
60922 -               f2fs_destroy_compress_ctx(&cc);
60923 +               f2fs_destroy_compress_ctx(&cc, false);
60924  #endif
60925         if (retry) {
60926                 index = 0;
60927 diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
60928 index e2d302ae3a46..f3fabb1edfe9 100644
60929 --- a/fs/f2fs/f2fs.h
60930 +++ b/fs/f2fs/f2fs.h
60931 @@ -3376,6 +3376,7 @@ block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi);
60932  int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
60933  void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
60934  int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
60935 +bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno);
60936  void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi);
60937  void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi);
60938  void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi);
60939 @@ -3383,7 +3384,7 @@ void f2fs_get_new_segment(struct f2fs_sb_info *sbi,
60940                         unsigned int *newseg, bool new_sec, int dir);
60941  void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
60942                                         unsigned int start, unsigned int end);
60943 -void f2fs_allocate_new_segment(struct f2fs_sb_info *sbi, int type);
60944 +void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type);
60945  void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
60946  int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
60947  bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
60948 @@ -3547,7 +3548,7 @@ void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);
60949  int f2fs_start_gc_thread(struct f2fs_sb_info *sbi);
60950  void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
60951  block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
60952 -int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background,
60953 +int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background, bool force,
60954                         unsigned int segno);
60955  void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
60956  int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count);
60957 @@ -3949,7 +3950,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
60958  void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
60959  void f2fs_put_page_dic(struct page *page);
60960  int f2fs_init_compress_ctx(struct compress_ctx *cc);
60961 -void f2fs_destroy_compress_ctx(struct compress_ctx *cc);
60962 +void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
60963  void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
60964  int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
60965  void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
60966 diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
60967 index d26ff2ae3f5e..dc79694e512c 100644
60968 --- a/fs/f2fs/file.c
60969 +++ b/fs/f2fs/file.c
60970 @@ -1619,9 +1619,10 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
60971         struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
60972                         .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
60973                         .m_may_create = true };
60974 -       pgoff_t pg_end;
60975 +       pgoff_t pg_start, pg_end;
60976         loff_t new_size = i_size_read(inode);
60977         loff_t off_end;
60978 +       block_t expanded = 0;
60979         int err;
60981         err = inode_newsize_ok(inode, (len + offset));
60982 @@ -1634,11 +1635,12 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
60984         f2fs_balance_fs(sbi, true);
60986 +       pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
60987         pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
60988         off_end = (offset + len) & (PAGE_SIZE - 1);
60990 -       map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
60991 -       map.m_len = pg_end - map.m_lblk;
60992 +       map.m_lblk = pg_start;
60993 +       map.m_len = pg_end - pg_start;
60994         if (off_end)
60995                 map.m_len++;
60997 @@ -1646,19 +1648,15 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
60998                 return 0;
61000         if (f2fs_is_pinned_file(inode)) {
61001 -               block_t len = (map.m_len >> sbi->log_blocks_per_seg) <<
61002 -                                       sbi->log_blocks_per_seg;
61003 -               block_t done = 0;
61005 -               if (map.m_len % sbi->blocks_per_seg)
61006 -                       len += sbi->blocks_per_seg;
61007 +               block_t sec_blks = BLKS_PER_SEC(sbi);
61008 +               block_t sec_len = roundup(map.m_len, sec_blks);
61010 -               map.m_len = sbi->blocks_per_seg;
61011 +               map.m_len = sec_blks;
61012  next_alloc:
61013                 if (has_not_enough_free_secs(sbi, 0,
61014                         GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
61015                         down_write(&sbi->gc_lock);
61016 -                       err = f2fs_gc(sbi, true, false, NULL_SEGNO);
61017 +                       err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
61018                         if (err && err != -ENODATA && err != -EAGAIN)
61019                                 goto out_err;
61020                 }
61021 @@ -1666,7 +1664,7 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
61022                 down_write(&sbi->pin_sem);
61024                 f2fs_lock_op(sbi);
61025 -               f2fs_allocate_new_segment(sbi, CURSEG_COLD_DATA_PINNED);
61026 +               f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED);
61027                 f2fs_unlock_op(sbi);
61029                 map.m_seg_type = CURSEG_COLD_DATA_PINNED;
61030 @@ -1674,24 +1672,25 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
61032                 up_write(&sbi->pin_sem);
61034 -               done += map.m_len;
61035 -               len -= map.m_len;
61036 +               expanded += map.m_len;
61037 +               sec_len -= map.m_len;
61038                 map.m_lblk += map.m_len;
61039 -               if (!err && len)
61040 +               if (!err && sec_len)
61041                         goto next_alloc;
61043 -               map.m_len = done;
61044 +               map.m_len = expanded;
61045         } else {
61046                 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
61047 +               expanded = map.m_len;
61048         }
61049  out_err:
61050         if (err) {
61051                 pgoff_t last_off;
61053 -               if (!map.m_len)
61054 +               if (!expanded)
61055                         return err;
61057 -               last_off = map.m_lblk + map.m_len - 1;
61058 +               last_off = pg_start + expanded - 1;
61060                 /* update new size to the failed position */
61061                 new_size = (last_off == pg_end) ? offset + len :
61062 @@ -2489,7 +2488,7 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
61063                 down_write(&sbi->gc_lock);
61064         }
61066 -       ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
61067 +       ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO);
61068  out:
61069         mnt_drop_write_file(filp);
61070         return ret;
61071 @@ -2525,7 +2524,8 @@ static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
61072                 down_write(&sbi->gc_lock);
61073         }
61075 -       ret = f2fs_gc(sbi, range->sync, true, GET_SEGNO(sbi, range->start));
61076 +       ret = f2fs_gc(sbi, range->sync, true, false,
61077 +                               GET_SEGNO(sbi, range->start));
61078         if (ret) {
61079                 if (ret == -EBUSY)
61080                         ret = -EAGAIN;
61081 @@ -2978,7 +2978,7 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
61082                 sm->last_victim[GC_CB] = end_segno + 1;
61083                 sm->last_victim[GC_GREEDY] = end_segno + 1;
61084                 sm->last_victim[ALLOC_NEXT] = end_segno + 1;
61085 -               ret = f2fs_gc(sbi, true, true, start_segno);
61086 +               ret = f2fs_gc(sbi, true, true, true, start_segno);
61087                 if (ret == -EAGAIN)
61088                         ret = 0;
61089                 else if (ret < 0)
61090 diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
61091 index 39330ad3c44e..a8567cb47621 100644
61092 --- a/fs/f2fs/gc.c
61093 +++ b/fs/f2fs/gc.c
61094 @@ -112,7 +112,7 @@ static int gc_thread_func(void *data)
61095                 sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;
61097                 /* if return value is not zero, no victim was selected */
61098 -               if (f2fs_gc(sbi, sync_mode, true, NULL_SEGNO))
61099 +               if (f2fs_gc(sbi, sync_mode, true, false, NULL_SEGNO))
61100                         wait_ms = gc_th->no_gc_sleep_time;
61102                 trace_f2fs_background_gc(sbi->sb, wait_ms,
61103 @@ -392,10 +392,6 @@ static void add_victim_entry(struct f2fs_sb_info *sbi,
61104                 if (p->gc_mode == GC_AT &&
61105                         get_valid_blocks(sbi, segno, true) == 0)
61106                         return;
61108 -               if (p->alloc_mode == AT_SSR &&
61109 -                       get_seg_entry(sbi, segno)->ckpt_valid_blocks == 0)
61110 -                       return;
61111         }
61113         for (i = 0; i < sbi->segs_per_sec; i++)
61114 @@ -728,11 +724,27 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
61116                 if (sec_usage_check(sbi, secno))
61117                         goto next;
61119                 /* Don't touch checkpointed data */
61120 -               if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
61121 -                                       get_ckpt_valid_blocks(sbi, segno) &&
61122 -                                       p.alloc_mode == LFS))
61123 -                       goto next;
61124 +               if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
61125 +                       if (p.alloc_mode == LFS) {
61126 +                               /*
61127 +                                * LFS is set to find source section during GC.
61128 +                                * The victim should have no checkpointed data.
61129 +                                */
61130 +                               if (get_ckpt_valid_blocks(sbi, segno, true))
61131 +                                       goto next;
61132 +                       } else {
61133 +                               /*
61134 +                                * SSR | AT_SSR are set to find target segment
61135 +                                * for writes which can be full by checkpointed
61136 +                                * and newly written blocks.
61137 +                                */
61138 +                               if (!f2fs_segment_has_free_slot(sbi, segno))
61139 +                                       goto next;
61140 +                       }
61141 +               }
61143                 if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
61144                         goto next;
61146 @@ -1354,7 +1366,8 @@ static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
61147   * the victim data block is ignored.
61148   */
61149  static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
61150 -               struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
61151 +               struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
61152 +               bool force_migrate)
61154         struct super_block *sb = sbi->sb;
61155         struct f2fs_summary *entry;
61156 @@ -1383,8 +1396,8 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
61157                  * race condition along with SSR block allocation.
61158                  */
61159                 if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
61160 -                               get_valid_blocks(sbi, segno, true) ==
61161 -                                                       BLKS_PER_SEC(sbi))
61162 +                       (!force_migrate && get_valid_blocks(sbi, segno, true) ==
61163 +                                                       BLKS_PER_SEC(sbi)))
61164                         return submitted;
61166                 if (check_valid_map(sbi, segno, off) == 0)
61167 @@ -1519,7 +1532,8 @@ static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
61169  static int do_garbage_collect(struct f2fs_sb_info *sbi,
61170                                 unsigned int start_segno,
61171 -                               struct gc_inode_list *gc_list, int gc_type)
61172 +                               struct gc_inode_list *gc_list, int gc_type,
61173 +                               bool force_migrate)
61175         struct page *sum_page;
61176         struct f2fs_summary_block *sum;
61177 @@ -1606,7 +1620,8 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
61178                                                                 gc_type);
61179                 else
61180                         submitted += gc_data_segment(sbi, sum->entries, gc_list,
61181 -                                                       segno, gc_type);
61182 +                                                       segno, gc_type,
61183 +                                                       force_migrate);
61185                 stat_inc_seg_count(sbi, type, gc_type);
61186                 migrated++;
61187 @@ -1634,7 +1649,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
61190  int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
61191 -                       bool background, unsigned int segno)
61192 +                       bool background, bool force, unsigned int segno)
61194         int gc_type = sync ? FG_GC : BG_GC;
61195         int sec_freed = 0, seg_freed = 0, total_freed = 0;
61196 @@ -1696,7 +1711,7 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
61197         if (ret)
61198                 goto stop;
61200 -       seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);
61201 +       seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type, force);
61202         if (gc_type == FG_GC &&
61203                 seg_freed == f2fs_usable_segs_in_sec(sbi, segno))
61204                 sec_freed++;
61205 @@ -1835,7 +1850,7 @@ static int free_segment_range(struct f2fs_sb_info *sbi,
61206                         .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
61207                 };
61209 -               do_garbage_collect(sbi, segno, &gc_list, FG_GC);
61210 +               do_garbage_collect(sbi, segno, &gc_list, FG_GC, true);
61211                 put_gc_inode(&gc_list);
61213                 if (!gc_only && get_valid_blocks(sbi, segno, true)) {
61214 @@ -1974,7 +1989,20 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
61216         /* stop CP to protect MAIN_SEC in free_segment_range */
61217         f2fs_lock_op(sbi);
61219 +       spin_lock(&sbi->stat_lock);
61220 +       if (shrunk_blocks + valid_user_blocks(sbi) +
61221 +               sbi->current_reserved_blocks + sbi->unusable_block_count +
61222 +               F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
61223 +               err = -ENOSPC;
61224 +       spin_unlock(&sbi->stat_lock);
61226 +       if (err)
61227 +               goto out_unlock;
61229         err = free_segment_range(sbi, secs, true);
61231 +out_unlock:
61232         f2fs_unlock_op(sbi);
61233         up_write(&sbi->gc_lock);
61234         if (err)
61235 diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
61236 index 993caefcd2bb..92652ca7a7c8 100644
61237 --- a/fs/f2fs/inline.c
61238 +++ b/fs/f2fs/inline.c
61239 @@ -219,7 +219,8 @@ int f2fs_convert_inline_inode(struct inode *inode)
61241         f2fs_put_page(page, 1);
61243 -       f2fs_balance_fs(sbi, dn.node_changed);
61244 +       if (!err)
61245 +               f2fs_balance_fs(sbi, dn.node_changed);
61247         return err;
61249 diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
61250 index 4b0e2e3c2c88..45c8cf1afe66 100644
61251 --- a/fs/f2fs/node.c
61252 +++ b/fs/f2fs/node.c
61253 @@ -2785,6 +2785,9 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
61254                 struct f2fs_nat_entry raw_ne;
61255                 nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
61257 +               if (f2fs_check_nid_range(sbi, nid))
61258 +                       continue;
61260                 raw_ne = nat_in_journal(journal, i);
61262                 ne = __lookup_nat_cache(nm_i, nid);
61263 diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
61264 index c2866561263e..bb6d86255741 100644
61265 --- a/fs/f2fs/segment.c
61266 +++ b/fs/f2fs/segment.c
61267 @@ -186,7 +186,10 @@ void f2fs_register_inmem_page(struct inode *inode, struct page *page)
61269         struct inmem_pages *new;
61271 -       f2fs_set_page_private(page, ATOMIC_WRITTEN_PAGE);
61272 +       if (PagePrivate(page))
61273 +               set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
61274 +       else
61275 +               f2fs_set_page_private(page, ATOMIC_WRITTEN_PAGE);
61277         new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);
61279 @@ -324,23 +327,27 @@ void f2fs_drop_inmem_pages(struct inode *inode)
61280         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
61281         struct f2fs_inode_info *fi = F2FS_I(inode);
61283 -       while (!list_empty(&fi->inmem_pages)) {
61284 +       do {
61285                 mutex_lock(&fi->inmem_lock);
61286 +               if (list_empty(&fi->inmem_pages)) {
61287 +                       fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
61289 +                       spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
61290 +                       if (!list_empty(&fi->inmem_ilist))
61291 +                               list_del_init(&fi->inmem_ilist);
61292 +                       if (f2fs_is_atomic_file(inode)) {
61293 +                               clear_inode_flag(inode, FI_ATOMIC_FILE);
61294 +                               sbi->atomic_files--;
61295 +                       }
61296 +                       spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
61298 +                       mutex_unlock(&fi->inmem_lock);
61299 +                       break;
61300 +               }
61301                 __revoke_inmem_pages(inode, &fi->inmem_pages,
61302                                                 true, false, true);
61303                 mutex_unlock(&fi->inmem_lock);
61304 -       }
61306 -       fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
61308 -       spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
61309 -       if (!list_empty(&fi->inmem_ilist))
61310 -               list_del_init(&fi->inmem_ilist);
61311 -       if (f2fs_is_atomic_file(inode)) {
61312 -               clear_inode_flag(inode, FI_ATOMIC_FILE);
61313 -               sbi->atomic_files--;
61314 -       }
61315 -       spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
61316 +       } while (1);
61319  void f2fs_drop_inmem_page(struct inode *inode, struct page *page)
61320 @@ -504,7 +511,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
61321          */
61322         if (has_not_enough_free_secs(sbi, 0, 0)) {
61323                 down_write(&sbi->gc_lock);
61324 -               f2fs_gc(sbi, false, false, NULL_SEGNO);
61325 +               f2fs_gc(sbi, false, false, false, NULL_SEGNO);
61326         }
61329 @@ -861,7 +868,7 @@ static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
61330         mutex_lock(&dirty_i->seglist_lock);
61332         valid_blocks = get_valid_blocks(sbi, segno, false);
61333 -       ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno);
61334 +       ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno, false);
61336         if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) ||
61337                 ckpt_valid_blocks == usable_blocks)) {
61338 @@ -946,7 +953,7 @@ static unsigned int get_free_segment(struct f2fs_sb_info *sbi)
61339         for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
61340                 if (get_valid_blocks(sbi, segno, false))
61341                         continue;
61342 -               if (get_ckpt_valid_blocks(sbi, segno))
61343 +               if (get_ckpt_valid_blocks(sbi, segno, false))
61344                         continue;
61345                 mutex_unlock(&dirty_i->seglist_lock);
61346                 return segno;
61347 @@ -2636,6 +2643,23 @@ static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
61348                 seg->next_blkoff++;
61351 +bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno)
61353 +       struct seg_entry *se = get_seg_entry(sbi, segno);
61354 +       int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
61355 +       unsigned long *target_map = SIT_I(sbi)->tmp_map;
61356 +       unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
61357 +       unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
61358 +       int i, pos;
61360 +       for (i = 0; i < entries; i++)
61361 +               target_map[i] = ckpt_map[i] | cur_map[i];
61363 +       pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, 0);
61365 +       return pos < sbi->blocks_per_seg;
61368  /*
61369   * This function always allocates a used segment(from dirty seglist) by SSR
61370   * manner, so it should recover the existing segment information of valid blocks
61371 @@ -2893,7 +2917,8 @@ void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
61372         up_read(&SM_I(sbi)->curseg_lock);
61375 -static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type)
61376 +static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type,
61377 +                                                               bool new_sec)
61379         struct curseg_info *curseg = CURSEG_I(sbi, type);
61380         unsigned int old_segno;
61381 @@ -2901,32 +2926,42 @@ static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type)
61382         if (!curseg->inited)
61383                 goto alloc;
61385 -       if (!curseg->next_blkoff &&
61386 -               !get_valid_blocks(sbi, curseg->segno, false) &&
61387 -               !get_ckpt_valid_blocks(sbi, curseg->segno))
61388 -               return;
61389 +       if (curseg->next_blkoff ||
61390 +               get_valid_blocks(sbi, curseg->segno, new_sec))
61391 +               goto alloc;
61393 +       if (!get_ckpt_valid_blocks(sbi, curseg->segno, new_sec))
61394 +               return;
61395  alloc:
61396         old_segno = curseg->segno;
61397         SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true);
61398         locate_dirty_segment(sbi, old_segno);
61401 -void f2fs_allocate_new_segment(struct f2fs_sb_info *sbi, int type)
61402 +static void __allocate_new_section(struct f2fs_sb_info *sbi, int type)
61404 +       __allocate_new_segment(sbi, type, true);
61407 +void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type)
61409 +       down_read(&SM_I(sbi)->curseg_lock);
61410         down_write(&SIT_I(sbi)->sentry_lock);
61411 -       __allocate_new_segment(sbi, type);
61412 +       __allocate_new_section(sbi, type);
61413         up_write(&SIT_I(sbi)->sentry_lock);
61414 +       up_read(&SM_I(sbi)->curseg_lock);
61417  void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
61419         int i;
61421 +       down_read(&SM_I(sbi)->curseg_lock);
61422         down_write(&SIT_I(sbi)->sentry_lock);
61423         for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
61424 -               __allocate_new_segment(sbi, i);
61425 +               __allocate_new_segment(sbi, i, false);
61426         up_write(&SIT_I(sbi)->sentry_lock);
61427 +       up_read(&SM_I(sbi)->curseg_lock);
61430  static const struct segment_allocation default_salloc_ops = {
61431 @@ -3365,12 +3400,12 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
61432                 f2fs_inode_chksum_set(sbi, page);
61433         }
61435 -       if (F2FS_IO_ALIGNED(sbi))
61436 -               fio->retry = false;
61438         if (fio) {
61439                 struct f2fs_bio_info *io;
61441 +               if (F2FS_IO_ALIGNED(sbi))
61442 +                       fio->retry = false;
61444                 INIT_LIST_HEAD(&fio->list);
61445                 fio->in_list = true;
61446                 io = sbi->write_io[fio->type] + fio->temp;
61447 diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
61448 index e9a7a637d688..afb175739de5 100644
61449 --- a/fs/f2fs/segment.h
61450 +++ b/fs/f2fs/segment.h
61451 @@ -361,8 +361,20 @@ static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
61454  static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi,
61455 -                               unsigned int segno)
61456 +                               unsigned int segno, bool use_section)
61458 +       if (use_section && __is_large_section(sbi)) {
61459 +               unsigned int start_segno = START_SEGNO(segno);
61460 +               unsigned int blocks = 0;
61461 +               int i;
61463 +               for (i = 0; i < sbi->segs_per_sec; i++, start_segno++) {
61464 +                       struct seg_entry *se = get_seg_entry(sbi, start_segno);
61466 +                       blocks += se->ckpt_valid_blocks;
61467 +               }
61468 +               return blocks;
61469 +       }
61470         return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
61473 diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
61474 index 82592b19b4e0..3c8426709f34 100644
61475 --- a/fs/f2fs/super.c
61476 +++ b/fs/f2fs/super.c
61477 @@ -525,7 +525,7 @@ static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
61478         if (kstrtouint(str + 1, 10, &level))
61479                 return -EINVAL;
61481 -       if (!level || level > ZSTD_maxCLevel()) {
61482 +       if (!level || level > zstd_max_clevel()) {
61483                 f2fs_info(sbi, "invalid zstd compress level: %d", level);
61484                 return -EINVAL;
61485         }
61486 @@ -1865,7 +1865,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
61488         while (!f2fs_time_over(sbi, DISABLE_TIME)) {
61489                 down_write(&sbi->gc_lock);
61490 -               err = f2fs_gc(sbi, true, false, NULL_SEGNO);
61491 +               err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
61492                 if (err == -ENODATA) {
61493                         err = 0;
61494                         break;
61495 @@ -3929,10 +3929,18 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
61496                  * previous checkpoint was not done by clean system shutdown.
61497                  */
61498                 if (f2fs_hw_is_readonly(sbi)) {
61499 -                       if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))
61500 -                               f2fs_err(sbi, "Need to recover fsync data, but write access unavailable");
61501 -                       else
61502 -                               f2fs_info(sbi, "write access unavailable, skipping recovery");
61503 +                       if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
61504 +                               err = f2fs_recover_fsync_data(sbi, true);
61505 +                               if (err > 0) {
61506 +                                       err = -EROFS;
61507 +                                       f2fs_err(sbi, "Need to recover fsync data, but "
61508 +                                               "write access unavailable, please try "
61509 +                                               "mount w/ disable_roll_forward or norecovery");
61510 +                               }
61511 +                               if (err < 0)
61512 +                                       goto free_meta;
61513 +                       }
61514 +                       f2fs_info(sbi, "write access unavailable, skipping recovery");
61515                         goto reset_checkpoint;
61516                 }
61518 diff --git a/fs/f2fs/verity.c b/fs/f2fs/verity.c
61519 index 054ec852b5ea..15ba36926fad 100644
61520 --- a/fs/f2fs/verity.c
61521 +++ b/fs/f2fs/verity.c
61522 @@ -152,40 +152,73 @@ static int f2fs_end_enable_verity(struct file *filp, const void *desc,
61523                                   size_t desc_size, u64 merkle_tree_size)
61525         struct inode *inode = file_inode(filp);
61526 +       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
61527         u64 desc_pos = f2fs_verity_metadata_pos(inode) + merkle_tree_size;
61528         struct fsverity_descriptor_location dloc = {
61529                 .version = cpu_to_le32(F2FS_VERIFY_VER),
61530                 .size = cpu_to_le32(desc_size),
61531                 .pos = cpu_to_le64(desc_pos),
61532         };
61533 -       int err = 0;
61534 +       int err = 0, err2 = 0;
61536 -       if (desc != NULL) {
61537 -               /* Succeeded; write the verity descriptor. */
61538 -               err = pagecache_write(inode, desc, desc_size, desc_pos);
61539 +       /*
61540 +        * If an error already occurred (which fs/verity/ signals by passing
61541 +        * desc == NULL), then only clean-up is needed.
61542 +        */
61543 +       if (desc == NULL)
61544 +               goto cleanup;
61546 -               /* Write all pages before clearing FI_VERITY_IN_PROGRESS. */
61547 -               if (!err)
61548 -                       err = filemap_write_and_wait(inode->i_mapping);
61549 -       }
61550 +       /* Append the verity descriptor. */
61551 +       err = pagecache_write(inode, desc, desc_size, desc_pos);
61552 +       if (err)
61553 +               goto cleanup;
61555 +       /*
61556 +        * Write all pages (both data and verity metadata).  Note that this must
61557 +        * happen before clearing FI_VERITY_IN_PROGRESS; otherwise pages beyond
61558 +        * i_size won't be written properly.  For crash consistency, this also
61559 +        * must happen before the verity inode flag gets persisted.
61560 +        */
61561 +       err = filemap_write_and_wait(inode->i_mapping);
61562 +       if (err)
61563 +               goto cleanup;
61565 +       /* Set the verity xattr. */
61566 +       err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_VERITY,
61567 +                           F2FS_XATTR_NAME_VERITY, &dloc, sizeof(dloc),
61568 +                           NULL, XATTR_CREATE);
61569 +       if (err)
61570 +               goto cleanup;
61572 -       /* If we failed, truncate anything we wrote past i_size. */
61573 -       if (desc == NULL || err)
61574 -               f2fs_truncate(inode);
61575 +       /* Finally, set the verity inode flag. */
61576 +       file_set_verity(inode);
61577 +       f2fs_set_inode_flags(inode);
61578 +       f2fs_mark_inode_dirty_sync(inode, true);
61580         clear_inode_flag(inode, FI_VERITY_IN_PROGRESS);
61581 +       return 0;
61583 -       if (desc != NULL && !err) {
61584 -               err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_VERITY,
61585 -                                   F2FS_XATTR_NAME_VERITY, &dloc, sizeof(dloc),
61586 -                                   NULL, XATTR_CREATE);
61587 -               if (!err) {
61588 -                       file_set_verity(inode);
61589 -                       f2fs_set_inode_flags(inode);
61590 -                       f2fs_mark_inode_dirty_sync(inode, true);
61591 -               }
61592 +cleanup:
61593 +       /*
61594 +        * Verity failed to be enabled, so clean up by truncating any verity
61595 +        * metadata that was written beyond i_size (both from cache and from
61596 +        * disk) and clearing FI_VERITY_IN_PROGRESS.
61597 +        *
61598 +        * Taking i_gc_rwsem[WRITE] is needed to stop f2fs garbage collection
61599 +        * from re-instantiating cached pages we are truncating (since unlike
61600 +        * normal file accesses, garbage collection isn't limited by i_size).
61601 +        */
61602 +       down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
61603 +       truncate_inode_pages(inode->i_mapping, inode->i_size);
61604 +       err2 = f2fs_truncate(inode);
61605 +       if (err2) {
61606 +               f2fs_err(sbi, "Truncating verity metadata failed (errno=%d)",
61607 +                        err2);
61608 +               set_sbi_flag(sbi, SBI_NEED_FSCK);
61609         }
61610 -       return err;
61611 +       up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
61612 +       clear_inode_flag(inode, FI_VERITY_IN_PROGRESS);
61613 +       return err ?: err2;
61616  static int f2fs_get_verity_descriptor(struct inode *inode, void *buf,
61617 diff --git a/fs/file.c b/fs/file.c
61618 index f633348029a5..b56c4dd78a19 100644
61619 --- a/fs/file.c
61620 +++ b/fs/file.c
61621 @@ -771,6 +771,7 @@ int __close_fd_get_file(unsigned int fd, struct file **res)
61622         *res = NULL;
61623         return -ENOENT;
61625 +EXPORT_SYMBOL(close_fd_get_file);
61627  /*
61628   * variant of close_fd that gets a ref on the file for later fput.
61629 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
61630 index 45082269e698..a37528b51798 100644
61631 --- a/fs/fuse/cuse.c
61632 +++ b/fs/fuse/cuse.c
61633 @@ -627,6 +627,8 @@ static int __init cuse_init(void)
61634         cuse_channel_fops.owner         = THIS_MODULE;
61635         cuse_channel_fops.open          = cuse_channel_open;
61636         cuse_channel_fops.release       = cuse_channel_release;
61637 +       /* CUSE is not prepared for FUSE_DEV_IOC_CLONE */
61638 +       cuse_channel_fops.unlocked_ioctl        = NULL;
61640         cuse_class = class_create(THIS_MODULE, "cuse");
61641         if (IS_ERR(cuse_class))
61642 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
61643 index c0fee830a34e..f784c118f00f 100644
61644 --- a/fs/fuse/dev.c
61645 +++ b/fs/fuse/dev.c
61646 @@ -784,7 +784,8 @@ static int fuse_check_page(struct page *page)
61647                1 << PG_lru |
61648                1 << PG_active |
61649                1 << PG_reclaim |
61650 -              1 << PG_waiters))) {
61651 +              1 << PG_waiters |
61652 +              LRU_GEN_MASK | LRU_USAGE_MASK))) {
61653                 dump_page(page, "fuse: trying to steal weird page");
61654                 return 1;
61655         }
61656 @@ -2233,11 +2234,8 @@ static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
61657         int oldfd;
61658         struct fuse_dev *fud = NULL;
61660 -       if (_IOC_TYPE(cmd) != FUSE_DEV_IOC_MAGIC)
61661 -               return -ENOTTY;
61663 -       switch (_IOC_NR(cmd)) {
61664 -       case _IOC_NR(FUSE_DEV_IOC_CLONE):
61665 +       switch (cmd) {
61666 +       case FUSE_DEV_IOC_CLONE:
61667                 res = -EFAULT;
61668                 if (!get_user(oldfd, (__u32 __user *)arg)) {
61669                         struct file *old = fget(oldfd);
61670 diff --git a/fs/fuse/file.c b/fs/fuse/file.c
61671 index 8cccecb55fb8..6e6d1e599869 100644
61672 --- a/fs/fuse/file.c
61673 +++ b/fs/fuse/file.c
61674 @@ -1099,6 +1099,7 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
61675         struct fuse_file *ff = file->private_data;
61676         struct fuse_mount *fm = ff->fm;
61677         unsigned int offset, i;
61678 +       bool short_write;
61679         int err;
61681         for (i = 0; i < ap->num_pages; i++)
61682 @@ -1113,32 +1114,38 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
61683         if (!err && ia->write.out.size > count)
61684                 err = -EIO;
61686 +       short_write = ia->write.out.size < count;
61687         offset = ap->descs[0].offset;
61688         count = ia->write.out.size;
61689         for (i = 0; i < ap->num_pages; i++) {
61690                 struct page *page = ap->pages[i];
61692 -               if (!err && !offset && count >= PAGE_SIZE)
61693 -                       SetPageUptodate(page);
61695 -               if (count > PAGE_SIZE - offset)
61696 -                       count -= PAGE_SIZE - offset;
61697 -               else
61698 -                       count = 0;
61699 -               offset = 0;
61701 -               unlock_page(page);
61702 +               if (err) {
61703 +                       ClearPageUptodate(page);
61704 +               } else {
61705 +                       if (count >= PAGE_SIZE - offset)
61706 +                               count -= PAGE_SIZE - offset;
61707 +                       else {
61708 +                               if (short_write)
61709 +                                       ClearPageUptodate(page);
61710 +                               count = 0;
61711 +                       }
61712 +                       offset = 0;
61713 +               }
61714 +               if (ia->write.page_locked && (i == ap->num_pages - 1))
61715 +                       unlock_page(page);
61716                 put_page(page);
61717         }
61719         return err;
61722 -static ssize_t fuse_fill_write_pages(struct fuse_args_pages *ap,
61723 +static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
61724                                      struct address_space *mapping,
61725                                      struct iov_iter *ii, loff_t pos,
61726                                      unsigned int max_pages)
61728 +       struct fuse_args_pages *ap = &ia->ap;
61729         struct fuse_conn *fc = get_fuse_conn(mapping->host);
61730         unsigned offset = pos & (PAGE_SIZE - 1);
61731         size_t count = 0;
61732 @@ -1191,6 +1198,16 @@ static ssize_t fuse_fill_write_pages(struct fuse_args_pages *ap,
61733                 if (offset == PAGE_SIZE)
61734                         offset = 0;
61736 +               /* If we copied full page, mark it uptodate */
61737 +               if (tmp == PAGE_SIZE)
61738 +                       SetPageUptodate(page);
61740 +               if (PageUptodate(page)) {
61741 +                       unlock_page(page);
61742 +               } else {
61743 +                       ia->write.page_locked = true;
61744 +                       break;
61745 +               }
61746                 if (!fc->big_writes)
61747                         break;
61748         } while (iov_iter_count(ii) && count < fc->max_write &&
61749 @@ -1234,7 +1251,7 @@ static ssize_t fuse_perform_write(struct kiocb *iocb,
61750                         break;
61751                 }
61753 -               count = fuse_fill_write_pages(ap, mapping, ii, pos, nr_pages);
61754 +               count = fuse_fill_write_pages(&ia, mapping, ii, pos, nr_pages);
61755                 if (count <= 0) {
61756                         err = count;
61757                 } else {
61758 @@ -1759,8 +1776,17 @@ static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args,
61759                 container_of(args, typeof(*wpa), ia.ap.args);
61760         struct inode *inode = wpa->inode;
61761         struct fuse_inode *fi = get_fuse_inode(inode);
61762 +       struct fuse_conn *fc = get_fuse_conn(inode);
61764         mapping_set_error(inode->i_mapping, error);
61765 +       /*
61766 +        * A writeback finished and this might have updated mtime/ctime on
61767 +        * server making local mtime/ctime stale.  Hence invalidate attrs.
61768 +        * Do this only if writeback_cache is not enabled.  If writeback_cache
61769 +        * is enabled, we trust local ctime/mtime.
61770 +        */
61771 +       if (!fc->writeback_cache)
61772 +               fuse_invalidate_attr(inode);
61773         spin_lock(&fi->lock);
61774         rb_erase(&wpa->writepages_entry, &fi->writepages);
61775         while (wpa->next) {
61776 diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
61777 index 63d97a15ffde..74d888c78fa4 100644
61778 --- a/fs/fuse/fuse_i.h
61779 +++ b/fs/fuse/fuse_i.h
61780 @@ -912,6 +912,7 @@ struct fuse_io_args {
61781                 struct {
61782                         struct fuse_write_in in;
61783                         struct fuse_write_out out;
61784 +                       bool page_locked;
61785                 } write;
61786         };
61787         struct fuse_args_pages ap;
61788 diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
61789 index 4ee6f734ba83..005209b1cd50 100644
61790 --- a/fs/fuse/virtio_fs.c
61791 +++ b/fs/fuse/virtio_fs.c
61792 @@ -896,6 +896,7 @@ static int virtio_fs_probe(struct virtio_device *vdev)
61793  out_vqs:
61794         vdev->config->reset(vdev);
61795         virtio_fs_cleanup_vqs(vdev, fs);
61796 +       kfree(fs->vqs);
61798  out:
61799         vdev->priv = NULL;
61800 @@ -1436,8 +1437,7 @@ static int virtio_fs_get_tree(struct fs_context *fsc)
61801         if (!fm)
61802                 goto out_err;
61804 -       fuse_conn_init(fc, fm, get_user_ns(current_user_ns()),
61805 -                      &virtio_fs_fiq_ops, fs);
61806 +       fuse_conn_init(fc, fm, fsc->user_ns, &virtio_fs_fiq_ops, fs);
61807         fc->release = fuse_free_conn;
61808         fc->delete_stale = true;
61809         fc->auto_submounts = true;
61810 diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
61811 index a930ddd15681..7054a542689f 100644
61812 --- a/fs/hfsplus/extents.c
61813 +++ b/fs/hfsplus/extents.c
61814 @@ -598,13 +598,15 @@ void hfsplus_file_truncate(struct inode *inode)
61815                 res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
61816                 if (res)
61817                         break;
61818 -               hfs_brec_remove(&fd);
61820 -               mutex_unlock(&fd.tree->tree_lock);
61821                 start = hip->cached_start;
61822 +               if (blk_cnt <= start)
61823 +                       hfs_brec_remove(&fd);
61824 +               mutex_unlock(&fd.tree->tree_lock);
61825                 hfsplus_free_extents(sb, hip->cached_extents,
61826                                      alloc_cnt - start, alloc_cnt - blk_cnt);
61827                 hfsplus_dump_extent(hip->cached_extents);
61828 +               mutex_lock(&fd.tree->tree_lock);
61829                 if (blk_cnt > start) {
61830                         hip->extent_state |= HFSPLUS_EXT_DIRTY;
61831                         break;
61832 @@ -612,7 +614,6 @@ void hfsplus_file_truncate(struct inode *inode)
61833                 alloc_cnt = start;
61834                 hip->cached_start = hip->cached_blocks = 0;
61835                 hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
61836 -               mutex_lock(&fd.tree->tree_lock);
61837         }
61838         hfs_find_exit(&fd);
61840 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
61841 index 701c82c36138..99df69b84822 100644
61842 --- a/fs/hugetlbfs/inode.c
61843 +++ b/fs/hugetlbfs/inode.c
61844 @@ -131,6 +131,7 @@ static void huge_pagevec_release(struct pagevec *pvec)
61845  static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
61847         struct inode *inode = file_inode(file);
61848 +       struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
61849         loff_t len, vma_len;
61850         int ret;
61851         struct hstate *h = hstate_file(file);
61852 @@ -146,6 +147,10 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
61853         vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
61854         vma->vm_ops = &hugetlb_vm_ops;
61856 +       ret = seal_check_future_write(info->seals, vma);
61857 +       if (ret)
61858 +               return ret;
61860         /*
61861          * page based offset in vm_pgoff could be sufficiently large to
61862          * overflow a loff_t when converted to byte offset.  This can
61863 diff --git a/fs/inode.c b/fs/inode.c
61864 index a047ab306f9a..c5e1dd13fd40 100644
61865 --- a/fs/inode.c
61866 +++ b/fs/inode.c
61867 @@ -139,6 +139,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
61868         inode->i_blkbits = sb->s_blocksize_bits;
61869         inode->i_flags = 0;
61870         atomic64_set(&inode->i_sequence, 0);
61871 +       atomic64_set(&inode->i_sequence2, 0);
61872         atomic_set(&inode->i_count, 1);
61873         inode->i_op = &empty_iops;
61874         inode->i_fop = &no_open_fops;
61875 diff --git a/fs/io_uring.c b/fs/io_uring.c
61876 index dff34975d86b..144056b0cac9 100644
61877 --- a/fs/io_uring.c
61878 +++ b/fs/io_uring.c
61879 @@ -238,7 +238,7 @@ struct fixed_rsrc_data {
61880  struct io_buffer {
61881         struct list_head list;
61882         __u64 addr;
61883 -       __s32 len;
61884 +       __u32 len;
61885         __u16 bid;
61886  };
61888 @@ -614,7 +614,7 @@ struct io_splice {
61889  struct io_provide_buf {
61890         struct file                     *file;
61891         __u64                           addr;
61892 -       __s32                           len;
61893 +       __u32                           len;
61894         __u32                           bgid;
61895         __u16                           nbufs;
61896         __u16                           bid;
61897 @@ -1008,7 +1008,7 @@ static void io_uring_del_task_file(unsigned long index);
61898  static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
61899                                          struct task_struct *task,
61900                                          struct files_struct *files);
61901 -static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx);
61902 +static void io_uring_cancel_sqpoll(struct io_sq_data *sqd);
61903  static void destroy_fixed_rsrc_ref_node(struct fixed_rsrc_ref_node *ref_node);
61904  static struct fixed_rsrc_ref_node *alloc_fixed_rsrc_ref_node(
61905                         struct io_ring_ctx *ctx);
61906 @@ -3979,7 +3979,7 @@ static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
61907  static int io_provide_buffers_prep(struct io_kiocb *req,
61908                                    const struct io_uring_sqe *sqe)
61910 -       unsigned long size;
61911 +       unsigned long size, tmp_check;
61912         struct io_provide_buf *p = &req->pbuf;
61913         u64 tmp;
61915 @@ -3993,6 +3993,12 @@ static int io_provide_buffers_prep(struct io_kiocb *req,
61916         p->addr = READ_ONCE(sqe->addr);
61917         p->len = READ_ONCE(sqe->len);
61919 +       if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
61920 +                               &size))
61921 +               return -EOVERFLOW;
61922 +       if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
61923 +               return -EOVERFLOW;
61925         size = (unsigned long)p->len * p->nbufs;
61926         if (!access_ok(u64_to_user_ptr(p->addr), size))
61927                 return -EFAULT;
61928 @@ -4017,7 +4023,7 @@ static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
61929                         break;
61931                 buf->addr = addr;
61932 -               buf->len = pbuf->len;
61933 +               buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
61934                 buf->bid = bid;
61935                 addr += pbuf->len;
61936                 bid++;
61937 @@ -6710,6 +6716,10 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
61938                 if (!list_empty(&ctx->iopoll_list))
61939                         io_do_iopoll(ctx, &nr_events, 0);
61941 +               /*
61942 +                * Don't submit if refs are dying, good for io_uring_register(),
61943 +                * but also it is relied upon by io_ring_exit_work()
61944 +                */
61945                 if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
61946                     !(ctx->flags & IORING_SETUP_R_DISABLED))
61947                         ret = io_submit_sqes(ctx, to_submit);
61948 @@ -6832,15 +6842,14 @@ static int io_sq_thread(void *data)
61949                 timeout = jiffies + sqd->sq_thread_idle;
61950         }
61952 -       list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
61953 -               io_uring_cancel_sqpoll(ctx);
61954 +       io_uring_cancel_sqpoll(sqd);
61955         sqd->thread = NULL;
61956         list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
61957                 io_ring_set_wakeup_flag(ctx);
61958 -       mutex_unlock(&sqd->lock);
61960         io_run_task_work();
61961         io_run_task_work_head(&sqd->park_task_work);
61962 +       mutex_unlock(&sqd->lock);
61964         complete(&sqd->exited);
61965         do_exit(0);
61967 @@ -7200,8 +7209,6 @@ static void io_sq_thread_finish(struct io_ring_ctx *ctx)
61969                 io_put_sq_data(sqd);
61970                 ctx->sq_data = NULL;
61971 -               if (ctx->sq_creds)
61972 -                       put_cred(ctx->sq_creds);
61973         }
61976 @@ -8469,6 +8476,8 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
61977         mutex_unlock(&ctx->uring_lock);
61978         io_eventfd_unregister(ctx);
61979         io_destroy_buffers(ctx);
61980 +       if (ctx->sq_creds)
61981 +               put_cred(ctx->sq_creds);
61983  #if defined(CONFIG_UNIX)
61984         if (ctx->ring_sock) {
61985 @@ -8568,6 +8577,13 @@ static void io_tctx_exit_cb(struct callback_head *cb)
61986         complete(&work->completion);
61989 +static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
61991 +       struct io_kiocb *req = container_of(work, struct io_kiocb, work);
61993 +       return req->ctx == data;
61996  static void io_ring_exit_work(struct work_struct *work)
61998         struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
61999 @@ -8576,14 +8592,6 @@ static void io_ring_exit_work(struct work_struct *work)
62000         struct io_tctx_node *node;
62001         int ret;
62003 -       /* prevent SQPOLL from submitting new requests */
62004 -       if (ctx->sq_data) {
62005 -               io_sq_thread_park(ctx->sq_data);
62006 -               list_del_init(&ctx->sqd_list);
62007 -               io_sqd_update_thread_idle(ctx->sq_data);
62008 -               io_sq_thread_unpark(ctx->sq_data);
62009 -       }
62011         /*
62012          * If we're doing polled IO and end up having requests being
62013          * submitted async (out-of-line), then completions can come in while
62014 @@ -8592,6 +8600,17 @@ static void io_ring_exit_work(struct work_struct *work)
62015          */
62016         do {
62017                 io_uring_try_cancel_requests(ctx, NULL, NULL);
62018 +               if (ctx->sq_data) {
62019 +                       struct io_sq_data *sqd = ctx->sq_data;
62020 +                       struct task_struct *tsk;
62022 +                       io_sq_thread_park(sqd);
62023 +                       tsk = sqd->thread;
62024 +                       if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
62025 +                               io_wq_cancel_cb(tsk->io_uring->io_wq,
62026 +                                               io_cancel_ctx_cb, ctx, true);
62027 +                       io_sq_thread_unpark(sqd);
62028 +               }
62030                 WARN_ON_ONCE(time_after(jiffies, timeout));
62031         } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
62032 @@ -8736,13 +8755,6 @@ static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
62033         return true;
62036 -static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
62038 -       struct io_kiocb *req = container_of(work, struct io_kiocb, work);
62040 -       return req->ctx == data;
62043  static bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
62045         struct io_tctx_node *node;
62046 @@ -8935,11 +8947,11 @@ static s64 tctx_inflight(struct io_uring_task *tctx)
62047  static void io_sqpoll_cancel_cb(struct callback_head *cb)
62049         struct io_tctx_exit *work = container_of(cb, struct io_tctx_exit, task_work);
62050 -       struct io_ring_ctx *ctx = work->ctx;
62051 -       struct io_sq_data *sqd = ctx->sq_data;
62052 +       struct io_sq_data *sqd = work->ctx->sq_data;
62054         if (sqd->thread)
62055 -               io_uring_cancel_sqpoll(ctx);
62056 +               io_uring_cancel_sqpoll(sqd);
62057 +       list_del_init(&work->ctx->sqd_list);
62058         complete(&work->completion);
62061 @@ -8950,7 +8962,6 @@ static void io_sqpoll_cancel_sync(struct io_ring_ctx *ctx)
62062         struct task_struct *task;
62064         io_sq_thread_park(sqd);
62065 -       list_del_init(&ctx->sqd_list);
62066         io_sqd_update_thread_idle(sqd);
62067         task = sqd->thread;
62068         if (task) {
62069 @@ -8958,6 +8969,8 @@ static void io_sqpoll_cancel_sync(struct io_ring_ctx *ctx)
62070                 init_task_work(&work.task_work, io_sqpoll_cancel_cb);
62071                 io_task_work_add_head(&sqd->park_task_work, &work.task_work);
62072                 wake_up_process(task);
62073 +       } else {
62074 +               list_del_init(&ctx->sqd_list);
62075         }
62076         io_sq_thread_unpark(sqd);
62078 @@ -8991,14 +9004,16 @@ void __io_uring_files_cancel(struct files_struct *files)
62081  /* should only be called by SQPOLL task */
62082 -static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx)
62083 +static void io_uring_cancel_sqpoll(struct io_sq_data *sqd)
62085 -       struct io_sq_data *sqd = ctx->sq_data;
62086         struct io_uring_task *tctx = current->io_uring;
62087 +       struct io_ring_ctx *ctx;
62088         s64 inflight;
62089         DEFINE_WAIT(wait);
62091 -       WARN_ON_ONCE(!sqd || ctx->sq_data->thread != current);
62092 +       if (!current->io_uring)
62093 +               return;
62094 +       WARN_ON_ONCE(!sqd || sqd->thread != current);
62096         atomic_inc(&tctx->in_idle);
62097         do {
62098 @@ -9006,7 +9021,8 @@ static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx)
62099                 inflight = tctx_inflight(tctx);
62100                 if (!inflight)
62101                         break;
62102 -               io_uring_try_cancel_requests(ctx, current, NULL);
62103 +               list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
62104 +                       io_uring_try_cancel_requests(ctx, current, NULL);
62106                 prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
62107                 /*
62108 diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
62109 index 69f18fe20923..d47a0d96bf30 100644
62110 --- a/fs/jbd2/recovery.c
62111 +++ b/fs/jbd2/recovery.c
62112 @@ -245,15 +245,14 @@ static int fc_do_one_pass(journal_t *journal,
62113                 return 0;
62115         while (next_fc_block <= journal->j_fc_last) {
62116 -               jbd_debug(3, "Fast commit replay: next block %ld",
62117 +               jbd_debug(3, "Fast commit replay: next block %ld\n",
62118                           next_fc_block);
62119                 err = jread(&bh, journal, next_fc_block);
62120                 if (err) {
62121 -                       jbd_debug(3, "Fast commit replay: read error");
62122 +                       jbd_debug(3, "Fast commit replay: read error\n");
62123                         break;
62124                 }
62126 -               jbd_debug(3, "Processing fast commit blk with seq %d");
62127                 err = journal->j_fc_replay_callback(journal, bh, pass,
62128                                         next_fc_block - journal->j_fc_first,
62129                                         expected_commit_id);
62130 diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
62131 index 9396666b7314..e8fc45fd751f 100644
62132 --- a/fs/jbd2/transaction.c
62133 +++ b/fs/jbd2/transaction.c
62134 @@ -349,7 +349,12 @@ static int start_this_handle(journal_t *journal, handle_t *handle,
62135         }
62137  alloc_transaction:
62138 -       if (!journal->j_running_transaction) {
62139 +       /*
62140 +        * This check is racy but it is just an optimization of allocating new
62141 +        * transaction early if there are high chances we'll need it. If we
62142 +        * guess wrong, we'll retry or free unused transaction.
62143 +        */
62144 +       if (!data_race(journal->j_running_transaction)) {
62145                 /*
62146                  * If __GFP_FS is not present, then we may be being called from
62147                  * inside the fs writeback layer, so we MUST NOT fail.
62148 @@ -1474,8 +1479,8 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
62149          * crucial to catch bugs so let's do a reliable check until the
62150          * lockless handling is fully proven.
62151          */
62152 -       if (jh->b_transaction != transaction &&
62153 -           jh->b_next_transaction != transaction) {
62154 +       if (data_race(jh->b_transaction != transaction &&
62155 +           jh->b_next_transaction != transaction)) {
62156                 spin_lock(&jh->b_state_lock);
62157                 J_ASSERT_JH(jh, jh->b_transaction == transaction ||
62158                                 jh->b_next_transaction == transaction);
62159 @@ -1483,8 +1488,8 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
62160         }
62161         if (jh->b_modified == 1) {
62162                 /* If it's in our transaction it must be in BJ_Metadata list. */
62163 -               if (jh->b_transaction == transaction &&
62164 -                   jh->b_jlist != BJ_Metadata) {
62165 +               if (data_race(jh->b_transaction == transaction &&
62166 +                   jh->b_jlist != BJ_Metadata)) {
62167                         spin_lock(&jh->b_state_lock);
62168                         if (jh->b_transaction == transaction &&
62169                             jh->b_jlist != BJ_Metadata)
62170 diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
62171 index f8fb89b10227..4fc8cd698d1a 100644
62172 --- a/fs/jffs2/file.c
62173 +++ b/fs/jffs2/file.c
62174 @@ -57,6 +57,7 @@ const struct file_operations jffs2_file_operations =
62175         .mmap =         generic_file_readonly_mmap,
62176         .fsync =        jffs2_fsync,
62177         .splice_read =  generic_file_splice_read,
62178 +       .splice_write = iter_file_splice_write,
62179  };
62181  /* jffs2_file_inode_operations */
62182 diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
62183 index db72a9d2d0af..b676056826be 100644
62184 --- a/fs/jffs2/scan.c
62185 +++ b/fs/jffs2/scan.c
62186 @@ -1079,7 +1079,7 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo
62187         memcpy(&fd->name, rd->name, checkedlen);
62188         fd->name[checkedlen] = 0;
62190 -       crc = crc32(0, fd->name, rd->nsize);
62191 +       crc = crc32(0, fd->name, checkedlen);
62192         if (crc != je32_to_cpu(rd->name_crc)) {
62193                 pr_notice("%s(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
62194                           __func__, ofs, je32_to_cpu(rd->name_crc), crc);
62195 diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
62196 index f7786e00a6a7..ed9d580826f5 100644
62197 --- a/fs/nfs/callback_proc.c
62198 +++ b/fs/nfs/callback_proc.c
62199 @@ -137,12 +137,12 @@ static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp,
62200                 list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
62201                         if (!pnfs_layout_is_valid(lo))
62202                                 continue;
62203 -                       if (stateid != NULL &&
62204 -                           !nfs4_stateid_match_other(stateid, &lo->plh_stateid))
62205 +                       if (!nfs4_stateid_match_other(stateid, &lo->plh_stateid))
62206                                 continue;
62207 -                       if (!nfs_sb_active(server->super))
62208 -                               continue;
62209 -                       inode = igrab(lo->plh_inode);
62210 +                       if (nfs_sb_active(server->super))
62211 +                               inode = igrab(lo->plh_inode);
62212 +                       else
62213 +                               inode = ERR_PTR(-EAGAIN);
62214                         rcu_read_unlock();
62215                         if (inode)
62216                                 return inode;
62217 @@ -176,9 +176,10 @@ static struct inode *nfs_layout_find_inode_by_fh(struct nfs_client *clp,
62218                                 continue;
62219                         if (nfsi->layout != lo)
62220                                 continue;
62221 -                       if (!nfs_sb_active(server->super))
62222 -                               continue;
62223 -                       inode = igrab(lo->plh_inode);
62224 +                       if (nfs_sb_active(server->super))
62225 +                               inode = igrab(lo->plh_inode);
62226 +                       else
62227 +                               inode = ERR_PTR(-EAGAIN);
62228                         rcu_read_unlock();
62229                         if (inode)
62230                                 return inode;
62231 diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
62232 index fc4f490f2d78..0cd7c59a6601 100644
62233 --- a/fs/nfs/dir.c
62234 +++ b/fs/nfs/dir.c
62235 @@ -866,6 +866,8 @@ static int nfs_readdir_xdr_to_array(struct nfs_readdir_descriptor *desc,
62236                         break;
62237                 }
62239 +               verf_arg = verf_res;
62241                 status = nfs_readdir_page_filler(desc, entry, pages, pglen,
62242                                                  arrays, narrays);
62243         } while (!status && nfs_readdir_page_needs_filling(page));
62244 @@ -927,7 +929,12 @@ static int find_and_lock_cache_page(struct nfs_readdir_descriptor *desc)
62245                         }
62246                         return res;
62247                 }
62248 -               memcpy(nfsi->cookieverf, verf, sizeof(nfsi->cookieverf));
62249 +               /*
62250 +                * Set the cookie verifier if the page cache was empty
62251 +                */
62252 +               if (desc->page_index == 0)
62253 +                       memcpy(nfsi->cookieverf, verf,
62254 +                              sizeof(nfsi->cookieverf));
62255         }
62256         res = nfs_readdir_search_array(desc);
62257         if (res == 0) {
62258 @@ -974,10 +981,10 @@ static int readdir_search_pagecache(struct nfs_readdir_descriptor *desc)
62259  /*
62260   * Once we've found the start of the dirent within a page: fill 'er up...
62261   */
62262 -static void nfs_do_filldir(struct nfs_readdir_descriptor *desc)
62263 +static void nfs_do_filldir(struct nfs_readdir_descriptor *desc,
62264 +                          const __be32 *verf)
62266         struct file     *file = desc->file;
62267 -       struct nfs_inode *nfsi = NFS_I(file_inode(file));
62268         struct nfs_cache_array *array;
62269         unsigned int i = 0;
62271 @@ -991,7 +998,7 @@ static void nfs_do_filldir(struct nfs_readdir_descriptor *desc)
62272                         desc->eof = true;
62273                         break;
62274                 }
62275 -               memcpy(desc->verf, nfsi->cookieverf, sizeof(desc->verf));
62276 +               memcpy(desc->verf, verf, sizeof(desc->verf));
62277                 if (i < (array->size-1))
62278                         desc->dir_cookie = array->array[i+1].cookie;
62279                 else
62280 @@ -1048,7 +1055,7 @@ static int uncached_readdir(struct nfs_readdir_descriptor *desc)
62282         for (i = 0; !desc->eof && i < sz && arrays[i]; i++) {
62283                 desc->page = arrays[i];
62284 -               nfs_do_filldir(desc);
62285 +               nfs_do_filldir(desc, verf);
62286         }
62287         desc->page = NULL;
62289 @@ -1069,6 +1076,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
62291         struct dentry   *dentry = file_dentry(file);
62292         struct inode    *inode = d_inode(dentry);
62293 +       struct nfs_inode *nfsi = NFS_I(inode);
62294         struct nfs_open_dir_context *dir_ctx = file->private_data;
62295         struct nfs_readdir_descriptor *desc;
62296         int res;
62297 @@ -1122,7 +1130,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
62298                         break;
62299                 }
62300                 if (res == -ETOOSMALL && desc->plus) {
62301 -                       clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
62302 +                       clear_bit(NFS_INO_ADVISE_RDPLUS, &nfsi->flags);
62303                         nfs_zap_caches(inode);
62304                         desc->page_index = 0;
62305                         desc->plus = false;
62306 @@ -1132,7 +1140,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
62307                 if (res < 0)
62308                         break;
62310 -               nfs_do_filldir(desc);
62311 +               nfs_do_filldir(desc, nfsi->cookieverf);
62312                 nfs_readdir_page_unlock_and_put_cached(desc);
62313         } while (!desc->eof);
62315 diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
62316 index 872112bffcab..d383de00d486 100644
62317 --- a/fs/nfs/flexfilelayout/flexfilelayout.c
62318 +++ b/fs/nfs/flexfilelayout/flexfilelayout.c
62319 @@ -106,7 +106,7 @@ static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
62320         if (unlikely(!p))
62321                 return -ENOBUFS;
62322         fh->size = be32_to_cpup(p++);
62323 -       if (fh->size > sizeof(struct nfs_fh)) {
62324 +       if (fh->size > NFS_MAXFHSIZE) {
62325                 printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
62326                        fh->size);
62327                 return -EOVERFLOW;
62328 diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c
62329 index 971a9251c1d9..902db1262d2b 100644
62330 --- a/fs/nfs/fs_context.c
62331 +++ b/fs/nfs/fs_context.c
62332 @@ -973,6 +973,15 @@ static int nfs23_parse_monolithic(struct fs_context *fc,
62333                         memset(mntfh->data + mntfh->size, 0,
62334                                sizeof(mntfh->data) - mntfh->size);
62336 +               /*
62337 +                * for proto == XPRT_TRANSPORT_UDP, which is what uses
62338 +                * to_exponential, implying shift: limit the shift value
62339 +                * to BITS_PER_LONG (majortimeo is unsigned long)
62340 +                */
62341 +               if (!(data->flags & NFS_MOUNT_TCP)) /* this will be UDP */
62342 +                       if (data->retrans >= 64) /* shift value is too large */
62343 +                               goto out_invalid_data;
62345                 /*
62346                  * Translate to nfs_fs_context, which nfs_fill_super
62347                  * can deal with.
62348 @@ -1073,6 +1082,9 @@ static int nfs23_parse_monolithic(struct fs_context *fc,
62350  out_invalid_fh:
62351         return nfs_invalf(fc, "NFS: invalid root filehandle");
62353 +out_invalid_data:
62354 +       return nfs_invalf(fc, "NFS: invalid binary mount data");
62357  #if IS_ENABLED(CONFIG_NFS_V4)
62358 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
62359 index a7fb076a5f44..ae8bc84e39fb 100644
62360 --- a/fs/nfs/inode.c
62361 +++ b/fs/nfs/inode.c
62362 @@ -219,15 +219,16 @@ void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
62363                                 | NFS_INO_INVALID_SIZE
62364                                 | NFS_INO_REVAL_PAGECACHE
62365                                 | NFS_INO_INVALID_XATTR);
62366 -       }
62367 +       } else if (flags & NFS_INO_REVAL_PAGECACHE)
62368 +               flags |= NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE;
62370         if (!nfs_has_xattr_cache(nfsi))
62371                 flags &= ~NFS_INO_INVALID_XATTR;
62372 +       if (flags & NFS_INO_INVALID_DATA)
62373 +               nfs_fscache_invalidate(inode);
62374         if (inode->i_mapping->nrpages == 0)
62375                 flags &= ~(NFS_INO_INVALID_DATA|NFS_INO_DATA_INVAL_DEFER);
62376         nfsi->cache_validity |= flags;
62377 -       if (flags & NFS_INO_INVALID_DATA)
62378 -               nfs_fscache_invalidate(inode);
62380  EXPORT_SYMBOL_GPL(nfs_set_cache_invalid);
62382 @@ -1662,10 +1663,10 @@ EXPORT_SYMBOL_GPL(_nfs_display_fhandle);
62383   */
62384  static int nfs_inode_attrs_need_update(const struct inode *inode, const struct nfs_fattr *fattr)
62386 -       const struct nfs_inode *nfsi = NFS_I(inode);
62387 +       unsigned long attr_gencount = NFS_I(inode)->attr_gencount;
62389 -       return ((long)fattr->gencount - (long)nfsi->attr_gencount) > 0 ||
62390 -               ((long)nfsi->attr_gencount - (long)nfs_read_attr_generation_counter() > 0);
62391 +       return (long)(fattr->gencount - attr_gencount) > 0 ||
62392 +              (long)(attr_gencount - nfs_read_attr_generation_counter()) > 0;
62395  static int nfs_refresh_inode_locked(struct inode *inode, struct nfs_fattr *fattr)
62396 @@ -2094,7 +2095,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
62397                         nfsi->attrtimeo_timestamp = now;
62398                 }
62399                 /* Set the barrier to be more recent than this fattr */
62400 -               if ((long)fattr->gencount - (long)nfsi->attr_gencount > 0)
62401 +               if ((long)(fattr->gencount - nfsi->attr_gencount) > 0)
62402                         nfsi->attr_gencount = fattr->gencount;
62403         }
62405 diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
62406 index 094024b0aca1..3875120ef3ef 100644
62407 --- a/fs/nfs/nfs42proc.c
62408 +++ b/fs/nfs/nfs42proc.c
62409 @@ -46,11 +46,12 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
62411         struct inode *inode = file_inode(filep);
62412         struct nfs_server *server = NFS_SERVER(inode);
62413 +       u32 bitmask[3];
62414         struct nfs42_falloc_args args = {
62415                 .falloc_fh      = NFS_FH(inode),
62416                 .falloc_offset  = offset,
62417                 .falloc_length  = len,
62418 -               .falloc_bitmask = nfs4_fattr_bitmap,
62419 +               .falloc_bitmask = bitmask,
62420         };
62421         struct nfs42_falloc_res res = {
62422                 .falloc_server  = server,
62423 @@ -68,6 +69,10 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
62424                 return status;
62425         }
62427 +       memcpy(bitmask, server->cache_consistency_bitmask, sizeof(bitmask));
62428 +       if (server->attr_bitmask[1] & FATTR4_WORD1_SPACE_USED)
62429 +               bitmask[1] |= FATTR4_WORD1_SPACE_USED;
62431         res.falloc_fattr = nfs_alloc_fattr();
62432         if (!res.falloc_fattr)
62433                 return -ENOMEM;
62434 @@ -75,7 +80,8 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
62435         status = nfs4_call_sync(server->client, server, msg,
62436                                 &args.seq_args, &res.seq_res, 0);
62437         if (status == 0)
62438 -               status = nfs_post_op_update_inode(inode, res.falloc_fattr);
62439 +               status = nfs_post_op_update_inode_force_wcc(inode,
62440 +                                                           res.falloc_fattr);
62442         kfree(res.falloc_fattr);
62443         return status;
62444 @@ -84,7 +90,8 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
62445  static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
62446                                 loff_t offset, loff_t len)
62448 -       struct nfs_server *server = NFS_SERVER(file_inode(filep));
62449 +       struct inode *inode = file_inode(filep);
62450 +       struct nfs_server *server = NFS_SERVER(inode);
62451         struct nfs4_exception exception = { };
62452         struct nfs_lock_context *lock;
62453         int err;
62454 @@ -93,9 +100,13 @@ static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
62455         if (IS_ERR(lock))
62456                 return PTR_ERR(lock);
62458 -       exception.inode = file_inode(filep);
62459 +       exception.inode = inode;
62460         exception.state = lock->open_context->state;
62462 +       err = nfs_sync_inode(inode);
62463 +       if (err)
62464 +               goto out;
62466         do {
62467                 err = _nfs42_proc_fallocate(msg, filep, lock, offset, len);
62468                 if (err == -ENOTSUPP) {
62469 @@ -104,7 +115,7 @@ static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
62470                 }
62471                 err = nfs4_handle_exception(server, err, &exception);
62472         } while (exception.retry);
62474 +out:
62475         nfs_put_lock_context(lock);
62476         return err;
62478 @@ -142,16 +153,13 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
62479                 return -EOPNOTSUPP;
62481         inode_lock(inode);
62482 -       err = nfs_sync_inode(inode);
62483 -       if (err)
62484 -               goto out_unlock;
62486         err = nfs42_proc_fallocate(&msg, filep, offset, len);
62487         if (err == 0)
62488                 truncate_pagecache_range(inode, offset, (offset + len) -1);
62489         if (err == -EOPNOTSUPP)
62490                 NFS_SERVER(inode)->caps &= ~NFS_CAP_DEALLOCATE;
62491 -out_unlock:
62493         inode_unlock(inode);
62494         return err;
62496 @@ -261,6 +269,33 @@ static int process_copy_commit(struct file *dst, loff_t pos_dst,
62497         return status;
62500 +/**
62501 + * nfs42_copy_dest_done - perform inode cache updates after clone/copy offload
62502 + * @inode: pointer to destination inode
62503 + * @pos: destination offset
62504 + * @len: copy length
62505 + *
62506 + * Punch a hole in the inode page cache, so that the NFS client will
62507 + * know to retrieve new data.
62508 + * Update the file size if necessary, and then mark the inode as having
62509 + * invalid cached values for change attribute, ctime, mtime and space used.
62510 + */
62511 +static void nfs42_copy_dest_done(struct inode *inode, loff_t pos, loff_t len)
62513 +       loff_t newsize = pos + len;
62514 +       loff_t end = newsize - 1;
62516 +       truncate_pagecache_range(inode, pos, end);
62517 +       spin_lock(&inode->i_lock);
62518 +       if (newsize > i_size_read(inode))
62519 +               i_size_write(inode, newsize);
62520 +       nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
62521 +                                            NFS_INO_INVALID_CTIME |
62522 +                                            NFS_INO_INVALID_MTIME |
62523 +                                            NFS_INO_INVALID_BLOCKS);
62524 +       spin_unlock(&inode->i_lock);
62527  static ssize_t _nfs42_proc_copy(struct file *src,
62528                                 struct nfs_lock_context *src_lock,
62529                                 struct file *dst,
62530 @@ -354,14 +389,8 @@ static ssize_t _nfs42_proc_copy(struct file *src,
62531                         goto out;
62532         }
62534 -       truncate_pagecache_range(dst_inode, pos_dst,
62535 -                                pos_dst + res->write_res.count);
62536 -       spin_lock(&dst_inode->i_lock);
62537 -       nfs_set_cache_invalid(
62538 -               dst_inode, NFS_INO_REVAL_PAGECACHE | NFS_INO_REVAL_FORCED |
62539 -                                  NFS_INO_INVALID_SIZE | NFS_INO_INVALID_ATTR |
62540 -                                  NFS_INO_INVALID_DATA);
62541 -       spin_unlock(&dst_inode->i_lock);
62542 +       nfs42_copy_dest_done(dst_inode, pos_dst, res->write_res.count);
62544         spin_lock(&src_inode->i_lock);
62545         nfs_set_cache_invalid(src_inode, NFS_INO_REVAL_PAGECACHE |
62546                                                  NFS_INO_REVAL_FORCED |
62547 @@ -659,7 +688,10 @@ static loff_t _nfs42_proc_llseek(struct file *filep,
62548         if (status)
62549                 return status;
62551 -       return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
62552 +       if (whence == SEEK_DATA && res.sr_eof)
62553 +               return -NFS4ERR_NXIO;
62554 +       else
62555 +               return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
62558  loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
62559 @@ -1044,8 +1076,10 @@ static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
62561         status = nfs4_call_sync(server->client, server, msg,
62562                                 &args.seq_args, &res.seq_res, 0);
62563 -       if (status == 0)
62564 +       if (status == 0) {
62565 +               nfs42_copy_dest_done(dst_inode, dst_offset, count);
62566                 status = nfs_post_op_update_inode(dst_inode, res.dst_fattr);
62567 +       }
62569         kfree(res.dst_fattr);
62570         return status;
62571 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
62572 index c65c4b41e2c1..820abae88cf0 100644
62573 --- a/fs/nfs/nfs4proc.c
62574 +++ b/fs/nfs/nfs4proc.c
62575 @@ -108,9 +108,10 @@ static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
62576  static int nfs41_free_stateid(struct nfs_server *, const nfs4_stateid *,
62577                 const struct cred *, bool);
62578  #endif
62579 -static void nfs4_bitmask_adjust(__u32 *bitmask, struct inode *inode,
62580 -               struct nfs_server *server,
62581 -               struct nfs4_label *label);
62582 +static void nfs4_bitmask_set(__u32 bitmask[NFS4_BITMASK_SZ],
62583 +                            const __u32 *src, struct inode *inode,
62584 +                            struct nfs_server *server,
62585 +                            struct nfs4_label *label);
62587  #ifdef CONFIG_NFS_V4_SECURITY_LABEL
62588  static inline struct nfs4_label *
62589 @@ -3591,6 +3592,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
62590         struct nfs4_closedata *calldata = data;
62591         struct nfs4_state *state = calldata->state;
62592         struct inode *inode = calldata->inode;
62593 +       struct nfs_server *server = NFS_SERVER(inode);
62594         struct pnfs_layout_hdr *lo;
62595         bool is_rdonly, is_wronly, is_rdwr;
62596         int call_close = 0;
62597 @@ -3647,8 +3649,10 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
62598         if (calldata->arg.fmode == 0 || calldata->arg.fmode == FMODE_READ) {
62599                 /* Close-to-open cache consistency revalidation */
62600                 if (!nfs4_have_delegation(inode, FMODE_READ)) {
62601 -                       calldata->arg.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
62602 -                       nfs4_bitmask_adjust(calldata->arg.bitmask, inode, NFS_SERVER(inode), NULL);
62603 +                       nfs4_bitmask_set(calldata->arg.bitmask_store,
62604 +                                        server->cache_consistency_bitmask,
62605 +                                        inode, server, NULL);
62606 +                       calldata->arg.bitmask = calldata->arg.bitmask_store;
62607                 } else
62608                         calldata->arg.bitmask = NULL;
62609         }
62610 @@ -5416,19 +5420,17 @@ bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr)
62611         return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
62614 -static void nfs4_bitmask_adjust(__u32 *bitmask, struct inode *inode,
62615 -                               struct nfs_server *server,
62616 -                               struct nfs4_label *label)
62617 +static void nfs4_bitmask_set(__u32 bitmask[NFS4_BITMASK_SZ], const __u32 *src,
62618 +                            struct inode *inode, struct nfs_server *server,
62619 +                            struct nfs4_label *label)
62622         unsigned long cache_validity = READ_ONCE(NFS_I(inode)->cache_validity);
62623 +       unsigned int i;
62625 -       if ((cache_validity & NFS_INO_INVALID_DATA) ||
62626 -               (cache_validity & NFS_INO_REVAL_PAGECACHE) ||
62627 -               (cache_validity & NFS_INO_REVAL_FORCED) ||
62628 -               (cache_validity & NFS_INO_INVALID_OTHER))
62629 -               nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, label), inode);
62630 +       memcpy(bitmask, src, sizeof(*bitmask) * NFS4_BITMASK_SZ);
62632 +       if (cache_validity & (NFS_INO_INVALID_CHANGE | NFS_INO_REVAL_PAGECACHE))
62633 +               bitmask[0] |= FATTR4_WORD0_CHANGE;
62634         if (cache_validity & NFS_INO_INVALID_ATIME)
62635                 bitmask[1] |= FATTR4_WORD1_TIME_ACCESS;
62636         if (cache_validity & NFS_INO_INVALID_OTHER)
62637 @@ -5437,16 +5439,22 @@ static void nfs4_bitmask_adjust(__u32 *bitmask, struct inode *inode,
62638                                 FATTR4_WORD1_NUMLINKS;
62639         if (label && label->len && cache_validity & NFS_INO_INVALID_LABEL)
62640                 bitmask[2] |= FATTR4_WORD2_SECURITY_LABEL;
62641 -       if (cache_validity & NFS_INO_INVALID_CHANGE)
62642 -               bitmask[0] |= FATTR4_WORD0_CHANGE;
62643         if (cache_validity & NFS_INO_INVALID_CTIME)
62644                 bitmask[1] |= FATTR4_WORD1_TIME_METADATA;
62645         if (cache_validity & NFS_INO_INVALID_MTIME)
62646                 bitmask[1] |= FATTR4_WORD1_TIME_MODIFY;
62647 -       if (cache_validity & NFS_INO_INVALID_SIZE)
62648 -               bitmask[0] |= FATTR4_WORD0_SIZE;
62649         if (cache_validity & NFS_INO_INVALID_BLOCKS)
62650                 bitmask[1] |= FATTR4_WORD1_SPACE_USED;
62652 +       if (nfs4_have_delegation(inode, FMODE_READ) &&
62653 +           !(cache_validity & NFS_INO_REVAL_FORCED))
62654 +               bitmask[0] &= ~FATTR4_WORD0_SIZE;
62655 +       else if (cache_validity &
62656 +                (NFS_INO_INVALID_SIZE | NFS_INO_REVAL_PAGECACHE))
62657 +               bitmask[0] |= FATTR4_WORD0_SIZE;
62659 +       for (i = 0; i < NFS4_BITMASK_SZ; i++)
62660 +               bitmask[i] &= server->attr_bitmask[i];
62663  static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
62664 @@ -5459,8 +5467,10 @@ static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
62665                 hdr->args.bitmask = NULL;
62666                 hdr->res.fattr = NULL;
62667         } else {
62668 -               hdr->args.bitmask = server->cache_consistency_bitmask;
62669 -               nfs4_bitmask_adjust(hdr->args.bitmask, hdr->inode, server, NULL);
62670 +               nfs4_bitmask_set(hdr->args.bitmask_store,
62671 +                                server->cache_consistency_bitmask,
62672 +                                hdr->inode, server, NULL);
62673 +               hdr->args.bitmask = hdr->args.bitmask_store;
62674         }
62676         if (!hdr->pgio_done_cb)
62677 @@ -6502,8 +6512,10 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
62679         data->args.fhandle = &data->fh;
62680         data->args.stateid = &data->stateid;
62681 -       data->args.bitmask = server->cache_consistency_bitmask;
62682 -       nfs4_bitmask_adjust(data->args.bitmask, inode, server, NULL);
62683 +       nfs4_bitmask_set(data->args.bitmask_store,
62684 +                        server->cache_consistency_bitmask, inode, server,
62685 +                        NULL);
62686 +       data->args.bitmask = data->args.bitmask_store;
62687         nfs_copy_fh(&data->fh, NFS_FH(inode));
62688         nfs4_stateid_copy(&data->stateid, stateid);
62689         data->res.fattr = &data->fattr;
62690 diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
62691 index 102b66e0bdef..f726f8b12b7e 100644
62692 --- a/fs/nfs/pnfs.c
62693 +++ b/fs/nfs/pnfs.c
62694 @@ -1344,7 +1344,7 @@ _pnfs_return_layout(struct inode *ino)
62695         }
62696         valid_layout = pnfs_layout_is_valid(lo);
62697         pnfs_clear_layoutcommit(ino, &tmp_list);
62698 -       pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL, 0);
62699 +       pnfs_mark_matching_lsegs_return(lo, &tmp_list, NULL, 0);
62701         if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
62702                 struct pnfs_layout_range range = {
62703 @@ -2468,6 +2468,9 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
62705         assert_spin_locked(&lo->plh_inode->i_lock);
62707 +       if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
62708 +               tmp_list = &lo->plh_return_segs;
62710         list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
62711                 if (pnfs_match_lseg_recall(lseg, return_range, seq)) {
62712                         dprintk("%s: marking lseg %p iomode %d "
62713 @@ -2475,6 +2478,8 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
62714                                 lseg, lseg->pls_range.iomode,
62715                                 lseg->pls_range.offset,
62716                                 lseg->pls_range.length);
62717 +                       if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
62718 +                               tmp_list = &lo->plh_return_segs;
62719                         if (mark_lseg_invalid(lseg, tmp_list))
62720                                 continue;
62721                         remaining++;
62722 diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
62723 index dd9f38d072dd..e13c4c81fb89 100644
62724 --- a/fs/nfsd/nfs4proc.c
62725 +++ b/fs/nfsd/nfs4proc.c
62726 @@ -1538,8 +1538,8 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
62727                 if (!nfs4_init_copy_state(nn, copy))
62728                         goto out_err;
62729                 refcount_set(&async_copy->refcount, 1);
62730 -               memcpy(&copy->cp_res.cb_stateid, &copy->cp_stateid,
62731 -                       sizeof(copy->cp_stateid));
62732 +               memcpy(&copy->cp_res.cb_stateid, &copy->cp_stateid.stid,
62733 +                       sizeof(copy->cp_res.cb_stateid));
62734                 dup_copy_fields(copy, async_copy);
62735                 async_copy->copy_task = kthread_create(nfsd4_do_async_copy,
62736                                 async_copy, "%s", "copy thread");
62737 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
62738 index 97447a64bad0..886e50ed07c2 100644
62739 --- a/fs/nfsd/nfs4state.c
62740 +++ b/fs/nfsd/nfs4state.c
62741 @@ -4869,6 +4869,11 @@ static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
62742         if (nf)
62743                 nfsd_file_put(nf);
62745 +       status = nfserrno(nfsd_open_break_lease(cur_fh->fh_dentry->d_inode,
62746 +                                                               access));
62747 +       if (status)
62748 +               goto out_put_access;
62750         status = nfsd4_truncate(rqstp, cur_fh, open);
62751         if (status)
62752                 goto out_put_access;
62753 @@ -6849,11 +6854,20 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
62754  static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
62756         struct nfsd_file *nf;
62757 -       __be32 err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
62758 -       if (!err) {
62759 -               err = nfserrno(vfs_test_lock(nf->nf_file, lock));
62760 -               nfsd_file_put(nf);
62761 -       }
62762 +       __be32 err;
62764 +       err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
62765 +       if (err)
62766 +               return err;
62767 +       fh_lock(fhp); /* to block new leases till after test_lock: */
62768 +       err = nfserrno(nfsd_open_break_lease(fhp->fh_dentry->d_inode,
62769 +                                                       NFSD_MAY_READ));
62770 +       if (err)
62771 +               goto out;
62772 +       err = nfserrno(vfs_test_lock(nf->nf_file, lock));
62773 +out:
62774 +       fh_unlock(fhp);
62775 +       nfsd_file_put(nf);
62776         return err;
62779 diff --git a/fs/ntfs3/Kconfig b/fs/ntfs3/Kconfig
62780 new file mode 100644
62781 index 000000000000..6e4cbc48ab8e
62782 --- /dev/null
62783 +++ b/fs/ntfs3/Kconfig
62784 @@ -0,0 +1,46 @@
62785 +# SPDX-License-Identifier: GPL-2.0-only
62786 +config NTFS3_FS
62787 +       tristate "NTFS Read-Write file system support"
62788 +       select NLS
62789 +       help
62790 +         Windows OS native file system (NTFS) support up to NTFS version 3.1.
62792 +         Y or M enables the NTFS3 driver with full features enabled (read,
62793 +         write, journal replaying, sparse/compressed files support).
62794 +         File system type to use on mount is "ntfs3". Module name (M option)
62795 +         is also "ntfs3".
62797 +         Documentation: <file:Documentation/filesystems/ntfs3.rst>
62799 +config NTFS3_64BIT_CLUSTER
62800 +       bool "64 bits per NTFS clusters"
62801 +       depends on NTFS3_FS && 64BIT
62802 +       help
62803 +         Windows implementation of ntfs.sys uses 32 bits per clusters.
62804 +         If activated 64 bits per clusters you will be able to use 4k cluster
62805 +         for 16T+ volumes. Windows will not be able to mount such volumes.
62807 +         It is recommended to say N here.
62809 +config NTFS3_LZX_XPRESS
62810 +       bool "activate support of external compressions lzx/xpress"
62811 +       depends on NTFS3_FS
62812 +       help
62813 +         In Windows 10 one can use command "compact" to compress any files.
62814 +         4 possible variants of compression are: xpress4k, xpress8k, xpress16k and lzx.
62815 +         If activated you will be able to read such files correctly.
62817 +         It is recommended to say Y here.
62819 +config NTFS3_FS_POSIX_ACL
62820 +       bool "NTFS POSIX Access Control Lists"
62821 +       depends on NTFS3_FS
62822 +       select FS_POSIX_ACL
62823 +       help
62824 +         POSIX Access Control Lists (ACLs) support additional access rights
62825 +         for users and groups beyond the standard owner/group/world scheme,
62826 +         and this option selects support for ACLs specifically for ntfs
62827 +         filesystems.
62828 +         NOTE: this is linux only feature. Windows will ignore these ACLs.
62830 +         If you don't know what Access Control Lists are, say N.
62831 diff --git a/fs/ntfs3/Makefile b/fs/ntfs3/Makefile
62832 new file mode 100644
62833 index 000000000000..5adc54ebac5a
62834 --- /dev/null
62835 +++ b/fs/ntfs3/Makefile
62836 @@ -0,0 +1,38 @@
62837 +# SPDX-License-Identifier: GPL-2.0
62839 +# Makefile for the ntfs3 filesystem support.
62842 +# to check robot warnings
62843 +ccflags-y += -Wint-to-pointer-cast
62844 +condflags := \
62845 +       $(call cc-option, -Wunused-but-set-variable) \
62846 +       $(call cc-option, -Wold-style-declaration)
62847 +ccflags-y += $(condflags)
62849 +obj-$(CONFIG_NTFS3_FS) += ntfs3.o
62851 +ntfs3-y :=     attrib.o \
62852 +               attrlist.o \
62853 +               bitfunc.o \
62854 +               bitmap.o \
62855 +               dir.o \
62856 +               fsntfs.o \
62857 +               frecord.o \
62858 +               file.o \
62859 +               fslog.o \
62860 +               inode.o \
62861 +               index.o \
62862 +               lznt.o \
62863 +               namei.o \
62864 +               record.o \
62865 +               run.o \
62866 +               super.o \
62867 +               upcase.o \
62868 +               xattr.o
62870 +ntfs3-$(CONFIG_NTFS3_LZX_XPRESS) += $(addprefix lib/,\
62871 +               decompress_common.o \
62872 +               lzx_decompress.o \
62873 +               xpress_decompress.o \
62874 +               )
62875 diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c
62876 new file mode 100644
62877 index 000000000000..bca85e7b6eaf
62878 --- /dev/null
62879 +++ b/fs/ntfs3/attrib.c
62880 @@ -0,0 +1,2082 @@
62881 +// SPDX-License-Identifier: GPL-2.0
62883 + *
62884 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
62885 + *
62886 + * TODO: merge attr_set_size/attr_data_get_block/attr_allocate_frame?
62887 + */
62889 +#include <linux/blkdev.h>
62890 +#include <linux/buffer_head.h>
62891 +#include <linux/fs.h>
62892 +#include <linux/hash.h>
62893 +#include <linux/nls.h>
62894 +#include <linux/ratelimit.h>
62895 +#include <linux/slab.h>
62897 +#include "debug.h"
62898 +#include "ntfs.h"
62899 +#include "ntfs_fs.h"
62902 + * You can set external NTFS_MIN_LOG2_OF_CLUMP/NTFS_MAX_LOG2_OF_CLUMP to manage
62903 + * preallocate algorithm
62904 + */
62905 +#ifndef NTFS_MIN_LOG2_OF_CLUMP
62906 +#define NTFS_MIN_LOG2_OF_CLUMP 16
62907 +#endif
62909 +#ifndef NTFS_MAX_LOG2_OF_CLUMP
62910 +#define NTFS_MAX_LOG2_OF_CLUMP 26
62911 +#endif
62913 +// 16M
62914 +#define NTFS_CLUMP_MIN (1 << (NTFS_MIN_LOG2_OF_CLUMP + 8))
62915 +// 16G
62916 +#define NTFS_CLUMP_MAX (1ull << (NTFS_MAX_LOG2_OF_CLUMP + 8))
62919 + * get_pre_allocated
62920 + *
62921 + */
62922 +static inline u64 get_pre_allocated(u64 size)
62924 +       u32 clump;
62925 +       u8 align_shift;
62926 +       u64 ret;
62928 +       if (size <= NTFS_CLUMP_MIN) {
62929 +               clump = 1 << NTFS_MIN_LOG2_OF_CLUMP;
62930 +               align_shift = NTFS_MIN_LOG2_OF_CLUMP;
62931 +       } else if (size >= NTFS_CLUMP_MAX) {
62932 +               clump = 1 << NTFS_MAX_LOG2_OF_CLUMP;
62933 +               align_shift = NTFS_MAX_LOG2_OF_CLUMP;
62934 +       } else {
62935 +               align_shift = NTFS_MIN_LOG2_OF_CLUMP - 1 +
62936 +                             __ffs(size >> (8 + NTFS_MIN_LOG2_OF_CLUMP));
62937 +               clump = 1u << align_shift;
62938 +       }
62940 +       ret = (((size + clump - 1) >> align_shift)) << align_shift;
62942 +       return ret;
62946 + * attr_must_be_resident
62947 + *
62948 + * returns true if attribute must be resident
62949 + */
62950 +static inline bool attr_must_be_resident(struct ntfs_sb_info *sbi,
62951 +                                        enum ATTR_TYPE type)
62953 +       const struct ATTR_DEF_ENTRY *de;
62955 +       switch (type) {
62956 +       case ATTR_STD:
62957 +       case ATTR_NAME:
62958 +       case ATTR_ID:
62959 +       case ATTR_LABEL:
62960 +       case ATTR_VOL_INFO:
62961 +       case ATTR_ROOT:
62962 +       case ATTR_EA_INFO:
62963 +               return true;
62964 +       default:
62965 +               de = ntfs_query_def(sbi, type);
62966 +               if (de && (de->flags & NTFS_ATTR_MUST_BE_RESIDENT))
62967 +                       return true;
62968 +               return false;
62969 +       }
62973 + * attr_load_runs
62974 + *
62975 + * load all runs stored in 'attr'
62976 + */
62977 +int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
62978 +                  struct runs_tree *run, const CLST *vcn)
62980 +       int err;
62981 +       CLST svcn = le64_to_cpu(attr->nres.svcn);
62982 +       CLST evcn = le64_to_cpu(attr->nres.evcn);
62983 +       u32 asize;
62984 +       u16 run_off;
62986 +       if (svcn >= evcn + 1 || run_is_mapped_full(run, svcn, evcn))
62987 +               return 0;
62989 +       if (vcn && (evcn < *vcn || *vcn < svcn))
62990 +               return -EINVAL;
62992 +       asize = le32_to_cpu(attr->size);
62993 +       run_off = le16_to_cpu(attr->nres.run_off);
62994 +       err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn,
62995 +                           vcn ? *vcn : svcn, Add2Ptr(attr, run_off),
62996 +                           asize - run_off);
62997 +       if (err < 0)
62998 +               return err;
63000 +       return 0;
63004 + * int run_deallocate_ex
63005 + *
63006 + * Deallocate clusters
63007 + */
63008 +static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run,
63009 +                            CLST vcn, CLST len, CLST *done, bool trim)
63011 +       int err = 0;
63012 +       CLST vcn_next, vcn0 = vcn, lcn, clen, dn = 0;
63013 +       size_t idx;
63015 +       if (!len)
63016 +               goto out;
63018 +       if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
63019 +failed:
63020 +               run_truncate(run, vcn0);
63021 +               err = -EINVAL;
63022 +               goto out;
63023 +       }
63025 +       for (;;) {
63026 +               if (clen > len)
63027 +                       clen = len;
63029 +               if (!clen) {
63030 +                       err = -EINVAL;
63031 +                       goto out;
63032 +               }
63034 +               if (lcn != SPARSE_LCN) {
63035 +                       mark_as_free_ex(sbi, lcn, clen, trim);
63036 +                       dn += clen;
63037 +               }
63039 +               len -= clen;
63040 +               if (!len)
63041 +                       break;
63043 +               vcn_next = vcn + clen;
63044 +               if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
63045 +                   vcn != vcn_next) {
63046 +                       // save memory - don't load entire run
63047 +                       goto failed;
63048 +               }
63049 +       }
63051 +out:
63052 +       if (done)
63053 +               *done += dn;
63055 +       return err;
63059 + * attr_allocate_clusters
63060 + *
63061 + * find free space, mark it as used and store in 'run'
63062 + */
63063 +int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
63064 +                          CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
63065 +                          enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
63066 +                          CLST *new_lcn)
63068 +       int err;
63069 +       CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
63070 +       struct wnd_bitmap *wnd = &sbi->used.bitmap;
63071 +       size_t cnt = run->count;
63073 +       for (;;) {
63074 +               err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen,
63075 +                                              opt);
63077 +               if (err == -ENOSPC && pre) {
63078 +                       pre = 0;
63079 +                       if (*pre_alloc)
63080 +                               *pre_alloc = 0;
63081 +                       continue;
63082 +               }
63084 +               if (err)
63085 +                       goto out;
63087 +               if (new_lcn && vcn == vcn0)
63088 +                       *new_lcn = lcn;
63090 +               /* Add new fragment into run storage */
63091 +               if (!run_add_entry(run, vcn, lcn, flen, opt == ALLOCATE_MFT)) {
63092 +                       down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
63093 +                       wnd_set_free(wnd, lcn, flen);
63094 +                       up_write(&wnd->rw_lock);
63095 +                       err = -ENOMEM;
63096 +                       goto out;
63097 +               }
63099 +               vcn += flen;
63101 +               if (flen >= len || opt == ALLOCATE_MFT ||
63102 +                   (fr && run->count - cnt >= fr)) {
63103 +                       *alen = vcn - vcn0;
63104 +                       return 0;
63105 +               }
63107 +               len -= flen;
63108 +       }
63110 +out:
63111 +       /* undo */
63112 +       run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false);
63113 +       run_truncate(run, vcn0);
63115 +       return err;
63119 + * if page is not NULL - it is already contains resident data
63120 + * and locked (called from ni_write_frame)
63121 + */
63122 +int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
63123 +                         struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
63124 +                         u64 new_size, struct runs_tree *run,
63125 +                         struct ATTRIB **ins_attr, struct page *page)
63127 +       struct ntfs_sb_info *sbi;
63128 +       struct ATTRIB *attr_s;
63129 +       struct MFT_REC *rec;
63130 +       u32 used, asize, rsize, aoff, align;
63131 +       bool is_data;
63132 +       CLST len, alen;
63133 +       char *next;
63134 +       int err;
63136 +       if (attr->non_res) {
63137 +               *ins_attr = attr;
63138 +               return 0;
63139 +       }
63141 +       sbi = mi->sbi;
63142 +       rec = mi->mrec;
63143 +       attr_s = NULL;
63144 +       used = le32_to_cpu(rec->used);
63145 +       asize = le32_to_cpu(attr->size);
63146 +       next = Add2Ptr(attr, asize);
63147 +       aoff = PtrOffset(rec, attr);
63148 +       rsize = le32_to_cpu(attr->res.data_size);
63149 +       is_data = attr->type == ATTR_DATA && !attr->name_len;
63151 +       align = sbi->cluster_size;
63152 +       if (is_attr_compressed(attr))
63153 +               align <<= COMPRESSION_UNIT;
63154 +       len = (rsize + align - 1) >> sbi->cluster_bits;
63156 +       run_init(run);
63158 +       /* make a copy of original attribute */
63159 +       attr_s = ntfs_memdup(attr, asize);
63160 +       if (!attr_s) {
63161 +               err = -ENOMEM;
63162 +               goto out;
63163 +       }
63165 +       if (!len) {
63166 +               /* empty resident -> empty nonresident */
63167 +               alen = 0;
63168 +       } else {
63169 +               const char *data = resident_data(attr);
63171 +               err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
63172 +                                            ALLOCATE_DEF, &alen, 0, NULL);
63173 +               if (err)
63174 +                       goto out1;
63176 +               if (!rsize) {
63177 +                       /* empty resident -> non empty nonresident */
63178 +               } else if (!is_data) {
63179 +                       err = ntfs_sb_write_run(sbi, run, 0, data, rsize);
63180 +                       if (err)
63181 +                               goto out2;
63182 +               } else if (!page) {
63183 +                       char *kaddr;
63185 +                       page = grab_cache_page(ni->vfs_inode.i_mapping, 0);
63186 +                       if (!page) {
63187 +                               err = -ENOMEM;
63188 +                               goto out2;
63189 +                       }
63190 +                       kaddr = kmap_atomic(page);
63191 +                       memcpy(kaddr, data, rsize);
63192 +                       memset(kaddr + rsize, 0, PAGE_SIZE - rsize);
63193 +                       kunmap_atomic(kaddr);
63194 +                       flush_dcache_page(page);
63195 +                       SetPageUptodate(page);
63196 +                       set_page_dirty(page);
63197 +                       unlock_page(page);
63198 +                       put_page(page);
63199 +               }
63200 +       }
63202 +       /* remove original attribute */
63203 +       used -= asize;
63204 +       memmove(attr, Add2Ptr(attr, asize), used - aoff);
63205 +       rec->used = cpu_to_le32(used);
63206 +       mi->dirty = true;
63207 +       if (le)
63208 +               al_remove_le(ni, le);
63210 +       err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
63211 +                                   attr_s->name_len, run, 0, alen,
63212 +                                   attr_s->flags, &attr, NULL);
63213 +       if (err)
63214 +               goto out3;
63216 +       ntfs_free(attr_s);
63217 +       attr->nres.data_size = cpu_to_le64(rsize);
63218 +       attr->nres.valid_size = attr->nres.data_size;
63220 +       *ins_attr = attr;
63222 +       if (is_data)
63223 +               ni->ni_flags &= ~NI_FLAG_RESIDENT;
63225 +       /* Resident attribute becomes non resident */
63226 +       return 0;
63228 +out3:
63229 +       attr = Add2Ptr(rec, aoff);
63230 +       memmove(next, attr, used - aoff);
63231 +       memcpy(attr, attr_s, asize);
63232 +       rec->used = cpu_to_le32(used + asize);
63233 +       mi->dirty = true;
63234 +out2:
63235 +       /* undo: do not trim new allocated clusters */
63236 +       run_deallocate(sbi, run, false);
63237 +       run_close(run);
63238 +out1:
63239 +       ntfs_free(attr_s);
63240 +       /*reinsert le*/
63241 +out:
63242 +       return err;
63246 + * attr_set_size_res
63247 + *
63248 + * helper for attr_set_size
63249 + */
63250 +static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
63251 +                            struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
63252 +                            u64 new_size, struct runs_tree *run,
63253 +                            struct ATTRIB **ins_attr)
63255 +       struct ntfs_sb_info *sbi = mi->sbi;
63256 +       struct MFT_REC *rec = mi->mrec;
63257 +       u32 used = le32_to_cpu(rec->used);
63258 +       u32 asize = le32_to_cpu(attr->size);
63259 +       u32 aoff = PtrOffset(rec, attr);
63260 +       u32 rsize = le32_to_cpu(attr->res.data_size);
63261 +       u32 tail = used - aoff - asize;
63262 +       char *next = Add2Ptr(attr, asize);
63263 +       s64 dsize = QuadAlign(new_size) - QuadAlign(rsize);
63265 +       if (dsize < 0) {
63266 +               memmove(next + dsize, next, tail);
63267 +       } else if (dsize > 0) {
63268 +               if (used + dsize > sbi->max_bytes_per_attr)
63269 +                       return attr_make_nonresident(ni, attr, le, mi, new_size,
63270 +                                                    run, ins_attr, NULL);
63272 +               memmove(next + dsize, next, tail);
63273 +               memset(next, 0, dsize);
63274 +       }
63276 +       if (new_size > rsize)
63277 +               memset(Add2Ptr(resident_data(attr), rsize), 0,
63278 +                      new_size - rsize);
63280 +       rec->used = cpu_to_le32(used + dsize);
63281 +       attr->size = cpu_to_le32(asize + dsize);
63282 +       attr->res.data_size = cpu_to_le32(new_size);
63283 +       mi->dirty = true;
63284 +       *ins_attr = attr;
63286 +       return 0;
63290 + * attr_set_size
63291 + *
63292 + * change the size of attribute
63293 + * Extend:
63294 + *   - sparse/compressed: no allocated clusters
63295 + *   - normal: append allocated and preallocated new clusters
63296 + * Shrink:
63297 + *   - no deallocate if keep_prealloc is set
63298 + */
63299 +int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
63300 +                 const __le16 *name, u8 name_len, struct runs_tree *run,
63301 +                 u64 new_size, const u64 *new_valid, bool keep_prealloc,
63302 +                 struct ATTRIB **ret)
63304 +       int err = 0;
63305 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
63306 +       u8 cluster_bits = sbi->cluster_bits;
63307 +       bool is_mft =
63308 +               ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA && !name_len;
63309 +       u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp;
63310 +       struct ATTRIB *attr = NULL, *attr_b;
63311 +       struct ATTR_LIST_ENTRY *le, *le_b;
63312 +       struct mft_inode *mi, *mi_b;
63313 +       CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
63314 +       CLST next_svcn, pre_alloc = -1, done = 0;
63315 +       bool is_ext;
63316 +       u32 align;
63317 +       struct MFT_REC *rec;
63319 +again:
63320 +       le_b = NULL;
63321 +       attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
63322 +                             &mi_b);
63323 +       if (!attr_b) {
63324 +               err = -ENOENT;
63325 +               goto out;
63326 +       }
63328 +       if (!attr_b->non_res) {
63329 +               err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
63330 +                                       &attr_b);
63331 +               if (err || !attr_b->non_res)
63332 +                       goto out;
63334 +               /* layout of records may be changed, so do a full search */
63335 +               goto again;
63336 +       }
63338 +       is_ext = is_attr_ext(attr_b);
63340 +again_1:
63341 +       align = sbi->cluster_size;
63343 +       if (is_ext) {
63344 +               align <<= attr_b->nres.c_unit;
63345 +               if (is_attr_sparsed(attr_b))
63346 +                       keep_prealloc = false;
63347 +       }
63349 +       old_valid = le64_to_cpu(attr_b->nres.valid_size);
63350 +       old_size = le64_to_cpu(attr_b->nres.data_size);
63351 +       old_alloc = le64_to_cpu(attr_b->nres.alloc_size);
63352 +       old_alen = old_alloc >> cluster_bits;
63354 +       new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
63355 +       new_alen = new_alloc >> cluster_bits;
63357 +       if (keep_prealloc && is_ext)
63358 +               keep_prealloc = false;
63360 +       if (keep_prealloc && new_size < old_size) {
63361 +               attr_b->nres.data_size = cpu_to_le64(new_size);
63362 +               mi_b->dirty = true;
63363 +               goto ok;
63364 +       }
63366 +       vcn = old_alen - 1;
63368 +       svcn = le64_to_cpu(attr_b->nres.svcn);
63369 +       evcn = le64_to_cpu(attr_b->nres.evcn);
63371 +       if (svcn <= vcn && vcn <= evcn) {
63372 +               attr = attr_b;
63373 +               le = le_b;
63374 +               mi = mi_b;
63375 +       } else if (!le_b) {
63376 +               err = -EINVAL;
63377 +               goto out;
63378 +       } else {
63379 +               le = le_b;
63380 +               attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
63381 +                                   &mi);
63382 +               if (!attr) {
63383 +                       err = -EINVAL;
63384 +                       goto out;
63385 +               }
63387 +next_le_1:
63388 +               svcn = le64_to_cpu(attr->nres.svcn);
63389 +               evcn = le64_to_cpu(attr->nres.evcn);
63390 +       }
63392 +next_le:
63393 +       rec = mi->mrec;
63395 +       err = attr_load_runs(attr, ni, run, NULL);
63396 +       if (err)
63397 +               goto out;
63399 +       if (new_size > old_size) {
63400 +               CLST to_allocate;
63401 +               size_t free;
63403 +               if (new_alloc <= old_alloc) {
63404 +                       attr_b->nres.data_size = cpu_to_le64(new_size);
63405 +                       mi_b->dirty = true;
63406 +                       goto ok;
63407 +               }
63409 +               to_allocate = new_alen - old_alen;
63410 +add_alloc_in_same_attr_seg:
63411 +               lcn = 0;
63412 +               if (is_mft) {
63413 +                       /* mft allocates clusters from mftzone */
63414 +                       pre_alloc = 0;
63415 +               } else if (is_ext) {
63416 +                       /* no preallocate for sparse/compress */
63417 +                       pre_alloc = 0;
63418 +               } else if (pre_alloc == -1) {
63419 +                       pre_alloc = 0;
63420 +                       if (type == ATTR_DATA && !name_len &&
63421 +                           sbi->options.prealloc) {
63422 +                               CLST new_alen2 = bytes_to_cluster(
63423 +                                       sbi, get_pre_allocated(new_size));
63424 +                               pre_alloc = new_alen2 - new_alen;
63425 +                       }
63427 +                       /* Get the last lcn to allocate from */
63428 +                       if (old_alen &&
63429 +                           !run_lookup_entry(run, vcn, &lcn, NULL, NULL)) {
63430 +                               lcn = SPARSE_LCN;
63431 +                       }
63433 +                       if (lcn == SPARSE_LCN)
63434 +                               lcn = 0;
63435 +                       else if (lcn)
63436 +                               lcn += 1;
63438 +                       free = wnd_zeroes(&sbi->used.bitmap);
63439 +                       if (to_allocate > free) {
63440 +                               err = -ENOSPC;
63441 +                               goto out;
63442 +                       }
63444 +                       if (pre_alloc && to_allocate + pre_alloc > free)
63445 +                               pre_alloc = 0;
63446 +               }
63448 +               vcn = old_alen;
63450 +               if (is_ext) {
63451 +                       if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate,
63452 +                                          false)) {
63453 +                               err = -ENOMEM;
63454 +                               goto out;
63455 +                       }
63456 +                       alen = to_allocate;
63457 +               } else {
63458 +                       /* ~3 bytes per fragment */
63459 +                       err = attr_allocate_clusters(
63460 +                               sbi, run, vcn, lcn, to_allocate, &pre_alloc,
63461 +                               is_mft ? ALLOCATE_MFT : 0, &alen,
63462 +                               is_mft ? 0
63463 +                                      : (sbi->record_size -
63464 +                                         le32_to_cpu(rec->used) + 8) /
63465 +                                                        3 +
63466 +                                                1,
63467 +                               NULL);
63468 +                       if (err)
63469 +                               goto out;
63470 +               }
63472 +               done += alen;
63473 +               vcn += alen;
63474 +               if (to_allocate > alen)
63475 +                       to_allocate -= alen;
63476 +               else
63477 +                       to_allocate = 0;
63479 +pack_runs:
63480 +               err = mi_pack_runs(mi, attr, run, vcn - svcn);
63481 +               if (err)
63482 +                       goto out;
63484 +               next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
63485 +               new_alloc_tmp = (u64)next_svcn << cluster_bits;
63486 +               attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
63487 +               mi_b->dirty = true;
63489 +               if (next_svcn >= vcn && !to_allocate) {
63490 +                       /* Normal way. update attribute and exit */
63491 +                       attr_b->nres.data_size = cpu_to_le64(new_size);
63492 +                       goto ok;
63493 +               }
63495 +               /* at least two mft to avoid recursive loop*/
63496 +               if (is_mft && next_svcn == vcn &&
63497 +                   ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) {
63498 +                       new_size = new_alloc_tmp;
63499 +                       attr_b->nres.data_size = attr_b->nres.alloc_size;
63500 +                       goto ok;
63501 +               }
63503 +               if (le32_to_cpu(rec->used) < sbi->record_size) {
63504 +                       old_alen = next_svcn;
63505 +                       evcn = old_alen - 1;
63506 +                       goto add_alloc_in_same_attr_seg;
63507 +               }
63509 +               attr_b->nres.data_size = attr_b->nres.alloc_size;
63510 +               if (new_alloc_tmp < old_valid)
63511 +                       attr_b->nres.valid_size = attr_b->nres.data_size;
63513 +               if (type == ATTR_LIST) {
63514 +                       err = ni_expand_list(ni);
63515 +                       if (err)
63516 +                               goto out;
63517 +                       if (next_svcn < vcn)
63518 +                               goto pack_runs;
63520 +                       /* layout of records is changed */
63521 +                       goto again;
63522 +               }
63524 +               if (!ni->attr_list.size) {
63525 +                       err = ni_create_attr_list(ni);
63526 +                       if (err)
63527 +                               goto out;
63528 +                       /* layout of records is changed */
63529 +               }
63531 +               if (next_svcn >= vcn) {
63532 +                       /* this is mft data, repeat */
63533 +                       goto again;
63534 +               }
63536 +               /* insert new attribute segment */
63537 +               err = ni_insert_nonresident(ni, type, name, name_len, run,
63538 +                                           next_svcn, vcn - next_svcn,
63539 +                                           attr_b->flags, &attr, &mi);
63540 +               if (err)
63541 +                       goto out;
63543 +               if (!is_mft)
63544 +                       run_truncate_head(run, evcn + 1);
63546 +               svcn = le64_to_cpu(attr->nres.svcn);
63547 +               evcn = le64_to_cpu(attr->nres.evcn);
63549 +               le_b = NULL;
63550 +               /* layout of records maybe changed */
63551 +               /* find base attribute to update*/
63552 +               attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
63553 +                                     NULL, &mi_b);
63554 +               if (!attr_b) {
63555 +                       err = -ENOENT;
63556 +                       goto out;
63557 +               }
63559 +               attr_b->nres.alloc_size = cpu_to_le64((u64)vcn << cluster_bits);
63560 +               attr_b->nres.data_size = attr_b->nres.alloc_size;
63561 +               attr_b->nres.valid_size = attr_b->nres.alloc_size;
63562 +               mi_b->dirty = true;
63563 +               goto again_1;
63564 +       }
63566 +       if (new_size != old_size ||
63567 +           (new_alloc != old_alloc && !keep_prealloc)) {
63568 +               vcn = max(svcn, new_alen);
63569 +               new_alloc_tmp = (u64)vcn << cluster_bits;
63571 +               alen = 0;
63572 +               err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &alen,
63573 +                                       true);
63574 +               if (err)
63575 +                       goto out;
63577 +               run_truncate(run, vcn);
63579 +               if (vcn > svcn) {
63580 +                       err = mi_pack_runs(mi, attr, run, vcn - svcn);
63581 +                       if (err)
63582 +                               goto out;
63583 +               } else if (le && le->vcn) {
63584 +                       u16 le_sz = le16_to_cpu(le->size);
63586 +                       /*
63587 +                        * NOTE: list entries for one attribute are always
63588 +                        * the same size. We deal with last entry (vcn==0)
63589 +                        * and it is not first in entries array
63590 +                        * (list entry for std attribute always first)
63591 +                        * So it is safe to step back
63592 +                        */
63593 +                       mi_remove_attr(mi, attr);
63595 +                       if (!al_remove_le(ni, le)) {
63596 +                               err = -EINVAL;
63597 +                               goto out;
63598 +                       }
63600 +                       le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
63601 +               } else {
63602 +                       attr->nres.evcn = cpu_to_le64((u64)vcn - 1);
63603 +                       mi->dirty = true;
63604 +               }
63606 +               attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
63608 +               if (vcn == new_alen) {
63609 +                       attr_b->nres.data_size = cpu_to_le64(new_size);
63610 +                       if (new_size < old_valid)
63611 +                               attr_b->nres.valid_size =
63612 +                                       attr_b->nres.data_size;
63613 +               } else {
63614 +                       if (new_alloc_tmp <=
63615 +                           le64_to_cpu(attr_b->nres.data_size))
63616 +                               attr_b->nres.data_size =
63617 +                                       attr_b->nres.alloc_size;
63618 +                       if (new_alloc_tmp <
63619 +                           le64_to_cpu(attr_b->nres.valid_size))
63620 +                               attr_b->nres.valid_size =
63621 +                                       attr_b->nres.alloc_size;
63622 +               }
63624 +               if (is_ext)
63625 +                       le64_sub_cpu(&attr_b->nres.total_size,
63626 +                                    ((u64)alen << cluster_bits));
63628 +               mi_b->dirty = true;
63630 +               if (new_alloc_tmp <= new_alloc)
63631 +                       goto ok;
63633 +               old_size = new_alloc_tmp;
63634 +               vcn = svcn - 1;
63636 +               if (le == le_b) {
63637 +                       attr = attr_b;
63638 +                       mi = mi_b;
63639 +                       evcn = svcn - 1;
63640 +                       svcn = 0;
63641 +                       goto next_le;
63642 +               }
63644 +               if (le->type != type || le->name_len != name_len ||
63645 +                   memcmp(le_name(le), name, name_len * sizeof(short))) {
63646 +                       err = -EINVAL;
63647 +                       goto out;
63648 +               }
63650 +               err = ni_load_mi(ni, le, &mi);
63651 +               if (err)
63652 +                       goto out;
63654 +               attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
63655 +               if (!attr) {
63656 +                       err = -EINVAL;
63657 +                       goto out;
63658 +               }
63659 +               goto next_le_1;
63660 +       }
63662 +ok:
63663 +       if (new_valid) {
63664 +               __le64 valid = cpu_to_le64(min(*new_valid, new_size));
63666 +               if (attr_b->nres.valid_size != valid) {
63667 +                       attr_b->nres.valid_size = valid;
63668 +                       mi_b->dirty = true;
63669 +               }
63670 +       }
63672 +out:
63673 +       if (!err && attr_b && ret)
63674 +               *ret = attr_b;
63676 +       /* update inode_set_bytes*/
63677 +       if (!err && ((type == ATTR_DATA && !name_len) ||
63678 +                    (type == ATTR_ALLOC && name == I30_NAME))) {
63679 +               bool dirty = false;
63681 +               if (ni->vfs_inode.i_size != new_size) {
63682 +                       ni->vfs_inode.i_size = new_size;
63683 +                       dirty = true;
63684 +               }
63686 +               if (attr_b && attr_b->non_res) {
63687 +                       new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
63688 +                       if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
63689 +                               inode_set_bytes(&ni->vfs_inode, new_alloc);
63690 +                               dirty = true;
63691 +                       }
63692 +               }
63694 +               if (dirty) {
63695 +                       ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
63696 +                       mark_inode_dirty(&ni->vfs_inode);
63697 +               }
63698 +       }
63700 +       return err;
63703 +int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
63704 +                       CLST *len, bool *new)
63706 +       int err = 0;
63707 +       struct runs_tree *run = &ni->file.run;
63708 +       struct ntfs_sb_info *sbi;
63709 +       u8 cluster_bits;
63710 +       struct ATTRIB *attr = NULL, *attr_b;
63711 +       struct ATTR_LIST_ENTRY *le, *le_b;
63712 +       struct mft_inode *mi, *mi_b;
63713 +       CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end;
63714 +       u64 total_size;
63715 +       u32 clst_per_frame;
63716 +       bool ok;
63718 +       if (new)
63719 +               *new = false;
63721 +       down_read(&ni->file.run_lock);
63722 +       ok = run_lookup_entry(run, vcn, lcn, len, NULL);
63723 +       up_read(&ni->file.run_lock);
63725 +       if (ok && (*lcn != SPARSE_LCN || !new)) {
63726 +               /* normal way */
63727 +               return 0;
63728 +       }
63730 +       if (!clen)
63731 +               clen = 1;
63733 +       if (ok && clen > *len)
63734 +               clen = *len;
63736 +       sbi = ni->mi.sbi;
63737 +       cluster_bits = sbi->cluster_bits;
63739 +       ni_lock(ni);
63740 +       down_write(&ni->file.run_lock);
63742 +       le_b = NULL;
63743 +       attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
63744 +       if (!attr_b) {
63745 +               err = -ENOENT;
63746 +               goto out;
63747 +       }
63749 +       if (!attr_b->non_res) {
63750 +               *lcn = RESIDENT_LCN;
63751 +               *len = 1;
63752 +               goto out;
63753 +       }
63755 +       asize = le64_to_cpu(attr_b->nres.alloc_size) >> sbi->cluster_bits;
63756 +       if (vcn >= asize) {
63757 +               err = -EINVAL;
63758 +               goto out;
63759 +       }
63761 +       clst_per_frame = 1u << attr_b->nres.c_unit;
63762 +       to_alloc = (clen + clst_per_frame - 1) & ~(clst_per_frame - 1);
63764 +       if (vcn + to_alloc > asize)
63765 +               to_alloc = asize - vcn;
63767 +       svcn = le64_to_cpu(attr_b->nres.svcn);
63768 +       evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
63770 +       attr = attr_b;
63771 +       le = le_b;
63772 +       mi = mi_b;
63774 +       if (le_b && (vcn < svcn || evcn1 <= vcn)) {
63775 +               attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
63776 +                                   &mi);
63777 +               if (!attr) {
63778 +                       err = -EINVAL;
63779 +                       goto out;
63780 +               }
63781 +               svcn = le64_to_cpu(attr->nres.svcn);
63782 +               evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
63783 +       }
63785 +       err = attr_load_runs(attr, ni, run, NULL);
63786 +       if (err)
63787 +               goto out;
63789 +       if (!ok) {
63790 +               ok = run_lookup_entry(run, vcn, lcn, len, NULL);
63791 +               if (ok && (*lcn != SPARSE_LCN || !new)) {
63792 +                       /* normal way */
63793 +                       err = 0;
63794 +                       goto ok;
63795 +               }
63797 +               if (!ok && !new) {
63798 +                       *len = 0;
63799 +                       err = 0;
63800 +                       goto ok;
63801 +               }
63803 +               if (ok && clen > *len) {
63804 +                       clen = *len;
63805 +                       to_alloc = (clen + clst_per_frame - 1) &
63806 +                                  ~(clst_per_frame - 1);
63807 +               }
63808 +       }
63810 +       if (!is_attr_ext(attr_b)) {
63811 +               err = -EINVAL;
63812 +               goto out;
63813 +       }
63815 +       /* Get the last lcn to allocate from */
63816 +       hint = 0;
63818 +       if (vcn > evcn1) {
63819 +               if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1,
63820 +                                  false)) {
63821 +                       err = -ENOMEM;
63822 +                       goto out;
63823 +               }
63824 +       } else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) {
63825 +               hint = -1;
63826 +       }
63828 +       err = attr_allocate_clusters(
63829 +               sbi, run, vcn, hint + 1, to_alloc, NULL, 0, len,
63830 +               (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1,
63831 +               lcn);
63832 +       if (err)
63833 +               goto out;
63834 +       *new = true;
63836 +       end = vcn + *len;
63838 +       total_size = le64_to_cpu(attr_b->nres.total_size) +
63839 +                    ((u64)*len << cluster_bits);
63841 +repack:
63842 +       err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
63843 +       if (err)
63844 +               goto out;
63846 +       attr_b->nres.total_size = cpu_to_le64(total_size);
63847 +       inode_set_bytes(&ni->vfs_inode, total_size);
63848 +       ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
63850 +       mi_b->dirty = true;
63851 +       mark_inode_dirty(&ni->vfs_inode);
63853 +       /* stored [vcn : next_svcn) from [vcn : end) */
63854 +       next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
63856 +       if (end <= evcn1) {
63857 +               if (next_svcn == evcn1) {
63858 +                       /* Normal way. update attribute and exit */
63859 +                       goto ok;
63860 +               }
63861 +               /* add new segment [next_svcn : evcn1 - next_svcn )*/
63862 +               if (!ni->attr_list.size) {
63863 +                       err = ni_create_attr_list(ni);
63864 +                       if (err)
63865 +                               goto out;
63866 +                       /* layout of records is changed */
63867 +                       le_b = NULL;
63868 +                       attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
63869 +                                             0, NULL, &mi_b);
63870 +                       if (!attr_b) {
63871 +                               err = -ENOENT;
63872 +                               goto out;
63873 +                       }
63875 +                       attr = attr_b;
63876 +                       le = le_b;
63877 +                       mi = mi_b;
63878 +                       goto repack;
63879 +               }
63880 +       }
63882 +       svcn = evcn1;
63884 +       /* Estimate next attribute */
63885 +       attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
63887 +       if (attr) {
63888 +               CLST alloc = bytes_to_cluster(
63889 +                       sbi, le64_to_cpu(attr_b->nres.alloc_size));
63890 +               CLST evcn = le64_to_cpu(attr->nres.evcn);
63892 +               if (end < next_svcn)
63893 +                       end = next_svcn;
63894 +               while (end > evcn) {
63895 +                       /* remove segment [svcn : evcn)*/
63896 +                       mi_remove_attr(mi, attr);
63898 +                       if (!al_remove_le(ni, le)) {
63899 +                               err = -EINVAL;
63900 +                               goto out;
63901 +                       }
63903 +                       if (evcn + 1 >= alloc) {
63904 +                               /* last attribute segment */
63905 +                               evcn1 = evcn + 1;
63906 +                               goto ins_ext;
63907 +                       }
63909 +                       if (ni_load_mi(ni, le, &mi)) {
63910 +                               attr = NULL;
63911 +                               goto out;
63912 +                       }
63914 +                       attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
63915 +                                           &le->id);
63916 +                       if (!attr) {
63917 +                               err = -EINVAL;
63918 +                               goto out;
63919 +                       }
63920 +                       svcn = le64_to_cpu(attr->nres.svcn);
63921 +                       evcn = le64_to_cpu(attr->nres.evcn);
63922 +               }
63924 +               if (end < svcn)
63925 +                       end = svcn;
63927 +               err = attr_load_runs(attr, ni, run, &end);
63928 +               if (err)
63929 +                       goto out;
63931 +               evcn1 = evcn + 1;
63932 +               attr->nres.svcn = cpu_to_le64(next_svcn);
63933 +               err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
63934 +               if (err)
63935 +                       goto out;
63937 +               le->vcn = cpu_to_le64(next_svcn);
63938 +               ni->attr_list.dirty = true;
63939 +               mi->dirty = true;
63941 +               next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
63942 +       }
63943 +ins_ext:
63944 +       if (evcn1 > next_svcn) {
63945 +               err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
63946 +                                           next_svcn, evcn1 - next_svcn,
63947 +                                           attr_b->flags, &attr, &mi);
63948 +               if (err)
63949 +                       goto out;
63950 +       }
63951 +ok:
63952 +       run_truncate_around(run, vcn);
63953 +out:
63954 +       up_write(&ni->file.run_lock);
63955 +       ni_unlock(ni);
63957 +       return err;
63960 +int attr_data_read_resident(struct ntfs_inode *ni, struct page *page)
63962 +       u64 vbo;
63963 +       struct ATTRIB *attr;
63964 +       u32 data_size;
63966 +       attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL);
63967 +       if (!attr)
63968 +               return -EINVAL;
63970 +       if (attr->non_res)
63971 +               return E_NTFS_NONRESIDENT;
63973 +       vbo = page->index << PAGE_SHIFT;
63974 +       data_size = le32_to_cpu(attr->res.data_size);
63975 +       if (vbo < data_size) {
63976 +               const char *data = resident_data(attr);
63977 +               char *kaddr = kmap_atomic(page);
63978 +               u32 use = data_size - vbo;
63980 +               if (use > PAGE_SIZE)
63981 +                       use = PAGE_SIZE;
63983 +               memcpy(kaddr, data + vbo, use);
63984 +               memset(kaddr + use, 0, PAGE_SIZE - use);
63985 +               kunmap_atomic(kaddr);
63986 +               flush_dcache_page(page);
63987 +               SetPageUptodate(page);
63988 +       } else if (!PageUptodate(page)) {
63989 +               zero_user_segment(page, 0, PAGE_SIZE);
63990 +               SetPageUptodate(page);
63991 +       }
63993 +       return 0;
63996 +int attr_data_write_resident(struct ntfs_inode *ni, struct page *page)
63998 +       u64 vbo;
63999 +       struct mft_inode *mi;
64000 +       struct ATTRIB *attr;
64001 +       u32 data_size;
64003 +       attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
64004 +       if (!attr)
64005 +               return -EINVAL;
64007 +       if (attr->non_res) {
64008 +               /*return special error code to check this case*/
64009 +               return E_NTFS_NONRESIDENT;
64010 +       }
64012 +       vbo = page->index << PAGE_SHIFT;
64013 +       data_size = le32_to_cpu(attr->res.data_size);
64014 +       if (vbo < data_size) {
64015 +               char *data = resident_data(attr);
64016 +               char *kaddr = kmap_atomic(page);
64017 +               u32 use = data_size - vbo;
64019 +               if (use > PAGE_SIZE)
64020 +                       use = PAGE_SIZE;
64021 +               memcpy(data + vbo, kaddr, use);
64022 +               kunmap_atomic(kaddr);
64023 +               mi->dirty = true;
64024 +       }
64025 +       ni->i_valid = data_size;
64027 +       return 0;
64031 + * attr_load_runs_vcn
64032 + *
64033 + * load runs with vcn
64034 + */
64035 +int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
64036 +                      const __le16 *name, u8 name_len, struct runs_tree *run,
64037 +                      CLST vcn)
64039 +       struct ATTRIB *attr;
64040 +       int err;
64041 +       CLST svcn, evcn;
64042 +       u16 ro;
64044 +       attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
64045 +       if (!attr)
64046 +               return -ENOENT;
64048 +       svcn = le64_to_cpu(attr->nres.svcn);
64049 +       evcn = le64_to_cpu(attr->nres.evcn);
64051 +       if (evcn < vcn || vcn < svcn)
64052 +               return -EINVAL;
64054 +       ro = le16_to_cpu(attr->nres.run_off);
64055 +       err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
64056 +                           Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
64057 +       if (err < 0)
64058 +               return err;
64059 +       return 0;
64063 + * load runs for given range [from to)
64064 + */
64065 +int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
64066 +                        const __le16 *name, u8 name_len, struct runs_tree *run,
64067 +                        u64 from, u64 to)
64069 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
64070 +       u8 cluster_bits = sbi->cluster_bits;
64071 +       CLST vcn = from >> cluster_bits;
64072 +       CLST vcn_last = (to - 1) >> cluster_bits;
64073 +       CLST lcn, clen;
64074 +       int err;
64076 +       for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) {
64077 +               if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) {
64078 +                       err = attr_load_runs_vcn(ni, type, name, name_len, run,
64079 +                                                vcn);
64080 +                       if (err)
64081 +                               return err;
64082 +                       clen = 0; /*next run_lookup_entry(vcn) must be success*/
64083 +               }
64084 +       }
64086 +       return 0;
64089 +#ifdef CONFIG_NTFS3_LZX_XPRESS
64091 + * attr_wof_frame_info
64092 + *
64093 + * read header of xpress/lzx file to get info about frame
64094 + */
64095 +int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
64096 +                       struct runs_tree *run, u64 frame, u64 frames,
64097 +                       u8 frame_bits, u32 *ondisk_size, u64 *vbo_data)
64099 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
64100 +       u64 vbo[2], off[2], wof_size;
64101 +       u32 voff;
64102 +       u8 bytes_per_off;
64103 +       char *addr;
64104 +       struct page *page;
64105 +       int i, err;
64106 +       __le32 *off32;
64107 +       __le64 *off64;
64109 +       if (ni->vfs_inode.i_size < 0x100000000ull) {
64110 +               /* file starts with array of 32 bit offsets */
64111 +               bytes_per_off = sizeof(__le32);
64112 +               vbo[1] = frame << 2;
64113 +               *vbo_data = frames << 2;
64114 +       } else {
64115 +               /* file starts with array of 64 bit offsets */
64116 +               bytes_per_off = sizeof(__le64);
64117 +               vbo[1] = frame << 3;
64118 +               *vbo_data = frames << 3;
64119 +       }
64121 +       /*
64122 +        * read 4/8 bytes at [vbo - 4(8)] == offset where compressed frame starts
64123 +        * read 4/8 bytes at [vbo] == offset where compressed frame ends
64124 +        */
64125 +       if (!attr->non_res) {
64126 +               if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
64127 +                       ntfs_inode_err(&ni->vfs_inode, "is corrupted");
64128 +                       return -EINVAL;
64129 +               }
64130 +               addr = resident_data(attr);
64132 +               if (bytes_per_off == sizeof(__le32)) {
64133 +                       off32 = Add2Ptr(addr, vbo[1]);
64134 +                       off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0;
64135 +                       off[1] = le32_to_cpu(off32[0]);
64136 +               } else {
64137 +                       off64 = Add2Ptr(addr, vbo[1]);
64138 +                       off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0;
64139 +                       off[1] = le64_to_cpu(off64[0]);
64140 +               }
64142 +               *vbo_data += off[0];
64143 +               *ondisk_size = off[1] - off[0];
64144 +               return 0;
64145 +       }
64147 +       wof_size = le64_to_cpu(attr->nres.data_size);
64148 +       down_write(&ni->file.run_lock);
64149 +       page = ni->file.offs_page;
64150 +       if (!page) {
64151 +               page = alloc_page(GFP_KERNEL);
64152 +               if (!page) {
64153 +                       err = -ENOMEM;
64154 +                       goto out;
64155 +               }
64156 +               page->index = -1;
64157 +               ni->file.offs_page = page;
64158 +       }
64159 +       lock_page(page);
64160 +       addr = page_address(page);
64162 +       if (vbo[1]) {
64163 +               voff = vbo[1] & (PAGE_SIZE - 1);
64164 +               vbo[0] = vbo[1] - bytes_per_off;
64165 +               i = 0;
64166 +       } else {
64167 +               voff = 0;
64168 +               vbo[0] = 0;
64169 +               off[0] = 0;
64170 +               i = 1;
64171 +       }
64173 +       do {
64174 +               pgoff_t index = vbo[i] >> PAGE_SHIFT;
64176 +               if (index != page->index) {
64177 +                       u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
64178 +                       u64 to = min(from + PAGE_SIZE, wof_size);
64180 +                       err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
64181 +                                                  ARRAY_SIZE(WOF_NAME), run,
64182 +                                                  from, to);
64183 +                       if (err)
64184 +                               goto out1;
64186 +                       err = ntfs_bio_pages(sbi, run, &page, 1, from,
64187 +                                            to - from, REQ_OP_READ);
64188 +                       if (err) {
64189 +                               page->index = -1;
64190 +                               goto out1;
64191 +                       }
64192 +                       page->index = index;
64193 +               }
64195 +               if (i) {
64196 +                       if (bytes_per_off == sizeof(__le32)) {
64197 +                               off32 = Add2Ptr(addr, voff);
64198 +                               off[1] = le32_to_cpu(*off32);
64199 +                       } else {
64200 +                               off64 = Add2Ptr(addr, voff);
64201 +                               off[1] = le64_to_cpu(*off64);
64202 +                       }
64203 +               } else if (!voff) {
64204 +                       if (bytes_per_off == sizeof(__le32)) {
64205 +                               off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32));
64206 +                               off[0] = le32_to_cpu(*off32);
64207 +                       } else {
64208 +                               off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64));
64209 +                               off[0] = le64_to_cpu(*off64);
64210 +                       }
64211 +               } else {
64212 +                       /* two values in one page*/
64213 +                       if (bytes_per_off == sizeof(__le32)) {
64214 +                               off32 = Add2Ptr(addr, voff);
64215 +                               off[0] = le32_to_cpu(off32[-1]);
64216 +                               off[1] = le32_to_cpu(off32[0]);
64217 +                       } else {
64218 +                               off64 = Add2Ptr(addr, voff);
64219 +                               off[0] = le64_to_cpu(off64[-1]);
64220 +                               off[1] = le64_to_cpu(off64[0]);
64221 +                       }
64222 +                       break;
64223 +               }
64224 +       } while (++i < 2);
64226 +       *vbo_data += off[0];
64227 +       *ondisk_size = off[1] - off[0];
64229 +out1:
64230 +       unlock_page(page);
64231 +out:
64232 +       up_write(&ni->file.run_lock);
64233 +       return err;
64235 +#endif
64238 + * attr_is_frame_compressed
64239 + *
64240 + * This function is used to detect compressed frame
64241 + */
64242 +int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
64243 +                            CLST frame, CLST *clst_data)
64245 +       int err;
64246 +       u32 clst_frame;
64247 +       CLST clen, lcn, vcn, alen, slen, vcn_next;
64248 +       size_t idx;
64249 +       struct runs_tree *run;
64251 +       *clst_data = 0;
64253 +       if (!is_attr_compressed(attr))
64254 +               return 0;
64256 +       if (!attr->non_res)
64257 +               return 0;
64259 +       clst_frame = 1u << attr->nres.c_unit;
64260 +       vcn = frame * clst_frame;
64261 +       run = &ni->file.run;
64263 +       if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
64264 +               err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
64265 +                                        attr->name_len, run, vcn);
64266 +               if (err)
64267 +                       return err;
64269 +               if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
64270 +                       return -EINVAL;
64271 +       }
64273 +       if (lcn == SPARSE_LCN) {
64274 +               /* sparsed frame */
64275 +               return 0;
64276 +       }
64278 +       if (clen >= clst_frame) {
64279 +               /*
64280 +                * The frame is not compressed 'cause
64281 +                * it does not contain any sparse clusters
64282 +                */
64283 +               *clst_data = clst_frame;
64284 +               return 0;
64285 +       }
64287 +       alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size));
64288 +       slen = 0;
64289 +       *clst_data = clen;
64291 +       /*
64292 +        * The frame is compressed if *clst_data + slen >= clst_frame
64293 +        * Check next fragments
64294 +        */
64295 +       while ((vcn += clen) < alen) {
64296 +               vcn_next = vcn;
64298 +               if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
64299 +                   vcn_next != vcn) {
64300 +                       err = attr_load_runs_vcn(ni, attr->type,
64301 +                                                attr_name(attr),
64302 +                                                attr->name_len, run, vcn_next);
64303 +                       if (err)
64304 +                               return err;
64305 +                       vcn = vcn_next;
64307 +                       if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
64308 +                               return -EINVAL;
64309 +               }
64311 +               if (lcn == SPARSE_LCN) {
64312 +                       slen += clen;
64313 +               } else {
64314 +                       if (slen) {
64315 +                               /*
64316 +                                * data_clusters + sparse_clusters =
64317 +                                * not enough for frame
64318 +                                */
64319 +                               return -EINVAL;
64320 +                       }
64321 +                       *clst_data += clen;
64322 +               }
64324 +               if (*clst_data + slen >= clst_frame) {
64325 +                       if (!slen) {
64326 +                               /*
64327 +                                * There is no sparsed clusters in this frame
64328 +                                * So it is not compressed
64329 +                                */
64330 +                               *clst_data = clst_frame;
64331 +                       } else {
64332 +                               /*frame is compressed*/
64333 +                       }
64334 +                       break;
64335 +               }
64336 +       }
64338 +       return 0;
64342 + * attr_allocate_frame
64343 + *
64344 + * allocate/free clusters for 'frame'
64345 + * assumed: down_write(&ni->file.run_lock);
64346 + */
64347 +int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
64348 +                       u64 new_valid)
64350 +       int err = 0;
64351 +       struct runs_tree *run = &ni->file.run;
64352 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
64353 +       struct ATTRIB *attr = NULL, *attr_b;
64354 +       struct ATTR_LIST_ENTRY *le, *le_b;
64355 +       struct mft_inode *mi, *mi_b;
64356 +       CLST svcn, evcn1, next_svcn, lcn, len;
64357 +       CLST vcn, end, clst_data;
64358 +       u64 total_size, valid_size, data_size;
64360 +       le_b = NULL;
64361 +       attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
64362 +       if (!attr_b)
64363 +               return -ENOENT;
64365 +       if (!is_attr_ext(attr_b))
64366 +               return -EINVAL;
64368 +       vcn = frame << NTFS_LZNT_CUNIT;
64369 +       total_size = le64_to_cpu(attr_b->nres.total_size);
64371 +       svcn = le64_to_cpu(attr_b->nres.svcn);
64372 +       evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
64373 +       data_size = le64_to_cpu(attr_b->nres.data_size);
64375 +       if (svcn <= vcn && vcn < evcn1) {
64376 +               attr = attr_b;
64377 +               le = le_b;
64378 +               mi = mi_b;
64379 +       } else if (!le_b) {
64380 +               err = -EINVAL;
64381 +               goto out;
64382 +       } else {
64383 +               le = le_b;
64384 +               attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
64385 +                                   &mi);
64386 +               if (!attr) {
64387 +                       err = -EINVAL;
64388 +                       goto out;
64389 +               }
64390 +               svcn = le64_to_cpu(attr->nres.svcn);
64391 +               evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
64392 +       }
64394 +       err = attr_load_runs(attr, ni, run, NULL);
64395 +       if (err)
64396 +               goto out;
64398 +       err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data);
64399 +       if (err)
64400 +               goto out;
64402 +       total_size -= (u64)clst_data << sbi->cluster_bits;
64404 +       len = bytes_to_cluster(sbi, compr_size);
64406 +       if (len == clst_data)
64407 +               goto out;
64409 +       if (len < clst_data) {
64410 +               err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len,
64411 +                                       NULL, true);
64412 +               if (err)
64413 +                       goto out;
64415 +               if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len,
64416 +                                  false)) {
64417 +                       err = -ENOMEM;
64418 +                       goto out;
64419 +               }
64420 +               end = vcn + clst_data;
64421 +               /* run contains updated range [vcn + len : end) */
64422 +       } else {
64423 +               CLST alen, hint = 0;
64424 +               /* Get the last lcn to allocate from */
64425 +               if (vcn + clst_data &&
64426 +                   !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL,
64427 +                                     NULL)) {
64428 +                       hint = -1;
64429 +               }
64431 +               err = attr_allocate_clusters(sbi, run, vcn + clst_data,
64432 +                                            hint + 1, len - clst_data, NULL, 0,
64433 +                                            &alen, 0, &lcn);
64434 +               if (err)
64435 +                       goto out;
64437 +               end = vcn + len;
64438 +               /* run contains updated range [vcn + clst_data : end) */
64439 +       }
64441 +       total_size += (u64)len << sbi->cluster_bits;
64443 +repack:
64444 +       err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
64445 +       if (err)
64446 +               goto out;
64448 +       attr_b->nres.total_size = cpu_to_le64(total_size);
64449 +       inode_set_bytes(&ni->vfs_inode, total_size);
64451 +       mi_b->dirty = true;
64452 +       mark_inode_dirty(&ni->vfs_inode);
64454 +       /* stored [vcn : next_svcn) from [vcn : end) */
64455 +       next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
64457 +       if (end <= evcn1) {
64458 +               if (next_svcn == evcn1) {
64459 +                       /* Normal way. update attribute and exit */
64460 +                       goto ok;
64461 +               }
64462 +               /* add new segment [next_svcn : evcn1 - next_svcn )*/
64463 +               if (!ni->attr_list.size) {
64464 +                       err = ni_create_attr_list(ni);
64465 +                       if (err)
64466 +                               goto out;
64467 +                       /* layout of records is changed */
64468 +                       le_b = NULL;
64469 +                       attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
64470 +                                             0, NULL, &mi_b);
64471 +                       if (!attr_b) {
64472 +                               err = -ENOENT;
64473 +                               goto out;
64474 +                       }
64476 +                       attr = attr_b;
64477 +                       le = le_b;
64478 +                       mi = mi_b;
64479 +                       goto repack;
64480 +               }
64481 +       }
64483 +       svcn = evcn1;
64485 +       /* Estimate next attribute */
64486 +       attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
64488 +       if (attr) {
64489 +               CLST alloc = bytes_to_cluster(
64490 +                       sbi, le64_to_cpu(attr_b->nres.alloc_size));
64491 +               CLST evcn = le64_to_cpu(attr->nres.evcn);
64493 +               if (end < next_svcn)
64494 +                       end = next_svcn;
64495 +               while (end > evcn) {
64496 +                       /* remove segment [svcn : evcn)*/
64497 +                       mi_remove_attr(mi, attr);
64499 +                       if (!al_remove_le(ni, le)) {
64500 +                               err = -EINVAL;
64501 +                               goto out;
64502 +                       }
64504 +                       if (evcn + 1 >= alloc) {
64505 +                               /* last attribute segment */
64506 +                               evcn1 = evcn + 1;
64507 +                               goto ins_ext;
64508 +                       }
64510 +                       if (ni_load_mi(ni, le, &mi)) {
64511 +                               attr = NULL;
64512 +                               goto out;
64513 +                       }
64515 +                       attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
64516 +                                           &le->id);
64517 +                       if (!attr) {
64518 +                               err = -EINVAL;
64519 +                               goto out;
64520 +                       }
64521 +                       svcn = le64_to_cpu(attr->nres.svcn);
64522 +                       evcn = le64_to_cpu(attr->nres.evcn);
64523 +               }
64525 +               if (end < svcn)
64526 +                       end = svcn;
64528 +               err = attr_load_runs(attr, ni, run, &end);
64529 +               if (err)
64530 +                       goto out;
64532 +               evcn1 = evcn + 1;
64533 +               attr->nres.svcn = cpu_to_le64(next_svcn);
64534 +               err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
64535 +               if (err)
64536 +                       goto out;
64538 +               le->vcn = cpu_to_le64(next_svcn);
64539 +               ni->attr_list.dirty = true;
64540 +               mi->dirty = true;
64542 +               next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
64543 +       }
64544 +ins_ext:
64545 +       if (evcn1 > next_svcn) {
64546 +               err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
64547 +                                           next_svcn, evcn1 - next_svcn,
64548 +                                           attr_b->flags, &attr, &mi);
64549 +               if (err)
64550 +                       goto out;
64551 +       }
64552 +ok:
64553 +       run_truncate_around(run, vcn);
64554 +out:
64555 +       if (new_valid > data_size)
64556 +               new_valid = data_size;
64558 +       valid_size = le64_to_cpu(attr_b->nres.valid_size);
64559 +       if (new_valid != valid_size) {
64560 +               attr_b->nres.valid_size = cpu_to_le64(valid_size);
64561 +               mi_b->dirty = true;
64562 +       }
64564 +       return err;
64567 +/* Collapse range in file */
64568 +int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
64570 +       int err = 0;
64571 +       struct runs_tree *run = &ni->file.run;
64572 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
64573 +       struct ATTRIB *attr = NULL, *attr_b;
64574 +       struct ATTR_LIST_ENTRY *le, *le_b;
64575 +       struct mft_inode *mi, *mi_b;
64576 +       CLST svcn, evcn1, len, dealloc, alen;
64577 +       CLST vcn, end;
64578 +       u64 valid_size, data_size, alloc_size, total_size;
64579 +       u32 mask;
64580 +       __le16 a_flags;
64582 +       if (!bytes)
64583 +               return 0;
64585 +       le_b = NULL;
64586 +       attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
64587 +       if (!attr_b)
64588 +               return -ENOENT;
64590 +       if (!attr_b->non_res) {
64591 +               /* Attribute is resident. Nothing to do? */
64592 +               return 0;
64593 +       }
64595 +       data_size = le64_to_cpu(attr_b->nres.data_size);
64596 +       alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
64597 +       a_flags = attr_b->flags;
64599 +       if (is_attr_ext(attr_b)) {
64600 +               total_size = le64_to_cpu(attr_b->nres.total_size);
64601 +               mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
64602 +       } else {
64603 +               total_size = alloc_size;
64604 +               mask = sbi->cluster_mask;
64605 +       }
64607 +       if ((vbo & mask) || (bytes & mask)) {
64608 +               /* allow to collapse only cluster aligned ranges */
64609 +               return -EINVAL;
64610 +       }
64612 +       if (vbo > data_size)
64613 +               return -EINVAL;
64615 +       down_write(&ni->file.run_lock);
64617 +       if (vbo + bytes >= data_size) {
64618 +               u64 new_valid = min(ni->i_valid, vbo);
64620 +               /* Simple truncate file at 'vbo' */
64621 +               truncate_setsize(&ni->vfs_inode, vbo);
64622 +               err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
64623 +                                   &new_valid, true, NULL);
64625 +               if (!err && new_valid < ni->i_valid)
64626 +                       ni->i_valid = new_valid;
64628 +               goto out;
64629 +       }
64631 +       /*
64632 +        * Enumerate all attribute segments and collapse
64633 +        */
64634 +       alen = alloc_size >> sbi->cluster_bits;
64635 +       vcn = vbo >> sbi->cluster_bits;
64636 +       len = bytes >> sbi->cluster_bits;
64637 +       end = vcn + len;
64638 +       dealloc = 0;
64640 +       svcn = le64_to_cpu(attr_b->nres.svcn);
64641 +       evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
64643 +       if (svcn <= vcn && vcn < evcn1) {
64644 +               attr = attr_b;
64645 +               le = le_b;
64646 +               mi = mi_b;
64647 +       } else if (!le_b) {
64648 +               err = -EINVAL;
64649 +               goto out;
64650 +       } else {
64651 +               le = le_b;
64652 +               attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
64653 +                                   &mi);
64654 +               if (!attr) {
64655 +                       err = -EINVAL;
64656 +                       goto out;
64657 +               }
64659 +               svcn = le64_to_cpu(attr->nres.svcn);
64660 +               evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
64661 +       }
64663 +       for (;;) {
64664 +               if (svcn >= end) {
64665 +                       /* shift vcn */
64666 +                       attr->nres.svcn = cpu_to_le64(svcn - len);
64667 +                       attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len);
64668 +                       if (le) {
64669 +                               le->vcn = attr->nres.svcn;
64670 +                               ni->attr_list.dirty = true;
64671 +                       }
64672 +                       mi->dirty = true;
64673 +               } else if (svcn < vcn || end < evcn1) {
64674 +                       CLST vcn1, eat, next_svcn;
64676 +                       /* collapse a part of this attribute segment */
64677 +                       err = attr_load_runs(attr, ni, run, &svcn);
64678 +                       if (err)
64679 +                               goto out;
64680 +                       vcn1 = max(vcn, svcn);
64681 +                       eat = min(end, evcn1) - vcn1;
64683 +                       err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,
64684 +                                               true);
64685 +                       if (err)
64686 +                               goto out;
64688 +                       if (!run_collapse_range(run, vcn1, eat)) {
64689 +                               err = -ENOMEM;
64690 +                               goto out;
64691 +                       }
64693 +                       if (svcn >= vcn) {
64694 +                               /* shift vcn */
64695 +                               attr->nres.svcn = cpu_to_le64(vcn);
64696 +                               if (le) {
64697 +                                       le->vcn = attr->nres.svcn;
64698 +                                       ni->attr_list.dirty = true;
64699 +                               }
64700 +                       }
64702 +                       err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat);
64703 +                       if (err)
64704 +                               goto out;
64706 +                       next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
64707 +                       if (next_svcn + eat < evcn1) {
64708 +                               err = ni_insert_nonresident(
64709 +                                       ni, ATTR_DATA, NULL, 0, run, next_svcn,
64710 +                                       evcn1 - eat - next_svcn, a_flags, &attr,
64711 +                                       &mi);
64712 +                               if (err)
64713 +                                       goto out;
64715 +                               /* layout of records maybe changed */
64716 +                               attr_b = NULL;
64717 +                               le = al_find_ex(ni, NULL, ATTR_DATA, NULL, 0,
64718 +                                               &next_svcn);
64719 +                               if (!le) {
64720 +                                       err = -EINVAL;
64721 +                                       goto out;
64722 +                               }
64723 +                       }
64725 +                       /* free all allocated memory */
64726 +                       run_truncate(run, 0);
64727 +               } else {
64728 +                       u16 le_sz;
64729 +                       u16 roff = le16_to_cpu(attr->nres.run_off);
64731 +                       /*run==1 means unpack and deallocate*/
64732 +                       run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
64733 +                                     evcn1 - 1, svcn, Add2Ptr(attr, roff),
64734 +                                     le32_to_cpu(attr->size) - roff);
64736 +                       /* delete this attribute segment */
64737 +                       mi_remove_attr(mi, attr);
64738 +                       if (!le)
64739 +                               break;
64741 +                       le_sz = le16_to_cpu(le->size);
64742 +                       if (!al_remove_le(ni, le)) {
64743 +                               err = -EINVAL;
64744 +                               goto out;
64745 +                       }
64747 +                       if (evcn1 >= alen)
64748 +                               break;
64750 +                       if (!svcn) {
64751 +                               /* Load next record that contains this attribute */
64752 +                               if (ni_load_mi(ni, le, &mi)) {
64753 +                                       err = -EINVAL;
64754 +                                       goto out;
64755 +                               }
64757 +                               /* Look for required attribute */
64758 +                               attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL,
64759 +                                                   0, &le->id);
64760 +                               if (!attr) {
64761 +                                       err = -EINVAL;
64762 +                                       goto out;
64763 +                               }
64764 +                               goto next_attr;
64765 +                       }
64766 +                       le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
64767 +               }
64769 +               if (evcn1 >= alen)
64770 +                       break;
64772 +               attr = ni_enum_attr_ex(ni, attr, &le, &mi);
64773 +               if (!attr) {
64774 +                       err = -EINVAL;
64775 +                       goto out;
64776 +               }
64778 +next_attr:
64779 +               svcn = le64_to_cpu(attr->nres.svcn);
64780 +               evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
64781 +       }
64783 +       if (!attr_b) {
64784 +               le_b = NULL;
64785 +               attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
64786 +                                     &mi_b);
64787 +               if (!attr_b) {
64788 +                       err = -ENOENT;
64789 +                       goto out;
64790 +               }
64791 +       }
64793 +       data_size -= bytes;
64794 +       valid_size = ni->i_valid;
64795 +       if (vbo + bytes <= valid_size)
64796 +               valid_size -= bytes;
64797 +       else if (vbo < valid_size)
64798 +               valid_size = vbo;
64800 +       attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes);
64801 +       attr_b->nres.data_size = cpu_to_le64(data_size);
64802 +       attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size));
64803 +       total_size -= (u64)dealloc << sbi->cluster_bits;
64804 +       if (is_attr_ext(attr_b))
64805 +               attr_b->nres.total_size = cpu_to_le64(total_size);
64806 +       mi_b->dirty = true;
64808 +       /*update inode size*/
64809 +       ni->i_valid = valid_size;
64810 +       ni->vfs_inode.i_size = data_size;
64811 +       inode_set_bytes(&ni->vfs_inode, total_size);
64812 +       ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
64813 +       mark_inode_dirty(&ni->vfs_inode);
64815 +out:
64816 +       up_write(&ni->file.run_lock);
64817 +       if (err)
64818 +               make_bad_inode(&ni->vfs_inode);
64820 +       return err;
64823 +/* not for normal files */
64824 +int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes)
64826 +       int err = 0;
64827 +       struct runs_tree *run = &ni->file.run;
64828 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
64829 +       struct ATTRIB *attr = NULL, *attr_b;
64830 +       struct ATTR_LIST_ENTRY *le, *le_b;
64831 +       struct mft_inode *mi, *mi_b;
64832 +       CLST svcn, evcn1, vcn, len, end, alen, dealloc;
64833 +       u64 total_size, alloc_size;
64835 +       if (!bytes)
64836 +               return 0;
64838 +       le_b = NULL;
64839 +       attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
64840 +       if (!attr_b)
64841 +               return -ENOENT;
64843 +       if (!attr_b->non_res) {
64844 +               u32 data_size = le32_to_cpu(attr->res.data_size);
64845 +               u32 from, to;
64847 +               if (vbo > data_size)
64848 +                       return 0;
64850 +               from = vbo;
64851 +               to = (vbo + bytes) < data_size ? (vbo + bytes) : data_size;
64852 +               memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
64853 +               return 0;
64854 +       }
64856 +       /* TODO: add support for normal files too */
64857 +       if (!is_attr_ext(attr_b))
64858 +               return -EOPNOTSUPP;
64860 +       alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
64861 +       total_size = le64_to_cpu(attr_b->nres.total_size);
64863 +       if (vbo >= alloc_size) {
64864 +               // NOTE: it is allowed
64865 +               return 0;
64866 +       }
64868 +       if (vbo + bytes > alloc_size)
64869 +               bytes = alloc_size - vbo;
64871 +       down_write(&ni->file.run_lock);
64872 +       /*
64873 +        * Enumerate all attribute segments and punch hole where necessary
64874 +        */
64875 +       alen = alloc_size >> sbi->cluster_bits;
64876 +       vcn = vbo >> sbi->cluster_bits;
64877 +       len = bytes >> sbi->cluster_bits;
64878 +       end = vcn + len;
64879 +       dealloc = 0;
64881 +       svcn = le64_to_cpu(attr_b->nres.svcn);
64882 +       evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
64884 +       if (svcn <= vcn && vcn < evcn1) {
64885 +               attr = attr_b;
64886 +               le = le_b;
64887 +               mi = mi_b;
64888 +       } else if (!le_b) {
64889 +               err = -EINVAL;
64890 +               goto out;
64891 +       } else {
64892 +               le = le_b;
64893 +               attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
64894 +                                   &mi);
64895 +               if (!attr) {
64896 +                       err = -EINVAL;
64897 +                       goto out;
64898 +               }
64900 +               svcn = le64_to_cpu(attr->nres.svcn);
64901 +               evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
64902 +       }
64904 +       while (svcn < end) {
64905 +               CLST vcn1, zero, dealloc2;
64907 +               err = attr_load_runs(attr, ni, run, &svcn);
64908 +               if (err)
64909 +                       goto out;
64910 +               vcn1 = max(vcn, svcn);
64911 +               zero = min(end, evcn1) - vcn1;
64913 +               dealloc2 = dealloc;
64914 +               err = run_deallocate_ex(sbi, run, vcn1, zero, &dealloc, true);
64915 +               if (err)
64916 +                       goto out;
64918 +               if (dealloc2 == dealloc) {
64919 +                       /* looks like  the required range is already sparsed */
64920 +               } else {
64921 +                       if (!run_add_entry(run, vcn1, SPARSE_LCN, zero,
64922 +                                          false)) {
64923 +                               err = -ENOMEM;
64924 +                               goto out;
64925 +                       }
64927 +                       err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
64928 +                       if (err)
64929 +                               goto out;
64930 +               }
64931 +               /* free all allocated memory */
64932 +               run_truncate(run, 0);
64934 +               if (evcn1 >= alen)
64935 +                       break;
64937 +               attr = ni_enum_attr_ex(ni, attr, &le, &mi);
64938 +               if (!attr) {
64939 +                       err = -EINVAL;
64940 +                       goto out;
64941 +               }
64943 +               svcn = le64_to_cpu(attr->nres.svcn);
64944 +               evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
64945 +       }
64947 +       total_size -= (u64)dealloc << sbi->cluster_bits;
64948 +       attr_b->nres.total_size = cpu_to_le64(total_size);
64949 +       mi_b->dirty = true;
64951 +       /*update inode size*/
64952 +       inode_set_bytes(&ni->vfs_inode, total_size);
64953 +       ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
64954 +       mark_inode_dirty(&ni->vfs_inode);
64956 +out:
64957 +       up_write(&ni->file.run_lock);
64958 +       if (err)
64959 +               make_bad_inode(&ni->vfs_inode);
64961 +       return err;
64963 diff --git a/fs/ntfs3/attrlist.c b/fs/ntfs3/attrlist.c
64964 new file mode 100644
64965 index 000000000000..ea561361b576
64966 --- /dev/null
64967 +++ b/fs/ntfs3/attrlist.c
64968 @@ -0,0 +1,456 @@
64969 +// SPDX-License-Identifier: GPL-2.0
64971 + *
64972 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
64973 + *
64974 + */
64976 +#include <linux/blkdev.h>
64977 +#include <linux/buffer_head.h>
64978 +#include <linux/fs.h>
64979 +#include <linux/nls.h>
64981 +#include "debug.h"
64982 +#include "ntfs.h"
64983 +#include "ntfs_fs.h"
64985 +/* Returns true if le is valid */
64986 +static inline bool al_is_valid_le(const struct ntfs_inode *ni,
64987 +                                 struct ATTR_LIST_ENTRY *le)
64989 +       if (!le || !ni->attr_list.le || !ni->attr_list.size)
64990 +               return false;
64992 +       return PtrOffset(ni->attr_list.le, le) + le16_to_cpu(le->size) <=
64993 +              ni->attr_list.size;
64996 +void al_destroy(struct ntfs_inode *ni)
64998 +       run_close(&ni->attr_list.run);
64999 +       ntfs_free(ni->attr_list.le);
65000 +       ni->attr_list.le = NULL;
65001 +       ni->attr_list.size = 0;
65002 +       ni->attr_list.dirty = false;
65006 + * ntfs_load_attr_list
65007 + *
65008 + * This method makes sure that the ATTRIB list, if present,
65009 + * has been properly set up.
65010 + */
65011 +int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr)
65013 +       int err;
65014 +       size_t lsize;
65015 +       void *le = NULL;
65017 +       if (ni->attr_list.size)
65018 +               return 0;
65020 +       if (!attr->non_res) {
65021 +               lsize = le32_to_cpu(attr->res.data_size);
65022 +               le = ntfs_malloc(al_aligned(lsize));
65023 +               if (!le) {
65024 +                       err = -ENOMEM;
65025 +                       goto out;
65026 +               }
65027 +               memcpy(le, resident_data(attr), lsize);
65028 +       } else if (attr->nres.svcn) {
65029 +               err = -EINVAL;
65030 +               goto out;
65031 +       } else {
65032 +               u16 run_off = le16_to_cpu(attr->nres.run_off);
65034 +               lsize = le64_to_cpu(attr->nres.data_size);
65036 +               run_init(&ni->attr_list.run);
65038 +               err = run_unpack_ex(&ni->attr_list.run, ni->mi.sbi, ni->mi.rno,
65039 +                                   0, le64_to_cpu(attr->nres.evcn), 0,
65040 +                                   Add2Ptr(attr, run_off),
65041 +                                   le32_to_cpu(attr->size) - run_off);
65042 +               if (err < 0)
65043 +                       goto out;
65045 +               le = ntfs_malloc(al_aligned(lsize));
65046 +               if (!le) {
65047 +                       err = -ENOMEM;
65048 +                       goto out;
65049 +               }
65051 +               err = ntfs_read_run_nb(ni->mi.sbi, &ni->attr_list.run, 0, le,
65052 +                                      lsize, NULL);
65053 +               if (err)
65054 +                       goto out;
65055 +       }
65057 +       ni->attr_list.size = lsize;
65058 +       ni->attr_list.le = le;
65060 +       return 0;
65062 +out:
65063 +       ni->attr_list.le = le;
65064 +       al_destroy(ni);
65066 +       return err;
65070 + * al_enumerate
65071 + *
65072 + * Returns the next list 'le'
65073 + * if 'le' is NULL then returns the first 'le'
65074 + */
65075 +struct ATTR_LIST_ENTRY *al_enumerate(struct ntfs_inode *ni,
65076 +                                    struct ATTR_LIST_ENTRY *le)
65078 +       size_t off;
65079 +       u16 sz;
65081 +       if (!le) {
65082 +               le = ni->attr_list.le;
65083 +       } else {
65084 +               sz = le16_to_cpu(le->size);
65085 +               if (sz < sizeof(struct ATTR_LIST_ENTRY)) {
65086 +                       /* Impossible 'cause we should not return such 'le' */
65087 +                       return NULL;
65088 +               }
65089 +               le = Add2Ptr(le, sz);
65090 +       }
65092 +       /* Check boundary */
65093 +       off = PtrOffset(ni->attr_list.le, le);
65094 +       if (off + sizeof(struct ATTR_LIST_ENTRY) > ni->attr_list.size) {
65095 +               // The regular end of list
65096 +               return NULL;
65097 +       }
65099 +       sz = le16_to_cpu(le->size);
65101 +       /* Check 'le' for errors */
65102 +       if (sz < sizeof(struct ATTR_LIST_ENTRY) ||
65103 +           off + sz > ni->attr_list.size ||
65104 +           sz < le->name_off + le->name_len * sizeof(short)) {
65105 +               return NULL;
65106 +       }
65108 +       return le;
65112 + * al_find_le
65113 + *
65114 + * finds the first 'le' in the list which matches type, name and vcn
65115 + * Returns NULL if not found
65116 + */
65117 +struct ATTR_LIST_ENTRY *al_find_le(struct ntfs_inode *ni,
65118 +                                  struct ATTR_LIST_ENTRY *le,
65119 +                                  const struct ATTRIB *attr)
65121 +       CLST svcn = attr_svcn(attr);
65123 +       return al_find_ex(ni, le, attr->type, attr_name(attr), attr->name_len,
65124 +                         &svcn);
65128 + * al_find_ex
65129 + *
65130 + * finds the first 'le' in the list which matches type, name and vcn
65131 + * Returns NULL if not found
65132 + */
65133 +struct ATTR_LIST_ENTRY *al_find_ex(struct ntfs_inode *ni,
65134 +                                  struct ATTR_LIST_ENTRY *le,
65135 +                                  enum ATTR_TYPE type, const __le16 *name,
65136 +                                  u8 name_len, const CLST *vcn)
65138 +       struct ATTR_LIST_ENTRY *ret = NULL;
65139 +       u32 type_in = le32_to_cpu(type);
65141 +       while ((le = al_enumerate(ni, le))) {
65142 +               u64 le_vcn;
65143 +               int diff = le32_to_cpu(le->type) - type_in;
65145 +               /* List entries are sorted by type, name and vcn */
65146 +               if (diff < 0)
65147 +                       continue;
65149 +               if (diff > 0)
65150 +                       return ret;
65152 +               if (le->name_len != name_len)
65153 +                       continue;
65155 +               le_vcn = le64_to_cpu(le->vcn);
65156 +               if (!le_vcn) {
65157 +                       /*
65158 +                        * compare entry names only for entry with vcn == 0
65159 +                        */
65160 +                       diff = ntfs_cmp_names(le_name(le), name_len, name,
65161 +                                             name_len, ni->mi.sbi->upcase,
65162 +                                             true);
65163 +                       if (diff < 0)
65164 +                               continue;
65166 +                       if (diff > 0)
65167 +                               return ret;
65168 +               }
65170 +               if (!vcn)
65171 +                       return le;
65173 +               if (*vcn == le_vcn)
65174 +                       return le;
65176 +               if (*vcn < le_vcn)
65177 +                       return ret;
65179 +               ret = le;
65180 +       }
65182 +       return ret;
65186 + * al_find_le_to_insert
65187 + *
65188 + * finds the first list entry which matches type, name and vcn
65189 + */
65190 +static struct ATTR_LIST_ENTRY *al_find_le_to_insert(struct ntfs_inode *ni,
65191 +                                                   enum ATTR_TYPE type,
65192 +                                                   const __le16 *name,
65193 +                                                   u8 name_len, CLST vcn)
65195 +       struct ATTR_LIST_ENTRY *le = NULL, *prev;
65196 +       u32 type_in = le32_to_cpu(type);
65198 +       /* List entries are sorted by type, name, vcn */
65199 +       while ((le = al_enumerate(ni, prev = le))) {
65200 +               int diff = le32_to_cpu(le->type) - type_in;
65202 +               if (diff < 0)
65203 +                       continue;
65205 +               if (diff > 0)
65206 +                       return le;
65208 +               if (!le->vcn) {
65209 +                       /*
65210 +                        * compare entry names only for entry with vcn == 0
65211 +                        */
65212 +                       diff = ntfs_cmp_names(le_name(le), le->name_len, name,
65213 +                                             name_len, ni->mi.sbi->upcase,
65214 +                                             true);
65215 +                       if (diff < 0)
65216 +                               continue;
65218 +                       if (diff > 0)
65219 +                               return le;
65220 +               }
65222 +               if (le64_to_cpu(le->vcn) >= vcn)
65223 +                       return le;
65224 +       }
65226 +       return prev ? Add2Ptr(prev, le16_to_cpu(prev->size)) : ni->attr_list.le;
65230 + * al_add_le
65231 + *
65232 + * adds an "attribute list entry" to the list.
65233 + */
65234 +int al_add_le(struct ntfs_inode *ni, enum ATTR_TYPE type, const __le16 *name,
65235 +             u8 name_len, CLST svcn, __le16 id, const struct MFT_REF *ref,
65236 +             struct ATTR_LIST_ENTRY **new_le)
65238 +       int err;
65239 +       struct ATTRIB *attr;
65240 +       struct ATTR_LIST_ENTRY *le;
65241 +       size_t off;
65242 +       u16 sz;
65243 +       size_t asize, new_asize;
65244 +       u64 new_size;
65245 +       typeof(ni->attr_list) *al = &ni->attr_list;
65247 +       /*
65248 +        * Compute the size of the new 'le'
65249 +        */
65250 +       sz = le_size(name_len);
65251 +       new_size = al->size + sz;
65252 +       asize = al_aligned(al->size);
65253 +       new_asize = al_aligned(new_size);
65255 +       /* Scan forward to the point at which the new 'le' should be inserted. */
65256 +       le = al_find_le_to_insert(ni, type, name, name_len, svcn);
65257 +       off = PtrOffset(al->le, le);
65259 +       if (new_size > asize) {
65260 +               void *ptr = ntfs_malloc(new_asize);
65262 +               if (!ptr)
65263 +                       return -ENOMEM;
65265 +               memcpy(ptr, al->le, off);
65266 +               memcpy(Add2Ptr(ptr, off + sz), le, al->size - off);
65267 +               le = Add2Ptr(ptr, off);
65268 +               ntfs_free(al->le);
65269 +               al->le = ptr;
65270 +       } else {
65271 +               memmove(Add2Ptr(le, sz), le, al->size - off);
65272 +       }
65274 +       al->size = new_size;
65276 +       le->type = type;
65277 +       le->size = cpu_to_le16(sz);
65278 +       le->name_len = name_len;
65279 +       le->name_off = offsetof(struct ATTR_LIST_ENTRY, name);
65280 +       le->vcn = cpu_to_le64(svcn);
65281 +       le->ref = *ref;
65282 +       le->id = id;
65283 +       memcpy(le->name, name, sizeof(short) * name_len);
65285 +       al->dirty = true;
65287 +       err = attr_set_size(ni, ATTR_LIST, NULL, 0, &al->run, new_size,
65288 +                           &new_size, true, &attr);
65289 +       if (err)
65290 +               return err;
65292 +       if (attr && attr->non_res) {
65293 +               err = ntfs_sb_write_run(ni->mi.sbi, &al->run, 0, al->le,
65294 +                                       al->size);
65295 +               if (err)
65296 +                       return err;
65297 +       }
65299 +       al->dirty = false;
65300 +       *new_le = le;
65302 +       return 0;
65306 + * al_remove_le
65307 + *
65308 + * removes 'le' from attribute list
65309 + */
65310 +bool al_remove_le(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le)
65312 +       u16 size;
65313 +       size_t off;
65314 +       typeof(ni->attr_list) *al = &ni->attr_list;
65316 +       if (!al_is_valid_le(ni, le))
65317 +               return false;
65319 +       /* Save on stack the size of 'le' */
65320 +       size = le16_to_cpu(le->size);
65321 +       off = PtrOffset(al->le, le);
65323 +       memmove(le, Add2Ptr(le, size), al->size - (off + size));
65325 +       al->size -= size;
65326 +       al->dirty = true;
65328 +       return true;
65332 + * al_delete_le
65333 + *
65334 + * deletes from the list the first 'le' which matches its parameters.
65335 + */
65336 +bool al_delete_le(struct ntfs_inode *ni, enum ATTR_TYPE type, CLST vcn,
65337 +                 const __le16 *name, size_t name_len,
65338 +                 const struct MFT_REF *ref)
65340 +       u16 size;
65341 +       struct ATTR_LIST_ENTRY *le;
65342 +       size_t off;
65343 +       typeof(ni->attr_list) *al = &ni->attr_list;
65345 +       /* Scan forward to the first 'le' that matches the input */
65346 +       le = al_find_ex(ni, NULL, type, name, name_len, &vcn);
65347 +       if (!le)
65348 +               return false;
65350 +       off = PtrOffset(al->le, le);
65352 +next:
65353 +       if (off >= al->size)
65354 +               return false;
65355 +       if (le->type != type)
65356 +               return false;
65357 +       if (le->name_len != name_len)
65358 +               return false;
65359 +       if (name_len && ntfs_cmp_names(le_name(le), name_len, name, name_len,
65360 +                                      ni->mi.sbi->upcase, true))
65361 +               return false;
65362 +       if (le64_to_cpu(le->vcn) != vcn)
65363 +               return false;
65365 +       /*
65366 +        * The caller specified a segment reference, so we have to
65367 +        * scan through the matching entries until we find that segment
65368 +        * reference or we run of matching entries.
65369 +        */
65370 +       if (ref && memcmp(ref, &le->ref, sizeof(*ref))) {
65371 +               off += le16_to_cpu(le->size);
65372 +               le = Add2Ptr(al->le, off);
65373 +               goto next;
65374 +       }
65376 +       /* Save on stack the size of 'le' */
65377 +       size = le16_to_cpu(le->size);
65378 +       /* Delete 'le'. */
65379 +       memmove(le, Add2Ptr(le, size), al->size - (off + size));
65381 +       al->size -= size;
65382 +       al->dirty = true;
65384 +       return true;
65388 + * al_update
65389 + */
65390 +int al_update(struct ntfs_inode *ni)
65392 +       int err;
65393 +       struct ATTRIB *attr;
65394 +       typeof(ni->attr_list) *al = &ni->attr_list;
65396 +       if (!al->dirty || !al->size)
65397 +               return 0;
65399 +       /*
65400 +        * attribute list increased on demand in al_add_le
65401 +        * attribute list decreased here
65402 +        */
65403 +       err = attr_set_size(ni, ATTR_LIST, NULL, 0, &al->run, al->size, NULL,
65404 +                           false, &attr);
65405 +       if (err)
65406 +               goto out;
65408 +       if (!attr->non_res) {
65409 +               memcpy(resident_data(attr), al->le, al->size);
65410 +       } else {
65411 +               err = ntfs_sb_write_run(ni->mi.sbi, &al->run, 0, al->le,
65412 +                                       al->size);
65413 +               if (err)
65414 +                       goto out;
65416 +               attr->nres.valid_size = attr->nres.data_size;
65417 +       }
65419 +       ni->mi.dirty = true;
65420 +       al->dirty = false;
65422 +out:
65423 +       return err;
65425 diff --git a/fs/ntfs3/bitfunc.c b/fs/ntfs3/bitfunc.c
65426 new file mode 100644
65427 index 000000000000..2de5faef2721
65428 --- /dev/null
65429 +++ b/fs/ntfs3/bitfunc.c
65430 @@ -0,0 +1,135 @@
65431 +// SPDX-License-Identifier: GPL-2.0
65433 + *
65434 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
65435 + *
65436 + */
65437 +#include <linux/blkdev.h>
65438 +#include <linux/buffer_head.h>
65439 +#include <linux/fs.h>
65440 +#include <linux/nls.h>
65442 +#include "debug.h"
65443 +#include "ntfs.h"
65444 +#include "ntfs_fs.h"
65446 +#define BITS_IN_SIZE_T (sizeof(size_t) * 8)
65449 + * fill_mask[i] - first i bits are '1' , i = 0,1,2,3,4,5,6,7,8
65450 + * fill_mask[i] = 0xFF >> (8-i)
65451 + */
65452 +static const u8 fill_mask[] = { 0x00, 0x01, 0x03, 0x07, 0x0F,
65453 +                               0x1F, 0x3F, 0x7F, 0xFF };
65456 + * zero_mask[i] - first i bits are '0' , i = 0,1,2,3,4,5,6,7,8
65457 + * zero_mask[i] = 0xFF << i
65458 + */
65459 +static const u8 zero_mask[] = { 0xFF, 0xFE, 0xFC, 0xF8, 0xF0,
65460 +                               0xE0, 0xC0, 0x80, 0x00 };
65463 + * are_bits_clear
65464 + *
65465 + * Returns true if all bits [bit, bit+nbits) are zeros "0"
65466 + */
65467 +bool are_bits_clear(const ulong *lmap, size_t bit, size_t nbits)
65469 +       size_t pos = bit & 7;
65470 +       const u8 *map = (u8 *)lmap + (bit >> 3);
65472 +       if (pos) {
65473 +               if (8 - pos >= nbits)
65474 +                       return !nbits || !(*map & fill_mask[pos + nbits] &
65475 +                                          zero_mask[pos]);
65477 +               if (*map++ & zero_mask[pos])
65478 +                       return false;
65479 +               nbits -= 8 - pos;
65480 +       }
65482 +       pos = ((size_t)map) & (sizeof(size_t) - 1);
65483 +       if (pos) {
65484 +               pos = sizeof(size_t) - pos;
65485 +               if (nbits >= pos * 8) {
65486 +                       for (nbits -= pos * 8; pos; pos--, map++) {
65487 +                               if (*map)
65488 +                                       return false;
65489 +                       }
65490 +               }
65491 +       }
65493 +       for (pos = nbits / BITS_IN_SIZE_T; pos; pos--, map += sizeof(size_t)) {
65494 +               if (*((size_t *)map))
65495 +                       return false;
65496 +       }
65498 +       for (pos = (nbits % BITS_IN_SIZE_T) >> 3; pos; pos--, map++) {
65499 +               if (*map)
65500 +                       return false;
65501 +       }
65503 +       pos = nbits & 7;
65504 +       if (pos && (*map & fill_mask[pos]))
65505 +               return false;
65507 +       // All bits are zero
65508 +       return true;
65512 + * are_bits_set
65513 + *
65514 + * Returns true if all bits [bit, bit+nbits) are ones "1"
65515 + */
65516 +bool are_bits_set(const ulong *lmap, size_t bit, size_t nbits)
65518 +       u8 mask;
65519 +       size_t pos = bit & 7;
65520 +       const u8 *map = (u8 *)lmap + (bit >> 3);
65522 +       if (pos) {
65523 +               if (8 - pos >= nbits) {
65524 +                       mask = fill_mask[pos + nbits] & zero_mask[pos];
65525 +                       return !nbits || (*map & mask) == mask;
65526 +               }
65528 +               mask = zero_mask[pos];
65529 +               if ((*map++ & mask) != mask)
65530 +                       return false;
65531 +               nbits -= 8 - pos;
65532 +       }
65534 +       pos = ((size_t)map) & (sizeof(size_t) - 1);
65535 +       if (pos) {
65536 +               pos = sizeof(size_t) - pos;
65537 +               if (nbits >= pos * 8) {
65538 +                       for (nbits -= pos * 8; pos; pos--, map++) {
65539 +                               if (*map != 0xFF)
65540 +                                       return false;
65541 +                       }
65542 +               }
65543 +       }
65545 +       for (pos = nbits / BITS_IN_SIZE_T; pos; pos--, map += sizeof(size_t)) {
65546 +               if (*((size_t *)map) != MINUS_ONE_T)
65547 +                       return false;
65548 +       }
65550 +       for (pos = (nbits % BITS_IN_SIZE_T) >> 3; pos; pos--, map++) {
65551 +               if (*map != 0xFF)
65552 +                       return false;
65553 +       }
65555 +       pos = nbits & 7;
65556 +       if (pos) {
65557 +               u8 mask = fill_mask[pos];
65559 +               if ((*map & mask) != mask)
65560 +                       return false;
65561 +       }
65563 +       // All bits are ones
65564 +       return true;
65566 diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c
65567 new file mode 100644
65568 index 000000000000..32aab0031221
65569 --- /dev/null
65570 +++ b/fs/ntfs3/bitmap.c
65571 @@ -0,0 +1,1519 @@
65572 +// SPDX-License-Identifier: GPL-2.0
65574 + *
65575 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
65576 + *
65577 + * This code builds two trees of free clusters extents.
65578 + * Trees are sorted by start of extent and by length of extent.
65579 + * NTFS_MAX_WND_EXTENTS defines the maximum number of elements in trees.
65580 + * In extreme case code reads on-disk bitmap to find free clusters
65581 + *
65582 + */
65584 +#include <linux/blkdev.h>
65585 +#include <linux/buffer_head.h>
65586 +#include <linux/fs.h>
65587 +#include <linux/nls.h>
65589 +#include "debug.h"
65590 +#include "ntfs.h"
65591 +#include "ntfs_fs.h"
65594 + * Maximum number of extents in tree.
65595 + */
65596 +#define NTFS_MAX_WND_EXTENTS (32u * 1024u)
65598 +struct rb_node_key {
65599 +       struct rb_node node;
65600 +       size_t key;
65604 + * Tree is sorted by start (key)
65605 + */
65606 +struct e_node {
65607 +       struct rb_node_key start; /* Tree sorted by start */
65608 +       struct rb_node_key count; /* Tree sorted by len*/
65611 +static int wnd_rescan(struct wnd_bitmap *wnd);
65612 +static struct buffer_head *wnd_map(struct wnd_bitmap *wnd, size_t iw);
65613 +static bool wnd_is_free_hlp(struct wnd_bitmap *wnd, size_t bit, size_t bits);
65615 +static struct kmem_cache *ntfs_enode_cachep;
65617 +int __init ntfs3_init_bitmap(void)
65619 +       ntfs_enode_cachep =
65620 +               kmem_cache_create("ntfs3_enode_cache", sizeof(struct e_node), 0,
65621 +                                 SLAB_RECLAIM_ACCOUNT, NULL);
65622 +       return ntfs_enode_cachep ? 0 : -ENOMEM;
65625 +void ntfs3_exit_bitmap(void)
65627 +       kmem_cache_destroy(ntfs_enode_cachep);
65630 +static inline u32 wnd_bits(const struct wnd_bitmap *wnd, size_t i)
65632 +       return i + 1 == wnd->nwnd ? wnd->bits_last : wnd->sb->s_blocksize * 8;
65636 + * b_pos + b_len - biggest fragment
65637 + * Scan range [wpos wbits) window 'buf'
65638 + * Returns -1 if not found
65639 + */
65640 +static size_t wnd_scan(const ulong *buf, size_t wbit, u32 wpos, u32 wend,
65641 +                      size_t to_alloc, size_t *prev_tail, size_t *b_pos,
65642 +                      size_t *b_len)
65644 +       while (wpos < wend) {
65645 +               size_t free_len;
65646 +               u32 free_bits, end;
65647 +               u32 used = find_next_zero_bit(buf, wend, wpos);
65649 +               if (used >= wend) {
65650 +                       if (*b_len < *prev_tail) {
65651 +                               *b_pos = wbit - *prev_tail;
65652 +                               *b_len = *prev_tail;
65653 +                       }
65655 +                       *prev_tail = 0;
65656 +                       return -1;
65657 +               }
65659 +               if (used > wpos) {
65660 +                       wpos = used;
65661 +                       if (*b_len < *prev_tail) {
65662 +                               *b_pos = wbit - *prev_tail;
65663 +                               *b_len = *prev_tail;
65664 +                       }
65666 +                       *prev_tail = 0;
65667 +               }
65669 +               /*
65670 +                * Now we have a fragment [wpos, wend) staring with 0
65671 +                */
65672 +               end = wpos + to_alloc - *prev_tail;
65673 +               free_bits = find_next_bit(buf, min(end, wend), wpos);
65675 +               free_len = *prev_tail + free_bits - wpos;
65677 +               if (*b_len < free_len) {
65678 +                       *b_pos = wbit + wpos - *prev_tail;
65679 +                       *b_len = free_len;
65680 +               }
65682 +               if (free_len >= to_alloc)
65683 +                       return wbit + wpos - *prev_tail;
65685 +               if (free_bits >= wend) {
65686 +                       *prev_tail += free_bits - wpos;
65687 +                       return -1;
65688 +               }
65690 +               wpos = free_bits + 1;
65692 +               *prev_tail = 0;
65693 +       }
65695 +       return -1;
65699 + * wnd_close
65700 + *
65701 + * Frees all resources
65702 + */
65703 +void wnd_close(struct wnd_bitmap *wnd)
65705 +       struct rb_node *node, *next;
65707 +       ntfs_free(wnd->free_bits);
65708 +       run_close(&wnd->run);
65710 +       node = rb_first(&wnd->start_tree);
65712 +       while (node) {
65713 +               next = rb_next(node);
65714 +               rb_erase(node, &wnd->start_tree);
65715 +               kmem_cache_free(ntfs_enode_cachep,
65716 +                               rb_entry(node, struct e_node, start.node));
65717 +               node = next;
65718 +       }
65721 +static struct rb_node *rb_lookup(struct rb_root *root, size_t v)
65723 +       struct rb_node **p = &root->rb_node;
65724 +       struct rb_node *r = NULL;
65726 +       while (*p) {
65727 +               struct rb_node_key *k;
65729 +               k = rb_entry(*p, struct rb_node_key, node);
65730 +               if (v < k->key) {
65731 +                       p = &(*p)->rb_left;
65732 +               } else if (v > k->key) {
65733 +                       r = &k->node;
65734 +                       p = &(*p)->rb_right;
65735 +               } else {
65736 +                       return &k->node;
65737 +               }
65738 +       }
65740 +       return r;
65744 + * rb_insert_count
65745 + *
65746 + * Helper function to insert special kind of 'count' tree
65747 + */
65748 +static inline bool rb_insert_count(struct rb_root *root, struct e_node *e)
65750 +       struct rb_node **p = &root->rb_node;
65751 +       struct rb_node *parent = NULL;
65752 +       size_t e_ckey = e->count.key;
65753 +       size_t e_skey = e->start.key;
65755 +       while (*p) {
65756 +               struct e_node *k =
65757 +                       rb_entry(parent = *p, struct e_node, count.node);
65759 +               if (e_ckey > k->count.key) {
65760 +                       p = &(*p)->rb_left;
65761 +               } else if (e_ckey < k->count.key) {
65762 +                       p = &(*p)->rb_right;
65763 +               } else if (e_skey < k->start.key) {
65764 +                       p = &(*p)->rb_left;
65765 +               } else if (e_skey > k->start.key) {
65766 +                       p = &(*p)->rb_right;
65767 +               } else {
65768 +                       WARN_ON(1);
65769 +                       return false;
65770 +               }
65771 +       }
65773 +       rb_link_node(&e->count.node, parent, p);
65774 +       rb_insert_color(&e->count.node, root);
65775 +       return true;
65779 + * inline bool rb_insert_start
65780 + *
65781 + * Helper function to insert special kind of 'start' tree
65782 + */
65783 +static inline bool rb_insert_start(struct rb_root *root, struct e_node *e)
65785 +       struct rb_node **p = &root->rb_node;
65786 +       struct rb_node *parent = NULL;
65787 +       size_t e_skey = e->start.key;
65789 +       while (*p) {
65790 +               struct e_node *k;
65792 +               parent = *p;
65794 +               k = rb_entry(parent, struct e_node, start.node);
65795 +               if (e_skey < k->start.key) {
65796 +                       p = &(*p)->rb_left;
65797 +               } else if (e_skey > k->start.key) {
65798 +                       p = &(*p)->rb_right;
65799 +               } else {
65800 +                       WARN_ON(1);
65801 +                       return false;
65802 +               }
65803 +       }
65805 +       rb_link_node(&e->start.node, parent, p);
65806 +       rb_insert_color(&e->start.node, root);
65807 +       return true;
65811 + * wnd_add_free_ext
65812 + *
65813 + * adds a new extent of free space
65814 + * build = 1 when building tree
65815 + */
65816 +static void wnd_add_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len,
65817 +                            bool build)
65819 +       struct e_node *e, *e0 = NULL;
65820 +       size_t ib, end_in = bit + len;
65821 +       struct rb_node *n;
65823 +       if (build) {
65824 +               /* Use extent_min to filter too short extents */
65825 +               if (wnd->count >= NTFS_MAX_WND_EXTENTS &&
65826 +                   len <= wnd->extent_min) {
65827 +                       wnd->uptodated = -1;
65828 +                       return;
65829 +               }
65830 +       } else {
65831 +               /* Try to find extent before 'bit' */
65832 +               n = rb_lookup(&wnd->start_tree, bit);
65834 +               if (!n) {
65835 +                       n = rb_first(&wnd->start_tree);
65836 +               } else {
65837 +                       e = rb_entry(n, struct e_node, start.node);
65838 +                       n = rb_next(n);
65839 +                       if (e->start.key + e->count.key == bit) {
65840 +                               /* Remove left */
65841 +                               bit = e->start.key;
65842 +                               len += e->count.key;
65843 +                               rb_erase(&e->start.node, &wnd->start_tree);
65844 +                               rb_erase(&e->count.node, &wnd->count_tree);
65845 +                               wnd->count -= 1;
65846 +                               e0 = e;
65847 +                       }
65848 +               }
65850 +               while (n) {
65851 +                       size_t next_end;
65853 +                       e = rb_entry(n, struct e_node, start.node);
65854 +                       next_end = e->start.key + e->count.key;
65855 +                       if (e->start.key > end_in)
65856 +                               break;
65858 +                       /* Remove right */
65859 +                       n = rb_next(n);
65860 +                       len += next_end - end_in;
65861 +                       end_in = next_end;
65862 +                       rb_erase(&e->start.node, &wnd->start_tree);
65863 +                       rb_erase(&e->count.node, &wnd->count_tree);
65864 +                       wnd->count -= 1;
65866 +                       if (!e0)
65867 +                               e0 = e;
65868 +                       else
65869 +                               kmem_cache_free(ntfs_enode_cachep, e);
65870 +               }
65872 +               if (wnd->uptodated != 1) {
65873 +                       /* Check bits before 'bit' */
65874 +                       ib = wnd->zone_bit == wnd->zone_end ||
65875 +                                            bit < wnd->zone_end
65876 +                                    ? 0
65877 +                                    : wnd->zone_end;
65879 +                       while (bit > ib && wnd_is_free_hlp(wnd, bit - 1, 1)) {
65880 +                               bit -= 1;
65881 +                               len += 1;
65882 +                       }
65884 +                       /* Check bits after 'end_in' */
65885 +                       ib = wnd->zone_bit == wnd->zone_end ||
65886 +                                            end_in > wnd->zone_bit
65887 +                                    ? wnd->nbits
65888 +                                    : wnd->zone_bit;
65890 +                       while (end_in < ib && wnd_is_free_hlp(wnd, end_in, 1)) {
65891 +                               end_in += 1;
65892 +                               len += 1;
65893 +                       }
65894 +               }
65895 +       }
65896 +       /* Insert new fragment */
65897 +       if (wnd->count >= NTFS_MAX_WND_EXTENTS) {
65898 +               if (e0)
65899 +                       kmem_cache_free(ntfs_enode_cachep, e0);
65901 +               wnd->uptodated = -1;
65903 +               /* Compare with smallest fragment */
65904 +               n = rb_last(&wnd->count_tree);
65905 +               e = rb_entry(n, struct e_node, count.node);
65906 +               if (len <= e->count.key)
65907 +                       goto out; /* Do not insert small fragments */
65909 +               if (build) {
65910 +                       struct e_node *e2;
65912 +                       n = rb_prev(n);
65913 +                       e2 = rb_entry(n, struct e_node, count.node);
65914 +                       /* smallest fragment will be 'e2->count.key' */
65915 +                       wnd->extent_min = e2->count.key;
65916 +               }
65918 +               /* Replace smallest fragment by new one */
65919 +               rb_erase(&e->start.node, &wnd->start_tree);
65920 +               rb_erase(&e->count.node, &wnd->count_tree);
65921 +               wnd->count -= 1;
65922 +       } else {
65923 +               e = e0 ? e0 : kmem_cache_alloc(ntfs_enode_cachep, GFP_ATOMIC);
65924 +               if (!e) {
65925 +                       wnd->uptodated = -1;
65926 +                       goto out;
65927 +               }
65929 +               if (build && len <= wnd->extent_min)
65930 +                       wnd->extent_min = len;
65931 +       }
65932 +       e->start.key = bit;
65933 +       e->count.key = len;
65934 +       if (len > wnd->extent_max)
65935 +               wnd->extent_max = len;
65937 +       rb_insert_start(&wnd->start_tree, e);
65938 +       rb_insert_count(&wnd->count_tree, e);
65939 +       wnd->count += 1;
65941 +out:;
65945 + * wnd_remove_free_ext
65946 + *
65947 + * removes a run from the cached free space
65948 + */
65949 +static void wnd_remove_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len)
65951 +       struct rb_node *n, *n3;
65952 +       struct e_node *e, *e3;
65953 +       size_t end_in = bit + len;
65954 +       size_t end3, end, new_key, new_len, max_new_len;
65956 +       /* Try to find extent before 'bit' */
65957 +       n = rb_lookup(&wnd->start_tree, bit);
65959 +       if (!n)
65960 +               return;
65962 +       e = rb_entry(n, struct e_node, start.node);
65963 +       end = e->start.key + e->count.key;
65965 +       new_key = new_len = 0;
65966 +       len = e->count.key;
65968 +       /* Range [bit,end_in) must be inside 'e' or outside 'e' and 'n' */
65969 +       if (e->start.key > bit)
65970 +               ;
65971 +       else if (end_in <= end) {
65972 +               /* Range [bit,end_in) inside 'e' */
65973 +               new_key = end_in;
65974 +               new_len = end - end_in;
65975 +               len = bit - e->start.key;
65976 +       } else if (bit > end) {
65977 +               bool bmax = false;
65979 +               n3 = rb_next(n);
65981 +               while (n3) {
65982 +                       e3 = rb_entry(n3, struct e_node, start.node);
65983 +                       if (e3->start.key >= end_in)
65984 +                               break;
65986 +                       if (e3->count.key == wnd->extent_max)
65987 +                               bmax = true;
65989 +                       end3 = e3->start.key + e3->count.key;
65990 +                       if (end3 > end_in) {
65991 +                               e3->start.key = end_in;
65992 +                               rb_erase(&e3->count.node, &wnd->count_tree);
65993 +                               e3->count.key = end3 - end_in;
65994 +                               rb_insert_count(&wnd->count_tree, e3);
65995 +                               break;
65996 +                       }
65998 +                       n3 = rb_next(n3);
65999 +                       rb_erase(&e3->start.node, &wnd->start_tree);
66000 +                       rb_erase(&e3->count.node, &wnd->count_tree);
66001 +                       wnd->count -= 1;
66002 +                       kmem_cache_free(ntfs_enode_cachep, e3);
66003 +               }
66004 +               if (!bmax)
66005 +                       return;
66006 +               n3 = rb_first(&wnd->count_tree);
66007 +               wnd->extent_max =
66008 +                       n3 ? rb_entry(n3, struct e_node, count.node)->count.key
66009 +                          : 0;
66010 +               return;
66011 +       }
66013 +       if (e->count.key != wnd->extent_max) {
66014 +               ;
66015 +       } else if (rb_prev(&e->count.node)) {
66016 +               ;
66017 +       } else {
66018 +               n3 = rb_next(&e->count.node);
66019 +               max_new_len = len > new_len ? len : new_len;
66020 +               if (!n3) {
66021 +                       wnd->extent_max = max_new_len;
66022 +               } else {
66023 +                       e3 = rb_entry(n3, struct e_node, count.node);
66024 +                       wnd->extent_max = max(e3->count.key, max_new_len);
66025 +               }
66026 +       }
66028 +       if (!len) {
66029 +               if (new_len) {
66030 +                       e->start.key = new_key;
66031 +                       rb_erase(&e->count.node, &wnd->count_tree);
66032 +                       e->count.key = new_len;
66033 +                       rb_insert_count(&wnd->count_tree, e);
66034 +               } else {
66035 +                       rb_erase(&e->start.node, &wnd->start_tree);
66036 +                       rb_erase(&e->count.node, &wnd->count_tree);
66037 +                       wnd->count -= 1;
66038 +                       kmem_cache_free(ntfs_enode_cachep, e);
66039 +               }
66040 +               goto out;
66041 +       }
66042 +       rb_erase(&e->count.node, &wnd->count_tree);
66043 +       e->count.key = len;
66044 +       rb_insert_count(&wnd->count_tree, e);
66046 +       if (!new_len)
66047 +               goto out;
66049 +       if (wnd->count >= NTFS_MAX_WND_EXTENTS) {
66050 +               wnd->uptodated = -1;
66052 +               /* Get minimal extent */
66053 +               e = rb_entry(rb_last(&wnd->count_tree), struct e_node,
66054 +                            count.node);
66055 +               if (e->count.key > new_len)
66056 +                       goto out;
66058 +               /* Replace minimum */
66059 +               rb_erase(&e->start.node, &wnd->start_tree);
66060 +               rb_erase(&e->count.node, &wnd->count_tree);
66061 +               wnd->count -= 1;
66062 +       } else {
66063 +               e = kmem_cache_alloc(ntfs_enode_cachep, GFP_ATOMIC);
66064 +               if (!e)
66065 +                       wnd->uptodated = -1;
66066 +       }
66068 +       if (e) {
66069 +               e->start.key = new_key;
66070 +               e->count.key = new_len;
66071 +               rb_insert_start(&wnd->start_tree, e);
66072 +               rb_insert_count(&wnd->count_tree, e);
66073 +               wnd->count += 1;
66074 +       }
66076 +out:
66077 +       if (!wnd->count && 1 != wnd->uptodated)
66078 +               wnd_rescan(wnd);
66082 + * wnd_rescan
66083 + *
66084 + * Scan all bitmap. used while initialization.
66085 + */
66086 +static int wnd_rescan(struct wnd_bitmap *wnd)
66088 +       int err = 0;
66089 +       size_t prev_tail = 0;
66090 +       struct super_block *sb = wnd->sb;
66091 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
66092 +       u64 lbo, len = 0;
66093 +       u32 blocksize = sb->s_blocksize;
66094 +       u8 cluster_bits = sbi->cluster_bits;
66095 +       u32 wbits = 8 * sb->s_blocksize;
66096 +       u32 used, frb;
66097 +       const ulong *buf;
66098 +       size_t wpos, wbit, iw, vbo;
66099 +       struct buffer_head *bh = NULL;
66100 +       CLST lcn, clen;
66102 +       wnd->uptodated = 0;
66103 +       wnd->extent_max = 0;
66104 +       wnd->extent_min = MINUS_ONE_T;
66105 +       wnd->total_zeroes = 0;
66107 +       vbo = 0;
66109 +       for (iw = 0; iw < wnd->nwnd; iw++) {
66110 +               if (iw + 1 == wnd->nwnd)
66111 +                       wbits = wnd->bits_last;
66113 +               if (wnd->inited) {
66114 +                       if (!wnd->free_bits[iw]) {
66115 +                               /* all ones */
66116 +                               if (prev_tail) {
66117 +                                       wnd_add_free_ext(wnd,
66118 +                                                        vbo * 8 - prev_tail,
66119 +                                                        prev_tail, true);
66120 +                                       prev_tail = 0;
66121 +                               }
66122 +                               goto next_wnd;
66123 +                       }
66124 +                       if (wbits == wnd->free_bits[iw]) {
66125 +                               /* all zeroes */
66126 +                               prev_tail += wbits;
66127 +                               wnd->total_zeroes += wbits;
66128 +                               goto next_wnd;
66129 +                       }
66130 +               }
66132 +               if (!len) {
66133 +                       u32 off = vbo & sbi->cluster_mask;
66135 +                       if (!run_lookup_entry(&wnd->run, vbo >> cluster_bits,
66136 +                                             &lcn, &clen, NULL)) {
66137 +                               err = -ENOENT;
66138 +                               goto out;
66139 +                       }
66141 +                       lbo = ((u64)lcn << cluster_bits) + off;
66142 +                       len = ((u64)clen << cluster_bits) - off;
66143 +               }
66145 +               bh = ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
66146 +               if (!bh) {
66147 +                       err = -EIO;
66148 +                       goto out;
66149 +               }
66151 +               buf = (ulong *)bh->b_data;
66153 +               used = __bitmap_weight(buf, wbits);
66154 +               if (used < wbits) {
66155 +                       frb = wbits - used;
66156 +                       wnd->free_bits[iw] = frb;
66157 +                       wnd->total_zeroes += frb;
66158 +               }
66160 +               wpos = 0;
66161 +               wbit = vbo * 8;
66163 +               if (wbit + wbits > wnd->nbits)
66164 +                       wbits = wnd->nbits - wbit;
66166 +               do {
66167 +                       used = find_next_zero_bit(buf, wbits, wpos);
66169 +                       if (used > wpos && prev_tail) {
66170 +                               wnd_add_free_ext(wnd, wbit + wpos - prev_tail,
66171 +                                                prev_tail, true);
66172 +                               prev_tail = 0;
66173 +                       }
66175 +                       wpos = used;
66177 +                       if (wpos >= wbits) {
66178 +                               /* No free blocks */
66179 +                               prev_tail = 0;
66180 +                               break;
66181 +                       }
66183 +                       frb = find_next_bit(buf, wbits, wpos);
66184 +                       if (frb >= wbits) {
66185 +                               /* keep last free block */
66186 +                               prev_tail += frb - wpos;
66187 +                               break;
66188 +                       }
66190 +                       wnd_add_free_ext(wnd, wbit + wpos - prev_tail,
66191 +                                        frb + prev_tail - wpos, true);
66193 +                       /* Skip free block and first '1' */
66194 +                       wpos = frb + 1;
66195 +                       /* Reset previous tail */
66196 +                       prev_tail = 0;
66197 +               } while (wpos < wbits);
66199 +next_wnd:
66201 +               if (bh)
66202 +                       put_bh(bh);
66203 +               bh = NULL;
66205 +               vbo += blocksize;
66206 +               if (len) {
66207 +                       len -= blocksize;
66208 +                       lbo += blocksize;
66209 +               }
66210 +       }
66212 +       /* Add last block */
66213 +       if (prev_tail)
66214 +               wnd_add_free_ext(wnd, wnd->nbits - prev_tail, prev_tail, true);
66216 +       /*
66217 +        * Before init cycle wnd->uptodated was 0
66218 +        * If any errors or limits occurs while initialization then
66219 +        * wnd->uptodated will be -1
66220 +        * If 'uptodated' is still 0 then Tree is really updated
66221 +        */
66222 +       if (!wnd->uptodated)
66223 +               wnd->uptodated = 1;
66225 +       if (wnd->zone_bit != wnd->zone_end) {
66226 +               size_t zlen = wnd->zone_end - wnd->zone_bit;
66228 +               wnd->zone_end = wnd->zone_bit;
66229 +               wnd_zone_set(wnd, wnd->zone_bit, zlen);
66230 +       }
66232 +out:
66233 +       return err;
66237 + * wnd_init
66238 + */
66239 +int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits)
66241 +       int err;
66242 +       u32 blocksize = sb->s_blocksize;
66243 +       u32 wbits = blocksize * 8;
66245 +       init_rwsem(&wnd->rw_lock);
66247 +       wnd->sb = sb;
66248 +       wnd->nbits = nbits;
66249 +       wnd->total_zeroes = nbits;
66250 +       wnd->extent_max = MINUS_ONE_T;
66251 +       wnd->zone_bit = wnd->zone_end = 0;
66252 +       wnd->nwnd = bytes_to_block(sb, bitmap_size(nbits));
66253 +       wnd->bits_last = nbits & (wbits - 1);
66254 +       if (!wnd->bits_last)
66255 +               wnd->bits_last = wbits;
66257 +       wnd->free_bits = ntfs_zalloc(wnd->nwnd * sizeof(u16));
66258 +       if (!wnd->free_bits)
66259 +               return -ENOMEM;
66261 +       err = wnd_rescan(wnd);
66262 +       if (err)
66263 +               return err;
66265 +       wnd->inited = true;
66267 +       return 0;
66271 + * wnd_map
66272 + *
66273 + * call sb_bread for requested window
66274 + */
66275 +static struct buffer_head *wnd_map(struct wnd_bitmap *wnd, size_t iw)
66277 +       size_t vbo;
66278 +       CLST lcn, clen;
66279 +       struct super_block *sb = wnd->sb;
66280 +       struct ntfs_sb_info *sbi;
66281 +       struct buffer_head *bh;
66282 +       u64 lbo;
66284 +       sbi = sb->s_fs_info;
66285 +       vbo = (u64)iw << sb->s_blocksize_bits;
66287 +       if (!run_lookup_entry(&wnd->run, vbo >> sbi->cluster_bits, &lcn, &clen,
66288 +                             NULL)) {
66289 +               return ERR_PTR(-ENOENT);
66290 +       }
66292 +       lbo = ((u64)lcn << sbi->cluster_bits) + (vbo & sbi->cluster_mask);
66294 +       bh = ntfs_bread(wnd->sb, lbo >> sb->s_blocksize_bits);
66295 +       if (!bh)
66296 +               return ERR_PTR(-EIO);
66298 +       return bh;
66302 + * wnd_set_free
66303 + *
66304 + * Marks the bits range from bit to bit + bits as free
66305 + */
66306 +int wnd_set_free(struct wnd_bitmap *wnd, size_t bit, size_t bits)
66308 +       int err = 0;
66309 +       struct super_block *sb = wnd->sb;
66310 +       size_t bits0 = bits;
66311 +       u32 wbits = 8 * sb->s_blocksize;
66312 +       size_t iw = bit >> (sb->s_blocksize_bits + 3);
66313 +       u32 wbit = bit & (wbits - 1);
66314 +       struct buffer_head *bh;
66316 +       while (iw < wnd->nwnd && bits) {
66317 +               u32 tail, op;
66318 +               ulong *buf;
66320 +               if (iw + 1 == wnd->nwnd)
66321 +                       wbits = wnd->bits_last;
66323 +               tail = wbits - wbit;
66324 +               op = tail < bits ? tail : bits;
66326 +               bh = wnd_map(wnd, iw);
66327 +               if (IS_ERR(bh)) {
66328 +                       err = PTR_ERR(bh);
66329 +                       break;
66330 +               }
66332 +               buf = (ulong *)bh->b_data;
66334 +               lock_buffer(bh);
66336 +               __bitmap_clear(buf, wbit, op);
66338 +               wnd->free_bits[iw] += op;
66340 +               set_buffer_uptodate(bh);
66341 +               mark_buffer_dirty(bh);
66342 +               unlock_buffer(bh);
66343 +               put_bh(bh);
66345 +               wnd->total_zeroes += op;
66346 +               bits -= op;
66347 +               wbit = 0;
66348 +               iw += 1;
66349 +       }
66351 +       wnd_add_free_ext(wnd, bit, bits0, false);
66353 +       return err;
66357 + * wnd_set_used
66358 + *
66359 + * Marks the bits range from bit to bit + bits as used
66360 + */
66361 +int wnd_set_used(struct wnd_bitmap *wnd, size_t bit, size_t bits)
66363 +       int err = 0;
66364 +       struct super_block *sb = wnd->sb;
66365 +       size_t bits0 = bits;
66366 +       size_t iw = bit >> (sb->s_blocksize_bits + 3);
66367 +       u32 wbits = 8 * sb->s_blocksize;
66368 +       u32 wbit = bit & (wbits - 1);
66369 +       struct buffer_head *bh;
66371 +       while (iw < wnd->nwnd && bits) {
66372 +               u32 tail, op;
66373 +               ulong *buf;
66375 +               if (unlikely(iw + 1 == wnd->nwnd))
66376 +                       wbits = wnd->bits_last;
66378 +               tail = wbits - wbit;
66379 +               op = tail < bits ? tail : bits;
66381 +               bh = wnd_map(wnd, iw);
66382 +               if (IS_ERR(bh)) {
66383 +                       err = PTR_ERR(bh);
66384 +                       break;
66385 +               }
66386 +               buf = (ulong *)bh->b_data;
66388 +               lock_buffer(bh);
66390 +               __bitmap_set(buf, wbit, op);
66391 +               wnd->free_bits[iw] -= op;
66393 +               set_buffer_uptodate(bh);
66394 +               mark_buffer_dirty(bh);
66395 +               unlock_buffer(bh);
66396 +               put_bh(bh);
66398 +               wnd->total_zeroes -= op;
66399 +               bits -= op;
66400 +               wbit = 0;
66401 +               iw += 1;
66402 +       }
66404 +       if (!RB_EMPTY_ROOT(&wnd->start_tree))
66405 +               wnd_remove_free_ext(wnd, bit, bits0);
66407 +       return err;
66411 + * wnd_is_free_hlp
66412 + *
66413 + * Returns true if all clusters [bit, bit+bits) are free (bitmap only)
66414 + */
66415 +static bool wnd_is_free_hlp(struct wnd_bitmap *wnd, size_t bit, size_t bits)
66417 +       struct super_block *sb = wnd->sb;
66418 +       size_t iw = bit >> (sb->s_blocksize_bits + 3);
66419 +       u32 wbits = 8 * sb->s_blocksize;
66420 +       u32 wbit = bit & (wbits - 1);
66422 +       while (iw < wnd->nwnd && bits) {
66423 +               u32 tail, op;
66425 +               if (unlikely(iw + 1 == wnd->nwnd))
66426 +                       wbits = wnd->bits_last;
66428 +               tail = wbits - wbit;
66429 +               op = tail < bits ? tail : bits;
66431 +               if (wbits != wnd->free_bits[iw]) {
66432 +                       bool ret;
66433 +                       struct buffer_head *bh = wnd_map(wnd, iw);
66435 +                       if (IS_ERR(bh))
66436 +                               return false;
66438 +                       ret = are_bits_clear((ulong *)bh->b_data, wbit, op);
66440 +                       put_bh(bh);
66441 +                       if (!ret)
66442 +                               return false;
66443 +               }
66445 +               bits -= op;
66446 +               wbit = 0;
66447 +               iw += 1;
66448 +       }
66450 +       return true;
66454 + * wnd_is_free
66455 + *
66456 + * Returns true if all clusters [bit, bit+bits) are free
66457 + */
66458 +bool wnd_is_free(struct wnd_bitmap *wnd, size_t bit, size_t bits)
66460 +       bool ret;
66461 +       struct rb_node *n;
66462 +       size_t end;
66463 +       struct e_node *e;
66465 +       if (RB_EMPTY_ROOT(&wnd->start_tree))
66466 +               goto use_wnd;
66468 +       n = rb_lookup(&wnd->start_tree, bit);
66469 +       if (!n)
66470 +               goto use_wnd;
66472 +       e = rb_entry(n, struct e_node, start.node);
66474 +       end = e->start.key + e->count.key;
66476 +       if (bit < end && bit + bits <= end)
66477 +               return true;
66479 +use_wnd:
66480 +       ret = wnd_is_free_hlp(wnd, bit, bits);
66482 +       return ret;
66486 + * wnd_is_used
66487 + *
66488 + * Returns true if all clusters [bit, bit+bits) are used
66489 + */
66490 +bool wnd_is_used(struct wnd_bitmap *wnd, size_t bit, size_t bits)
66492 +       bool ret = false;
66493 +       struct super_block *sb = wnd->sb;
66494 +       size_t iw = bit >> (sb->s_blocksize_bits + 3);
66495 +       u32 wbits = 8 * sb->s_blocksize;
66496 +       u32 wbit = bit & (wbits - 1);
66497 +       size_t end;
66498 +       struct rb_node *n;
66499 +       struct e_node *e;
66501 +       if (RB_EMPTY_ROOT(&wnd->start_tree))
66502 +               goto use_wnd;
66504 +       end = bit + bits;
66505 +       n = rb_lookup(&wnd->start_tree, end - 1);
66506 +       if (!n)
66507 +               goto use_wnd;
66509 +       e = rb_entry(n, struct e_node, start.node);
66510 +       if (e->start.key + e->count.key > bit)
66511 +               return false;
66513 +use_wnd:
66514 +       while (iw < wnd->nwnd && bits) {
66515 +               u32 tail, op;
66517 +               if (unlikely(iw + 1 == wnd->nwnd))
66518 +                       wbits = wnd->bits_last;
66520 +               tail = wbits - wbit;
66521 +               op = tail < bits ? tail : bits;
66523 +               if (wnd->free_bits[iw]) {
66524 +                       bool ret;
66525 +                       struct buffer_head *bh = wnd_map(wnd, iw);
66527 +                       if (IS_ERR(bh))
66528 +                               goto out;
66530 +                       ret = are_bits_set((ulong *)bh->b_data, wbit, op);
66531 +                       put_bh(bh);
66532 +                       if (!ret)
66533 +                               goto out;
66534 +               }
66536 +               bits -= op;
66537 +               wbit = 0;
66538 +               iw += 1;
66539 +       }
66540 +       ret = true;
66542 +out:
66543 +       return ret;
66547 + * wnd_find
66548 + * - flags - BITMAP_FIND_XXX flags
66549 + *
66550 + * looks for free space
66551 + * Returns 0 if not found
66552 + */
66553 +size_t wnd_find(struct wnd_bitmap *wnd, size_t to_alloc, size_t hint,
66554 +               size_t flags, size_t *allocated)
66556 +       struct super_block *sb;
66557 +       u32 wbits, wpos, wzbit, wzend;
66558 +       size_t fnd, max_alloc, b_len, b_pos;
66559 +       size_t iw, prev_tail, nwnd, wbit, ebit, zbit, zend;
66560 +       size_t to_alloc0 = to_alloc;
66561 +       const ulong *buf;
66562 +       const struct e_node *e;
66563 +       const struct rb_node *pr, *cr;
66564 +       u8 log2_bits;
66565 +       bool fbits_valid;
66566 +       struct buffer_head *bh;
66568 +       /* fast checking for available free space */
66569 +       if (flags & BITMAP_FIND_FULL) {
66570 +               size_t zeroes = wnd_zeroes(wnd);
66572 +               zeroes -= wnd->zone_end - wnd->zone_bit;
66573 +               if (zeroes < to_alloc0)
66574 +                       goto no_space;
66576 +               if (to_alloc0 > wnd->extent_max)
66577 +                       goto no_space;
66578 +       } else {
66579 +               if (to_alloc > wnd->extent_max)
66580 +                       to_alloc = wnd->extent_max;
66581 +       }
66583 +       if (wnd->zone_bit <= hint && hint < wnd->zone_end)
66584 +               hint = wnd->zone_end;
66586 +       max_alloc = wnd->nbits;
66587 +       b_len = b_pos = 0;
66589 +       if (hint >= max_alloc)
66590 +               hint = 0;
66592 +       if (RB_EMPTY_ROOT(&wnd->start_tree)) {
66593 +               if (wnd->uptodated == 1) {
66594 +                       /* extents tree is updated -> no free space */
66595 +                       goto no_space;
66596 +               }
66597 +               goto scan_bitmap;
66598 +       }
66600 +       e = NULL;
66601 +       if (!hint)
66602 +               goto allocate_biggest;
66604 +       /* Use hint: enumerate extents by start >= hint */
66605 +       pr = NULL;
66606 +       cr = wnd->start_tree.rb_node;
66608 +       for (;;) {
66609 +               e = rb_entry(cr, struct e_node, start.node);
66611 +               if (e->start.key == hint)
66612 +                       break;
66614 +               if (e->start.key < hint) {
66615 +                       pr = cr;
66616 +                       cr = cr->rb_right;
66617 +                       if (!cr)
66618 +                               break;
66619 +                       continue;
66620 +               }
66622 +               cr = cr->rb_left;
66623 +               if (!cr) {
66624 +                       e = pr ? rb_entry(pr, struct e_node, start.node) : NULL;
66625 +                       break;
66626 +               }
66627 +       }
66629 +       if (!e)
66630 +               goto allocate_biggest;
66632 +       if (e->start.key + e->count.key > hint) {
66633 +               /* We have found extension with 'hint' inside */
66634 +               size_t len = e->start.key + e->count.key - hint;
66636 +               if (len >= to_alloc && hint + to_alloc <= max_alloc) {
66637 +                       fnd = hint;
66638 +                       goto found;
66639 +               }
66641 +               if (!(flags & BITMAP_FIND_FULL)) {
66642 +                       if (len > to_alloc)
66643 +                               len = to_alloc;
66645 +                       if (hint + len <= max_alloc) {
66646 +                               fnd = hint;
66647 +                               to_alloc = len;
66648 +                               goto found;
66649 +                       }
66650 +               }
66651 +       }
66653 +allocate_biggest:
66654 +       /* Allocate from biggest free extent */
66655 +       e = rb_entry(rb_first(&wnd->count_tree), struct e_node, count.node);
66656 +       if (e->count.key != wnd->extent_max)
66657 +               wnd->extent_max = e->count.key;
66659 +       if (e->count.key < max_alloc) {
66660 +               if (e->count.key >= to_alloc) {
66661 +                       ;
66662 +               } else if (flags & BITMAP_FIND_FULL) {
66663 +                       if (e->count.key < to_alloc0) {
66664 +                               /* Biggest free block is less then requested */
66665 +                               goto no_space;
66666 +                       }
66667 +                       to_alloc = e->count.key;
66668 +               } else if (-1 != wnd->uptodated) {
66669 +                       to_alloc = e->count.key;
66670 +               } else {
66671 +                       /* Check if we can use more bits */
66672 +                       size_t op, max_check;
66673 +                       struct rb_root start_tree;
66675 +                       memcpy(&start_tree, &wnd->start_tree,
66676 +                              sizeof(struct rb_root));
66677 +                       memset(&wnd->start_tree, 0, sizeof(struct rb_root));
66679 +                       max_check = e->start.key + to_alloc;
66680 +                       if (max_check > max_alloc)
66681 +                               max_check = max_alloc;
66682 +                       for (op = e->start.key + e->count.key; op < max_check;
66683 +                            op++) {
66684 +                               if (!wnd_is_free(wnd, op, 1))
66685 +                                       break;
66686 +                       }
66687 +                       memcpy(&wnd->start_tree, &start_tree,
66688 +                              sizeof(struct rb_root));
66689 +                       to_alloc = op - e->start.key;
66690 +               }
66692 +               /* Prepare to return */
66693 +               fnd = e->start.key;
66694 +               if (e->start.key + to_alloc > max_alloc)
66695 +                       to_alloc = max_alloc - e->start.key;
66696 +               goto found;
66697 +       }
66699 +       if (wnd->uptodated == 1) {
66700 +               /* extents tree is updated -> no free space */
66701 +               goto no_space;
66702 +       }
66704 +       b_len = e->count.key;
66705 +       b_pos = e->start.key;
66707 +scan_bitmap:
66708 +       sb = wnd->sb;
66709 +       log2_bits = sb->s_blocksize_bits + 3;
66711 +       /* At most two ranges [hint, max_alloc) + [0, hint) */
66712 +Again:
66714 +       /* TODO: optimize request for case nbits > wbits */
66715 +       iw = hint >> log2_bits;
66716 +       wbits = sb->s_blocksize * 8;
66717 +       wpos = hint & (wbits - 1);
66718 +       prev_tail = 0;
66719 +       fbits_valid = true;
66721 +       if (max_alloc == wnd->nbits) {
66722 +               nwnd = wnd->nwnd;
66723 +       } else {
66724 +               size_t t = max_alloc + wbits - 1;
66726 +               nwnd = likely(t > max_alloc) ? (t >> log2_bits) : wnd->nwnd;
66727 +       }
66729 +       /* Enumerate all windows */
66730 +       for (; iw < nwnd; iw++) {
66731 +               wbit = iw << log2_bits;
66733 +               if (!wnd->free_bits[iw]) {
66734 +                       if (prev_tail > b_len) {
66735 +                               b_pos = wbit - prev_tail;
66736 +                               b_len = prev_tail;
66737 +                       }
66739 +                       /* Skip full used window */
66740 +                       prev_tail = 0;
66741 +                       wpos = 0;
66742 +                       continue;
66743 +               }
66745 +               if (unlikely(iw + 1 == nwnd)) {
66746 +                       if (max_alloc == wnd->nbits) {
66747 +                               wbits = wnd->bits_last;
66748 +                       } else {
66749 +                               size_t t = max_alloc & (wbits - 1);
66751 +                               if (t) {
66752 +                                       wbits = t;
66753 +                                       fbits_valid = false;
66754 +                               }
66755 +                       }
66756 +               }
66758 +               if (wnd->zone_end > wnd->zone_bit) {
66759 +                       ebit = wbit + wbits;
66760 +                       zbit = max(wnd->zone_bit, wbit);
66761 +                       zend = min(wnd->zone_end, ebit);
66763 +                       /* Here we have a window [wbit, ebit) and zone [zbit, zend) */
66764 +                       if (zend <= zbit) {
66765 +                               /* Zone does not overlap window */
66766 +                       } else {
66767 +                               wzbit = zbit - wbit;
66768 +                               wzend = zend - wbit;
66770 +                               /* Zone overlaps window */
66771 +                               if (wnd->free_bits[iw] == wzend - wzbit) {
66772 +                                       prev_tail = 0;
66773 +                                       wpos = 0;
66774 +                                       continue;
66775 +                               }
66777 +                               /* Scan two ranges window: [wbit, zbit) and [zend, ebit) */
66778 +                               bh = wnd_map(wnd, iw);
66780 +                               if (IS_ERR(bh)) {
66781 +                                       /* TODO: error */
66782 +                                       prev_tail = 0;
66783 +                                       wpos = 0;
66784 +                                       continue;
66785 +                               }
66787 +                               buf = (ulong *)bh->b_data;
66789 +                               /* Scan range [wbit, zbit) */
66790 +                               if (wpos < wzbit) {
66791 +                                       /* Scan range [wpos, zbit) */
66792 +                                       fnd = wnd_scan(buf, wbit, wpos, wzbit,
66793 +                                                      to_alloc, &prev_tail,
66794 +                                                      &b_pos, &b_len);
66795 +                                       if (fnd != MINUS_ONE_T) {
66796 +                                               put_bh(bh);
66797 +                                               goto found;
66798 +                                       }
66799 +                               }
66801 +                               prev_tail = 0;
66803 +                               /* Scan range [zend, ebit) */
66804 +                               if (wzend < wbits) {
66805 +                                       fnd = wnd_scan(buf, wbit,
66806 +                                                      max(wzend, wpos), wbits,
66807 +                                                      to_alloc, &prev_tail,
66808 +                                                      &b_pos, &b_len);
66809 +                                       if (fnd != MINUS_ONE_T) {
66810 +                                               put_bh(bh);
66811 +                                               goto found;
66812 +                                       }
66813 +                               }
66815 +                               wpos = 0;
66816 +                               put_bh(bh);
66817 +                               continue;
66818 +                       }
66819 +               }
66821 +               /* Current window does not overlap zone */
66822 +               if (!wpos && fbits_valid && wnd->free_bits[iw] == wbits) {
66823 +                       /* window is empty */
66824 +                       if (prev_tail + wbits >= to_alloc) {
66825 +                               fnd = wbit + wpos - prev_tail;
66826 +                               goto found;
66827 +                       }
66829 +                       /* Increase 'prev_tail' and process next window */
66830 +                       prev_tail += wbits;
66831 +                       wpos = 0;
66832 +                       continue;
66833 +               }
66835 +               /* read window */
66836 +               bh = wnd_map(wnd, iw);
66837 +               if (IS_ERR(bh)) {
66838 +                       // TODO: error
66839 +                       prev_tail = 0;
66840 +                       wpos = 0;
66841 +                       continue;
66842 +               }
66844 +               buf = (ulong *)bh->b_data;
66846 +               /* Scan range [wpos, eBits) */
66847 +               fnd = wnd_scan(buf, wbit, wpos, wbits, to_alloc, &prev_tail,
66848 +                              &b_pos, &b_len);
66849 +               put_bh(bh);
66850 +               if (fnd != MINUS_ONE_T)
66851 +                       goto found;
66852 +       }
66854 +       if (b_len < prev_tail) {
66855 +               /* The last fragment */
66856 +               b_len = prev_tail;
66857 +               b_pos = max_alloc - prev_tail;
66858 +       }
66860 +       if (hint) {
66861 +               /*
66862 +                * We have scanned range [hint max_alloc)
66863 +                * Prepare to scan range [0 hint + to_alloc)
66864 +                */
66865 +               size_t nextmax = hint + to_alloc;
66867 +               if (likely(nextmax >= hint) && nextmax < max_alloc)
66868 +                       max_alloc = nextmax;
66869 +               hint = 0;
66870 +               goto Again;
66871 +       }
66873 +       if (!b_len)
66874 +               goto no_space;
66876 +       wnd->extent_max = b_len;
66878 +       if (flags & BITMAP_FIND_FULL)
66879 +               goto no_space;
66881 +       fnd = b_pos;
66882 +       to_alloc = b_len;
66884 +found:
66885 +       if (flags & BITMAP_FIND_MARK_AS_USED) {
66886 +               /* TODO optimize remove extent (pass 'e'?) */
66887 +               if (wnd_set_used(wnd, fnd, to_alloc))
66888 +                       goto no_space;
66889 +       } else if (wnd->extent_max != MINUS_ONE_T &&
66890 +                  to_alloc > wnd->extent_max) {
66891 +               wnd->extent_max = to_alloc;
66892 +       }
66894 +       *allocated = fnd;
66895 +       return to_alloc;
66897 +no_space:
66898 +       return 0;
66902 + * wnd_extend
66903 + *
66904 + * Extend bitmap ($MFT bitmap)
66905 + */
66906 +int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
66908 +       int err;
66909 +       struct super_block *sb = wnd->sb;
66910 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
66911 +       u32 blocksize = sb->s_blocksize;
66912 +       u32 wbits = blocksize * 8;
66913 +       u32 b0, new_last;
66914 +       size_t bits, iw, new_wnd;
66915 +       size_t old_bits = wnd->nbits;
66916 +       u16 *new_free;
66918 +       if (new_bits <= old_bits)
66919 +               return -EINVAL;
66921 +       /* align to 8 byte boundary */
66922 +       new_wnd = bytes_to_block(sb, bitmap_size(new_bits));
66923 +       new_last = new_bits & (wbits - 1);
66924 +       if (!new_last)
66925 +               new_last = wbits;
66927 +       if (new_wnd != wnd->nwnd) {
66928 +               new_free = ntfs_malloc(new_wnd * sizeof(u16));
66929 +               if (!new_free)
66930 +                       return -ENOMEM;
66932 +               if (new_free != wnd->free_bits)
66933 +                       memcpy(new_free, wnd->free_bits,
66934 +                              wnd->nwnd * sizeof(short));
66935 +               memset(new_free + wnd->nwnd, 0,
66936 +                      (new_wnd - wnd->nwnd) * sizeof(short));
66937 +               ntfs_free(wnd->free_bits);
66938 +               wnd->free_bits = new_free;
66939 +       }
66941 +       /* Zero bits [old_bits,new_bits) */
66942 +       bits = new_bits - old_bits;
66943 +       b0 = old_bits & (wbits - 1);
66945 +       for (iw = old_bits >> (sb->s_blocksize_bits + 3); bits; iw += 1) {
66946 +               u32 op;
66947 +               size_t frb;
66948 +               u64 vbo, lbo, bytes;
66949 +               struct buffer_head *bh;
66950 +               ulong *buf;
66952 +               if (iw + 1 == new_wnd)
66953 +                       wbits = new_last;
66955 +               op = b0 + bits > wbits ? wbits - b0 : bits;
66956 +               vbo = (u64)iw * blocksize;
66958 +               err = ntfs_vbo_to_lbo(sbi, &wnd->run, vbo, &lbo, &bytes);
66959 +               if (err)
66960 +                       break;
66962 +               bh = ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
66963 +               if (!bh)
66964 +                       return -EIO;
66966 +               lock_buffer(bh);
66967 +               buf = (ulong *)bh->b_data;
66969 +               __bitmap_clear(buf, b0, blocksize * 8 - b0);
66970 +               frb = wbits - __bitmap_weight(buf, wbits);
66971 +               wnd->total_zeroes += frb - wnd->free_bits[iw];
66972 +               wnd->free_bits[iw] = frb;
66974 +               set_buffer_uptodate(bh);
66975 +               mark_buffer_dirty(bh);
66976 +               unlock_buffer(bh);
66977 +               /*err = sync_dirty_buffer(bh);*/
66979 +               b0 = 0;
66980 +               bits -= op;
66981 +       }
66983 +       wnd->nbits = new_bits;
66984 +       wnd->nwnd = new_wnd;
66985 +       wnd->bits_last = new_last;
66987 +       wnd_add_free_ext(wnd, old_bits, new_bits - old_bits, false);
66989 +       return 0;
66993 + * wnd_zone_set
66994 + */
66995 +void wnd_zone_set(struct wnd_bitmap *wnd, size_t lcn, size_t len)
66997 +       size_t zlen;
66999 +       zlen = wnd->zone_end - wnd->zone_bit;
67000 +       if (zlen)
67001 +               wnd_add_free_ext(wnd, wnd->zone_bit, zlen, false);
67003 +       if (!RB_EMPTY_ROOT(&wnd->start_tree) && len)
67004 +               wnd_remove_free_ext(wnd, lcn, len);
67006 +       wnd->zone_bit = lcn;
67007 +       wnd->zone_end = lcn + len;
67010 +int ntfs_trim_fs(struct ntfs_sb_info *sbi, struct fstrim_range *range)
67012 +       int err = 0;
67013 +       struct super_block *sb = sbi->sb;
67014 +       struct wnd_bitmap *wnd = &sbi->used.bitmap;
67015 +       u32 wbits = 8 * sb->s_blocksize;
67016 +       CLST len = 0, lcn = 0, done = 0;
67017 +       CLST minlen = bytes_to_cluster(sbi, range->minlen);
67018 +       CLST lcn_from = bytes_to_cluster(sbi, range->start);
67019 +       size_t iw = lcn_from >> (sb->s_blocksize_bits + 3);
67020 +       u32 wbit = lcn_from & (wbits - 1);
67021 +       const ulong *buf;
67022 +       CLST lcn_to;
67024 +       if (!minlen)
67025 +               minlen = 1;
67027 +       if (range->len == (u64)-1)
67028 +               lcn_to = wnd->nbits;
67029 +       else
67030 +               lcn_to = bytes_to_cluster(sbi, range->start + range->len);
67032 +       down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
67034 +       for (; iw < wnd->nbits; iw++, wbit = 0) {
67035 +               CLST lcn_wnd = iw * wbits;
67036 +               struct buffer_head *bh;
67038 +               if (lcn_wnd > lcn_to)
67039 +                       break;
67041 +               if (!wnd->free_bits[iw])
67042 +                       continue;
67044 +               if (iw + 1 == wnd->nwnd)
67045 +                       wbits = wnd->bits_last;
67047 +               if (lcn_wnd + wbits > lcn_to)
67048 +                       wbits = lcn_to - lcn_wnd;
67050 +               bh = wnd_map(wnd, iw);
67051 +               if (IS_ERR(bh)) {
67052 +                       err = PTR_ERR(bh);
67053 +                       break;
67054 +               }
67056 +               buf = (ulong *)bh->b_data;
67058 +               for (; wbit < wbits; wbit++) {
67059 +                       if (!test_bit(wbit, buf)) {
67060 +                               if (!len)
67061 +                                       lcn = lcn_wnd + wbit;
67062 +                               len += 1;
67063 +                               continue;
67064 +                       }
67065 +                       if (len >= minlen) {
67066 +                               err = ntfs_discard(sbi, lcn, len);
67067 +                               if (err)
67068 +                                       goto out;
67069 +                               done += len;
67070 +                       }
67071 +                       len = 0;
67072 +               }
67073 +               put_bh(bh);
67074 +       }
67076 +       /* Process the last fragment */
67077 +       if (len >= minlen) {
67078 +               err = ntfs_discard(sbi, lcn, len);
67079 +               if (err)
67080 +                       goto out;
67081 +               done += len;
67082 +       }
67084 +out:
67085 +       range->len = (u64)done << sbi->cluster_bits;
67087 +       up_read(&wnd->rw_lock);
67089 +       return err;
67091 diff --git a/fs/ntfs3/debug.h b/fs/ntfs3/debug.h
67092 new file mode 100644
67093 index 000000000000..dfaa4c79dc6d
67094 --- /dev/null
67095 +++ b/fs/ntfs3/debug.h
67096 @@ -0,0 +1,64 @@
67097 +/* SPDX-License-Identifier: GPL-2.0 */
67099 + *
67100 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
67101 + *
67102 + * useful functions for debuging
67103 + */
67105 +// clang-format off
67106 +#ifndef Add2Ptr
67107 +#define Add2Ptr(P, I)          ((void *)((u8 *)(P) + (I)))
67108 +#define PtrOffset(B, O)                ((size_t)((size_t)(O) - (size_t)(B)))
67109 +#endif
67111 +#define QuadAlign(n)           (((n) + 7u) & (~7u))
67112 +#define IsQuadAligned(n)       (!((size_t)(n)&7u))
67113 +#define Quad2Align(n)          (((n) + 15u) & (~15u))
67114 +#define IsQuad2Aligned(n)      (!((size_t)(n)&15u))
67115 +#define Quad4Align(n)          (((n) + 31u) & (~31u))
67116 +#define IsSizeTAligned(n)      (!((size_t)(n) & (sizeof(size_t) - 1)))
67117 +#define DwordAlign(n)          (((n) + 3u) & (~3u))
67118 +#define IsDwordAligned(n)      (!((size_t)(n)&3u))
67119 +#define WordAlign(n)           (((n) + 1u) & (~1u))
67120 +#define IsWordAligned(n)       (!((size_t)(n)&1u))
67122 +#ifdef CONFIG_PRINTK
67123 +__printf(2, 3)
67124 +void ntfs_printk(const struct super_block *sb, const char *fmt, ...);
67125 +__printf(2, 3)
67126 +void ntfs_inode_printk(struct inode *inode, const char *fmt, ...);
67127 +#else
67128 +static inline __printf(2, 3)
67129 +void ntfs_printk(const struct super_block *sb, const char *fmt, ...)
67133 +static inline __printf(2, 3)
67134 +void ntfs_inode_printk(struct inode *inode, const char *fmt, ...)
67137 +#endif
67140 + * Logging macros ( thanks Joe Perches <joe@perches.com> for implementation )
67141 + */
67143 +#define ntfs_err(sb, fmt, ...)  ntfs_printk(sb, KERN_ERR fmt, ##__VA_ARGS__)
67144 +#define ntfs_warn(sb, fmt, ...) ntfs_printk(sb, KERN_WARNING fmt, ##__VA_ARGS__)
67145 +#define ntfs_info(sb, fmt, ...) ntfs_printk(sb, KERN_INFO fmt, ##__VA_ARGS__)
67146 +#define ntfs_notice(sb, fmt, ...)                                              \
67147 +       ntfs_printk(sb, KERN_NOTICE fmt, ##__VA_ARGS__)
67149 +#define ntfs_inode_err(inode, fmt, ...)                                        \
67150 +       ntfs_inode_printk(inode, KERN_ERR fmt, ##__VA_ARGS__)
67151 +#define ntfs_inode_warn(inode, fmt, ...)                                       \
67152 +       ntfs_inode_printk(inode, KERN_WARNING fmt, ##__VA_ARGS__)
67154 +#define ntfs_malloc(s)         kmalloc(s, GFP_NOFS)
67155 +#define ntfs_zalloc(s)         kzalloc(s, GFP_NOFS)
67156 +#define ntfs_vmalloc(s)                kvmalloc(s, GFP_KERNEL)
67157 +#define ntfs_free(p)           kfree(p)
67158 +#define ntfs_vfree(p)          kvfree(p)
67159 +#define ntfs_memdup(src, len)  kmemdup(src, len, GFP_NOFS)
67160 +// clang-format on
67161 diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c
67162 new file mode 100644
67163 index 000000000000..9ec6012c405b
67164 --- /dev/null
67165 +++ b/fs/ntfs3/dir.c
67166 @@ -0,0 +1,594 @@
67167 +// SPDX-License-Identifier: GPL-2.0
67169 + *
67170 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
67171 + *
67172 + *  directory handling functions for ntfs-based filesystems
67173 + *
67174 + */
67175 +#include <linux/blkdev.h>
67176 +#include <linux/buffer_head.h>
67177 +#include <linux/fs.h>
67178 +#include <linux/iversion.h>
67179 +#include <linux/nls.h>
67181 +#include "debug.h"
67182 +#include "ntfs.h"
67183 +#include "ntfs_fs.h"
67186 + * Convert little endian utf16 to nls string
67187 + */
67188 +int ntfs_utf16_to_nls(struct ntfs_sb_info *sbi, const struct le_str *uni,
67189 +                     u8 *buf, int buf_len)
67191 +       int ret, uni_len, warn;
67192 +       const __le16 *ip;
67193 +       u8 *op;
67194 +       struct nls_table *nls = sbi->options.nls;
67196 +       static_assert(sizeof(wchar_t) == sizeof(__le16));
67198 +       if (!nls) {
67199 +               /* utf16 -> utf8 */
67200 +               ret = utf16s_to_utf8s((wchar_t *)uni->name, uni->len,
67201 +                                     UTF16_LITTLE_ENDIAN, buf, buf_len);
67202 +               buf[ret] = '\0';
67203 +               return ret;
67204 +       }
67206 +       ip = uni->name;
67207 +       op = buf;
67208 +       uni_len = uni->len;
67209 +       warn = 0;
67211 +       while (uni_len--) {
67212 +               u16 ec;
67213 +               int charlen;
67214 +               char dump[5];
67216 +               if (buf_len < NLS_MAX_CHARSET_SIZE) {
67217 +                       ntfs_warn(sbi->sb,
67218 +                                 "filename was truncated while converting.");
67219 +                       break;
67220 +               }
67222 +               ec = le16_to_cpu(*ip++);
67223 +               charlen = nls->uni2char(ec, op, buf_len);
67225 +               if (charlen > 0) {
67226 +                       op += charlen;
67227 +                       buf_len -= charlen;
67228 +                       continue;
67229 +               }
67231 +               *op++ = '_';
67232 +               buf_len -= 1;
67233 +               if (warn)
67234 +                       continue;
67236 +               warn = 1;
67237 +               hex_byte_pack(&dump[0], ec >> 8);
67238 +               hex_byte_pack(&dump[2], ec);
67239 +               dump[4] = 0;
67241 +               ntfs_err(sbi->sb, "failed to convert \"%s\" to %s", dump,
67242 +                        nls->charset);
67243 +       }
67245 +       *op = '\0';
67246 +       return op - buf;
67249 +// clang-format off
67250 +#define PLANE_SIZE     0x00010000
67252 +#define SURROGATE_PAIR 0x0000d800
67253 +#define SURROGATE_LOW  0x00000400
67254 +#define SURROGATE_BITS 0x000003ff
67255 +// clang-format on
67258 + * modified version of put_utf16 from fs/nls/nls_base.c
67259 + * is sparse warnings free
67260 + */
67261 +static inline void put_utf16(wchar_t *s, unsigned int c,
67262 +                            enum utf16_endian endian)
67264 +       static_assert(sizeof(wchar_t) == sizeof(__le16));
67265 +       static_assert(sizeof(wchar_t) == sizeof(__be16));
67267 +       switch (endian) {
67268 +       default:
67269 +               *s = (wchar_t)c;
67270 +               break;
67271 +       case UTF16_LITTLE_ENDIAN:
67272 +               *(__le16 *)s = __cpu_to_le16(c);
67273 +               break;
67274 +       case UTF16_BIG_ENDIAN:
67275 +               *(__be16 *)s = __cpu_to_be16(c);
67276 +               break;
67277 +       }
67281 + * modified version of 'utf8s_to_utf16s' allows to
67282 + * detect -ENAMETOOLONG without writing out of expected maximum
67283 + */
67284 +static int _utf8s_to_utf16s(const u8 *s, int inlen, enum utf16_endian endian,
67285 +                           wchar_t *pwcs, int maxout)
67287 +       u16 *op;
67288 +       int size;
67289 +       unicode_t u;
67291 +       op = pwcs;
67292 +       while (inlen > 0 && *s) {
67293 +               if (*s & 0x80) {
67294 +                       size = utf8_to_utf32(s, inlen, &u);
67295 +                       if (size < 0)
67296 +                               return -EINVAL;
67297 +                       s += size;
67298 +                       inlen -= size;
67300 +                       if (u >= PLANE_SIZE) {
67301 +                               if (maxout < 2)
67302 +                                       return -ENAMETOOLONG;
67304 +                               u -= PLANE_SIZE;
67305 +                               put_utf16(op++,
67306 +                                         SURROGATE_PAIR |
67307 +                                                 ((u >> 10) & SURROGATE_BITS),
67308 +                                         endian);
67309 +                               put_utf16(op++,
67310 +                                         SURROGATE_PAIR | SURROGATE_LOW |
67311 +                                                 (u & SURROGATE_BITS),
67312 +                                         endian);
67313 +                               maxout -= 2;
67314 +                       } else {
67315 +                               if (maxout < 1)
67316 +                                       return -ENAMETOOLONG;
67318 +                               put_utf16(op++, u, endian);
67319 +                               maxout--;
67320 +                       }
67321 +               } else {
67322 +                       if (maxout < 1)
67323 +                               return -ENAMETOOLONG;
67325 +                       put_utf16(op++, *s++, endian);
67326 +                       inlen--;
67327 +                       maxout--;
67328 +               }
67329 +       }
67330 +       return op - pwcs;
67334 + * Convert input string to utf16
67335 + *
67336 + * name, name_len - input name
67337 + * uni, max_ulen - destination memory
67338 + * endian - endian of target utf16 string
67339 + *
67340 + * This function is called:
67341 + * - to create ntfs name
67342 + * - to create symlink
67343 + *
67344 + * returns utf16 string length or error (if negative)
67345 + */
67346 +int ntfs_nls_to_utf16(struct ntfs_sb_info *sbi, const u8 *name, u32 name_len,
67347 +                     struct cpu_str *uni, u32 max_ulen,
67348 +                     enum utf16_endian endian)
67350 +       int ret, slen;
67351 +       const u8 *end;
67352 +       struct nls_table *nls = sbi->options.nls;
67353 +       u16 *uname = uni->name;
67355 +       static_assert(sizeof(wchar_t) == sizeof(u16));
67357 +       if (!nls) {
67358 +               /* utf8 -> utf16 */
67359 +               ret = _utf8s_to_utf16s(name, name_len, endian, uname, max_ulen);
67360 +               uni->len = ret;
67361 +               return ret;
67362 +       }
67364 +       for (ret = 0, end = name + name_len; name < end; ret++, name += slen) {
67365 +               if (ret >= max_ulen)
67366 +                       return -ENAMETOOLONG;
67368 +               slen = nls->char2uni(name, end - name, uname + ret);
67369 +               if (!slen)
67370 +                       return -EINVAL;
67371 +               if (slen < 0)
67372 +                       return slen;
67373 +       }
67375 +#ifdef __BIG_ENDIAN
67376 +       if (endian == UTF16_LITTLE_ENDIAN) {
67377 +               int i = ret;
67379 +               while (i--) {
67380 +                       __cpu_to_le16s(uname);
67381 +                       uname++;
67382 +               }
67383 +       }
67384 +#else
67385 +       if (endian == UTF16_BIG_ENDIAN) {
67386 +               int i = ret;
67388 +               while (i--) {
67389 +                       __cpu_to_be16s(uname);
67390 +                       uname++;
67391 +               }
67392 +       }
67393 +#endif
67395 +       uni->len = ret;
67396 +       return ret;
67399 +/* helper function */
67400 +struct inode *dir_search_u(struct inode *dir, const struct cpu_str *uni,
67401 +                          struct ntfs_fnd *fnd)
67403 +       int err = 0;
67404 +       struct super_block *sb = dir->i_sb;
67405 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
67406 +       struct ntfs_inode *ni = ntfs_i(dir);
67407 +       struct NTFS_DE *e;
67408 +       int diff;
67409 +       struct inode *inode = NULL;
67410 +       struct ntfs_fnd *fnd_a = NULL;
67412 +       if (!fnd) {
67413 +               fnd_a = fnd_get();
67414 +               if (!fnd_a) {
67415 +                       err = -ENOMEM;
67416 +                       goto out;
67417 +               }
67418 +               fnd = fnd_a;
67419 +       }
67421 +       err = indx_find(&ni->dir, ni, NULL, uni, 0, sbi, &diff, &e, fnd);
67423 +       if (err)
67424 +               goto out;
67426 +       if (diff) {
67427 +               err = -ENOENT;
67428 +               goto out;
67429 +       }
67431 +       inode = ntfs_iget5(sb, &e->ref, uni);
67432 +       if (!IS_ERR(inode) && is_bad_inode(inode)) {
67433 +               iput(inode);
67434 +               err = -EINVAL;
67435 +       }
67436 +out:
67437 +       fnd_put(fnd_a);
67439 +       return err == -ENOENT ? NULL : err ? ERR_PTR(err) : inode;
67442 +static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
67443 +                              const struct NTFS_DE *e, u8 *name,
67444 +                              struct dir_context *ctx)
67446 +       const struct ATTR_FILE_NAME *fname;
67447 +       unsigned long ino;
67448 +       int name_len;
67449 +       u32 dt_type;
67451 +       fname = Add2Ptr(e, sizeof(struct NTFS_DE));
67453 +       if (fname->type == FILE_NAME_DOS)
67454 +               return 0;
67456 +       if (!mi_is_ref(&ni->mi, &fname->home))
67457 +               return 0;
67459 +       ino = ino_get(&e->ref);
67461 +       if (ino == MFT_REC_ROOT)
67462 +               return 0;
67464 +       /* Skip meta files ( unless option to show metafiles is set ) */
67465 +       if (!sbi->options.showmeta && ntfs_is_meta_file(sbi, ino))
67466 +               return 0;
67468 +       if (sbi->options.nohidden && (fname->dup.fa & FILE_ATTRIBUTE_HIDDEN))
67469 +               return 0;
67471 +       name_len = ntfs_utf16_to_nls(sbi, (struct le_str *)&fname->name_len,
67472 +                                    name, PATH_MAX);
67473 +       if (name_len <= 0) {
67474 +               ntfs_warn(sbi->sb, "failed to convert name for inode %lx.",
67475 +                         ino);
67476 +               return 0;
67477 +       }
67479 +       dt_type = (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY) ? DT_DIR : DT_REG;
67481 +       return !dir_emit(ctx, (s8 *)name, name_len, ino, dt_type);
67485 + * ntfs_read_hdr
67486 + *
67487 + * helper function 'ntfs_readdir'
67488 + */
67489 +static int ntfs_read_hdr(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
67490 +                        const struct INDEX_HDR *hdr, u64 vbo, u64 pos,
67491 +                        u8 *name, struct dir_context *ctx)
67493 +       int err;
67494 +       const struct NTFS_DE *e;
67495 +       u32 e_size;
67496 +       u32 end = le32_to_cpu(hdr->used);
67497 +       u32 off = le32_to_cpu(hdr->de_off);
67499 +       for (;; off += e_size) {
67500 +               if (off + sizeof(struct NTFS_DE) > end)
67501 +                       return -1;
67503 +               e = Add2Ptr(hdr, off);
67504 +               e_size = le16_to_cpu(e->size);
67505 +               if (e_size < sizeof(struct NTFS_DE) || off + e_size > end)
67506 +                       return -1;
67508 +               if (de_is_last(e))
67509 +                       return 0;
67511 +               /* Skip already enumerated*/
67512 +               if (vbo + off < pos)
67513 +                       continue;
67515 +               if (le16_to_cpu(e->key_size) < SIZEOF_ATTRIBUTE_FILENAME)
67516 +                       return -1;
67518 +               ctx->pos = vbo + off;
67520 +               /* Submit the name to the filldir callback. */
67521 +               err = ntfs_filldir(sbi, ni, e, name, ctx);
67522 +               if (err)
67523 +                       return err;
67524 +       }
67528 + * file_operations::iterate_shared
67529 + *
67530 + * Use non sorted enumeration.
67531 + * We have an example of broken volume where sorted enumeration
67532 + * counts each name twice
67533 + */
67534 +static int ntfs_readdir(struct file *file, struct dir_context *ctx)
67536 +       const struct INDEX_ROOT *root;
67537 +       u64 vbo;
67538 +       size_t bit;
67539 +       loff_t eod;
67540 +       int err = 0;
67541 +       struct inode *dir = file_inode(file);
67542 +       struct ntfs_inode *ni = ntfs_i(dir);
67543 +       struct super_block *sb = dir->i_sb;
67544 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
67545 +       loff_t i_size = i_size_read(dir);
67546 +       u32 pos = ctx->pos;
67547 +       u8 *name = NULL;
67548 +       struct indx_node *node = NULL;
67549 +       u8 index_bits = ni->dir.index_bits;
67551 +       /* name is a buffer of PATH_MAX length */
67552 +       static_assert(NTFS_NAME_LEN * 4 < PATH_MAX);
67554 +       eod = i_size + sbi->record_size;
67556 +       if (pos >= eod)
67557 +               return 0;
67559 +       if (!dir_emit_dots(file, ctx))
67560 +               return 0;
67562 +       /* allocate PATH_MAX bytes */
67563 +       name = __getname();
67564 +       if (!name)
67565 +               return -ENOMEM;
67567 +       if (!ni->mi_loaded && ni->attr_list.size) {
67568 +               /*
67569 +                * directory inode is locked for read
67570 +                * load all subrecords to avoid 'write' access to 'ni' during
67571 +                * directory reading
67572 +                */
67573 +               ni_lock(ni);
67574 +               if (!ni->mi_loaded && ni->attr_list.size) {
67575 +                       err = ni_load_all_mi(ni);
67576 +                       if (!err)
67577 +                               ni->mi_loaded = true;
67578 +               }
67579 +               ni_unlock(ni);
67580 +               if (err)
67581 +                       goto out;
67582 +       }
67584 +       root = indx_get_root(&ni->dir, ni, NULL, NULL);
67585 +       if (!root) {
67586 +               err = -EINVAL;
67587 +               goto out;
67588 +       }
67590 +       if (pos >= sbi->record_size) {
67591 +               bit = (pos - sbi->record_size) >> index_bits;
67592 +       } else {
67593 +               err = ntfs_read_hdr(sbi, ni, &root->ihdr, 0, pos, name, ctx);
67594 +               if (err)
67595 +                       goto out;
67596 +               bit = 0;
67597 +       }
67599 +       if (!i_size) {
67600 +               ctx->pos = eod;
67601 +               goto out;
67602 +       }
67604 +       for (;;) {
67605 +               vbo = (u64)bit << index_bits;
67606 +               if (vbo >= i_size) {
67607 +                       ctx->pos = eod;
67608 +                       goto out;
67609 +               }
67611 +               err = indx_used_bit(&ni->dir, ni, &bit);
67612 +               if (err)
67613 +                       goto out;
67615 +               if (bit == MINUS_ONE_T) {
67616 +                       ctx->pos = eod;
67617 +                       goto out;
67618 +               }
67620 +               vbo = (u64)bit << index_bits;
67621 +               if (vbo >= i_size) {
67622 +                       ntfs_inode_err(dir, "Looks like your dir is corrupt");
67623 +                       err = -EINVAL;
67624 +                       goto out;
67625 +               }
67627 +               err = indx_read(&ni->dir, ni, bit << ni->dir.idx2vbn_bits,
67628 +                               &node);
67629 +               if (err)
67630 +                       goto out;
67632 +               err = ntfs_read_hdr(sbi, ni, &node->index->ihdr,
67633 +                                   vbo + sbi->record_size, pos, name, ctx);
67634 +               if (err)
67635 +                       goto out;
67637 +               bit += 1;
67638 +       }
67640 +out:
67642 +       __putname(name);
67643 +       put_indx_node(node);
67645 +       if (err == -ENOENT) {
67646 +               err = 0;
67647 +               ctx->pos = pos;
67648 +       }
67650 +       return err;
67653 +static int ntfs_dir_count(struct inode *dir, bool *is_empty, size_t *dirs,
67654 +                         size_t *files)
67656 +       int err = 0;
67657 +       struct ntfs_inode *ni = ntfs_i(dir);
67658 +       struct NTFS_DE *e = NULL;
67659 +       struct INDEX_ROOT *root;
67660 +       struct INDEX_HDR *hdr;
67661 +       const struct ATTR_FILE_NAME *fname;
67662 +       u32 e_size, off, end;
67663 +       u64 vbo = 0;
67664 +       size_t drs = 0, fles = 0, bit = 0;
67665 +       loff_t i_size = ni->vfs_inode.i_size;
67666 +       struct indx_node *node = NULL;
67667 +       u8 index_bits = ni->dir.index_bits;
67669 +       if (is_empty)
67670 +               *is_empty = true;
67672 +       root = indx_get_root(&ni->dir, ni, NULL, NULL);
67673 +       if (!root)
67674 +               return -EINVAL;
67676 +       hdr = &root->ihdr;
67678 +       for (;;) {
67679 +               end = le32_to_cpu(hdr->used);
67680 +               off = le32_to_cpu(hdr->de_off);
67682 +               for (; off + sizeof(struct NTFS_DE) <= end; off += e_size) {
67683 +                       e = Add2Ptr(hdr, off);
67684 +                       e_size = le16_to_cpu(e->size);
67685 +                       if (e_size < sizeof(struct NTFS_DE) ||
67686 +                           off + e_size > end)
67687 +                               break;
67689 +                       if (de_is_last(e))
67690 +                               break;
67692 +                       fname = de_get_fname(e);
67693 +                       if (!fname)
67694 +                               continue;
67696 +                       if (fname->type == FILE_NAME_DOS)
67697 +                               continue;
67699 +                       if (is_empty) {
67700 +                               *is_empty = false;
67701 +                               if (!dirs && !files)
67702 +                                       goto out;
67703 +                       }
67705 +                       if (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY)
67706 +                               drs += 1;
67707 +                       else
67708 +                               fles += 1;
67709 +               }
67711 +               if (vbo >= i_size)
67712 +                       goto out;
67714 +               err = indx_used_bit(&ni->dir, ni, &bit);
67715 +               if (err)
67716 +                       goto out;
67718 +               if (bit == MINUS_ONE_T)
67719 +                       goto out;
67721 +               vbo = (u64)bit << index_bits;
67722 +               if (vbo >= i_size)
67723 +                       goto out;
67725 +               err = indx_read(&ni->dir, ni, bit << ni->dir.idx2vbn_bits,
67726 +                               &node);
67727 +               if (err)
67728 +                       goto out;
67730 +               hdr = &node->index->ihdr;
67731 +               bit += 1;
67732 +               vbo = (u64)bit << ni->dir.idx2vbn_bits;
67733 +       }
67735 +out:
67736 +       put_indx_node(node);
67737 +       if (dirs)
67738 +               *dirs = drs;
67739 +       if (files)
67740 +               *files = fles;
67742 +       return err;
67745 +bool dir_is_empty(struct inode *dir)
67747 +       bool is_empty = false;
67749 +       ntfs_dir_count(dir, &is_empty, NULL, NULL);
67751 +       return is_empty;
67754 +const struct file_operations ntfs_dir_operations = {
67755 +       .llseek = generic_file_llseek,
67756 +       .read = generic_read_dir,
67757 +       .iterate_shared = ntfs_readdir,
67758 +       .fsync = generic_file_fsync,
67759 +       .open = ntfs_file_open,
67761 diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
67762 new file mode 100644
67763 index 000000000000..347baf674008
67764 --- /dev/null
67765 +++ b/fs/ntfs3/file.c
67766 @@ -0,0 +1,1130 @@
67767 +// SPDX-License-Identifier: GPL-2.0
67769 + *
67770 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
67771 + *
67772 + *  regular file handling primitives for ntfs-based filesystems
67773 + */
67774 +#include <linux/backing-dev.h>
67775 +#include <linux/buffer_head.h>
67776 +#include <linux/compat.h>
67777 +#include <linux/falloc.h>
67778 +#include <linux/fiemap.h>
67779 +#include <linux/msdos_fs.h> /* FAT_IOCTL_XXX */
67780 +#include <linux/nls.h>
67782 +#include "debug.h"
67783 +#include "ntfs.h"
67784 +#include "ntfs_fs.h"
67786 +static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
67788 +       struct fstrim_range __user *user_range;
67789 +       struct fstrim_range range;
67790 +       struct request_queue *q = bdev_get_queue(sbi->sb->s_bdev);
67791 +       int err;
67793 +       if (!capable(CAP_SYS_ADMIN))
67794 +               return -EPERM;
67796 +       if (!blk_queue_discard(q))
67797 +               return -EOPNOTSUPP;
67799 +       user_range = (struct fstrim_range __user *)arg;
67800 +       if (copy_from_user(&range, user_range, sizeof(range)))
67801 +               return -EFAULT;
67803 +       range.minlen = max_t(u32, range.minlen, q->limits.discard_granularity);
67805 +       err = ntfs_trim_fs(sbi, &range);
67806 +       if (err < 0)
67807 +               return err;
67809 +       if (copy_to_user(user_range, &range, sizeof(range)))
67810 +               return -EFAULT;
67812 +       return 0;
67815 +static long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
67817 +       struct inode *inode = file_inode(filp);
67818 +       struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
67819 +       u32 __user *user_attr = (u32 __user *)arg;
67821 +       switch (cmd) {
67822 +       case FAT_IOCTL_GET_ATTRIBUTES:
67823 +               return put_user(le32_to_cpu(ntfs_i(inode)->std_fa), user_attr);
67825 +       case FAT_IOCTL_GET_VOLUME_ID:
67826 +               return put_user(sbi->volume.ser_num, user_attr);
67828 +       case FITRIM:
67829 +               return ntfs_ioctl_fitrim(sbi, arg);
67830 +       }
67831 +       return -ENOTTY; /* Inappropriate ioctl for device */
67834 +#ifdef CONFIG_COMPAT
67835 +static long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg)
67838 +       return ntfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
67840 +#endif
67843 + * inode_operations::getattr
67844 + */
67845 +int ntfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
67846 +                struct kstat *stat, u32 request_mask, u32 flags)
67848 +       struct inode *inode = d_inode(path->dentry);
67849 +       struct ntfs_inode *ni = ntfs_i(inode);
67851 +       if (is_compressed(ni))
67852 +               stat->attributes |= STATX_ATTR_COMPRESSED;
67854 +       if (is_encrypted(ni))
67855 +               stat->attributes |= STATX_ATTR_ENCRYPTED;
67857 +       stat->attributes_mask |= STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED;
67859 +       generic_fillattr(mnt_userns, inode, stat);
67861 +       stat->result_mask |= STATX_BTIME;
67862 +       stat->btime = ni->i_crtime;
67864 +       return 0;
67867 +static int ntfs_extend_initialized_size(struct file *file,
67868 +                                       struct ntfs_inode *ni,
67869 +                                       const loff_t valid,
67870 +                                       const loff_t new_valid)
67872 +       struct inode *inode = &ni->vfs_inode;
67873 +       struct address_space *mapping = inode->i_mapping;
67874 +       struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
67875 +       loff_t pos = valid;
67876 +       int err;
67878 +       if (is_resident(ni)) {
67879 +               ni->i_valid = new_valid;
67880 +               return 0;
67881 +       }
67883 +       WARN_ON(is_compressed(ni));
67884 +       WARN_ON(valid >= new_valid);
67886 +       for (;;) {
67887 +               u32 zerofrom, len;
67888 +               struct page *page;
67889 +               void *fsdata;
67890 +               u8 bits;
67891 +               CLST vcn, lcn, clen;
67893 +               if (is_sparsed(ni)) {
67894 +                       bits = sbi->cluster_bits;
67895 +                       vcn = pos >> bits;
67897 +                       err = attr_data_get_block(ni, vcn, 0, &lcn, &clen,
67898 +                                                 NULL);
67899 +                       if (err)
67900 +                               goto out;
67902 +                       if (lcn == SPARSE_LCN) {
67903 +                               loff_t vbo = (loff_t)vcn << bits;
67904 +                               loff_t to = vbo + ((loff_t)clen << bits);
67906 +                               if (to <= new_valid) {
67907 +                                       ni->i_valid = to;
67908 +                                       pos = to;
67909 +                                       goto next;
67910 +                               }
67912 +                               if (vbo < pos) {
67913 +                                       pos = vbo;
67914 +                               } else {
67915 +                                       to = (new_valid >> bits) << bits;
67916 +                                       if (pos < to) {
67917 +                                               ni->i_valid = to;
67918 +                                               pos = to;
67919 +                                               goto next;
67920 +                                       }
67921 +                               }
67922 +                       }
67923 +               }
67925 +               zerofrom = pos & (PAGE_SIZE - 1);
67926 +               len = PAGE_SIZE - zerofrom;
67928 +               if (pos + len > new_valid)
67929 +                       len = new_valid - pos;
67931 +               err = pagecache_write_begin(file, mapping, pos, len, 0, &page,
67932 +                                           &fsdata);
67933 +               if (err)
67934 +                       goto out;
67936 +               zero_user_segment(page, zerofrom, PAGE_SIZE);
67938 +               /* this function in any case puts page*/
67939 +               err = pagecache_write_end(file, mapping, pos, len, len, page,
67940 +                                         fsdata);
67941 +               if (err < 0)
67942 +                       goto out;
67943 +               pos += len;
67945 +next:
67946 +               if (pos >= new_valid)
67947 +                       break;
67949 +               balance_dirty_pages_ratelimited(mapping);
67950 +               cond_resched();
67951 +       }
67953 +       mark_inode_dirty(inode);
67955 +       return 0;
67957 +out:
67958 +       ni->i_valid = valid;
67959 +       ntfs_inode_warn(inode, "failed to extend initialized size to %llx.",
67960 +                       new_valid);
67961 +       return err;
67965 + * ntfs_sparse_cluster
67966 + *
67967 + * Helper function to zero a new allocated clusters
67968 + */
67969 +void ntfs_sparse_cluster(struct inode *inode, struct page *page0, CLST vcn,
67970 +                        CLST len)
67972 +       struct address_space *mapping = inode->i_mapping;
67973 +       struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
67974 +       u64 vbo = (u64)vcn << sbi->cluster_bits;
67975 +       u64 bytes = (u64)len << sbi->cluster_bits;
67976 +       u32 blocksize = 1 << inode->i_blkbits;
67977 +       pgoff_t idx0 = page0 ? page0->index : -1;
67978 +       loff_t vbo_clst = vbo & sbi->cluster_mask_inv;
67979 +       loff_t end = ntfs_up_cluster(sbi, vbo + bytes);
67980 +       pgoff_t idx = vbo_clst >> PAGE_SHIFT;
67981 +       u32 from = vbo_clst & (PAGE_SIZE - 1);
67982 +       pgoff_t idx_end = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
67983 +       loff_t page_off;
67984 +       u32 to;
67985 +       bool partial;
67986 +       struct page *page;
67988 +       for (; idx < idx_end; idx += 1, from = 0) {
67989 +               page = idx == idx0 ? page0 : grab_cache_page(mapping, idx);
67991 +               if (!page)
67992 +                       continue;
67994 +               page_off = (loff_t)idx << PAGE_SHIFT;
67995 +               to = (page_off + PAGE_SIZE) > end ? (end - page_off)
67996 +                                                 : PAGE_SIZE;
67997 +               partial = false;
67999 +               if ((from || PAGE_SIZE != to) &&
68000 +                   likely(!page_has_buffers(page))) {
68001 +                       create_empty_buffers(page, blocksize, 0);
68002 +                       if (!page_has_buffers(page)) {
68003 +                               ntfs_inode_err(
68004 +                                       inode,
68005 +                                       "failed to allocate page buffers.");
68006 +                               /*err = -ENOMEM;*/
68007 +                               goto unlock_page;
68008 +                       }
68009 +               }
68011 +               if (page_has_buffers(page)) {
68012 +                       struct buffer_head *head, *bh;
68013 +                       u32 bh_off = 0;
68015 +                       bh = head = page_buffers(page);
68016 +                       do {
68017 +                               u32 bh_next = bh_off + blocksize;
68019 +                               if (from <= bh_off && bh_next <= to) {
68020 +                                       set_buffer_uptodate(bh);
68021 +                                       mark_buffer_dirty(bh);
68022 +                               } else if (!buffer_uptodate(bh)) {
68023 +                                       partial = true;
68024 +                               }
68025 +                               bh_off = bh_next;
68026 +                       } while (head != (bh = bh->b_this_page));
68027 +               }
68029 +               zero_user_segment(page, from, to);
68031 +               if (!partial) {
68032 +                       if (!PageUptodate(page))
68033 +                               SetPageUptodate(page);
68034 +                       set_page_dirty(page);
68035 +               }
68037 +unlock_page:
68038 +               if (idx != idx0) {
68039 +                       unlock_page(page);
68040 +                       put_page(page);
68041 +               }
68042 +               cond_resched();
68043 +       }
68044 +       mark_inode_dirty(inode);
68048 + * file_operations::mmap
68049 + */
68050 +static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
68052 +       struct address_space *mapping = file->f_mapping;
68053 +       struct inode *inode = mapping->host;
68054 +       struct ntfs_inode *ni = ntfs_i(inode);
68055 +       u64 to, from = ((u64)vma->vm_pgoff << PAGE_SHIFT);
68056 +       bool rw = vma->vm_flags & VM_WRITE;
68057 +       int err;
68059 +       if (is_encrypted(ni)) {
68060 +               ntfs_inode_warn(inode,
68061 +                               "mmap is not supported for encrypted files");
68062 +               err = -EOPNOTSUPP;
68063 +               goto out;
68064 +       }
68066 +       if (!rw)
68067 +               goto do_map;
68069 +       if (is_compressed(ni)) {
68070 +               ntfs_inode_warn(
68071 +                       inode,
68072 +                       "mmap(write) is not supported for compressed files");
68073 +               err = -EOPNOTSUPP;
68074 +               goto out;
68075 +       }
68077 +       to = min_t(loff_t, i_size_read(inode),
68078 +                  from + vma->vm_end - vma->vm_start);
68080 +       if (is_sparsed(ni)) {
68081 +               /* allocate clusters for rw map */
68082 +               struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
68083 +               CLST vcn, lcn, len;
68084 +               CLST end = bytes_to_cluster(sbi, to);
68085 +               bool new;
68087 +               for (vcn = from >> sbi->cluster_bits; vcn < end; vcn += len) {
68088 +                       err = attr_data_get_block(ni, vcn, 1, &lcn, &len, &new);
68089 +                       if (err)
68090 +                               goto out;
68091 +                       if (!new)
68092 +                               continue;
68093 +                       ntfs_sparse_cluster(inode, NULL, vcn, 1);
68094 +               }
68095 +       }
68097 +       if (ni->i_valid < to) {
68098 +               inode_lock(inode);
68099 +               err = ntfs_extend_initialized_size(file, ni, ni->i_valid, to);
68100 +               inode_unlock(inode);
68101 +               if (err)
68102 +                       goto out;
68103 +       }
68105 +do_map:
68106 +       err = generic_file_mmap(file, vma);
68107 +out:
68108 +       return err;
68111 +static int ntfs_extend(struct inode *inode, loff_t pos, size_t count,
68112 +                      struct file *file)
68114 +       struct ntfs_inode *ni = ntfs_i(inode);
68115 +       struct address_space *mapping = inode->i_mapping;
68116 +       loff_t end = pos + count;
68117 +       bool extend_init = file && pos > ni->i_valid;
68118 +       int err;
68120 +       if (end <= inode->i_size && !extend_init)
68121 +               return 0;
68123 +       /*mark rw ntfs as dirty. it will be cleared at umount*/
68124 +       ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_DIRTY);
68126 +       if (end > inode->i_size) {
68127 +               err = ntfs_set_size(inode, end);
68128 +               if (err)
68129 +                       goto out;
68130 +               inode->i_size = end;
68131 +       }
68133 +       if (extend_init && !is_compressed(ni)) {
68134 +               err = ntfs_extend_initialized_size(file, ni, ni->i_valid, pos);
68135 +               if (err)
68136 +                       goto out;
68137 +       } else {
68138 +               err = 0;
68139 +       }
68141 +       inode->i_ctime = inode->i_mtime = current_time(inode);
68142 +       mark_inode_dirty(inode);
68144 +       if (IS_SYNC(inode)) {
68145 +               int err2;
68147 +               err = filemap_fdatawrite_range(mapping, pos, end - 1);
68148 +               err2 = sync_mapping_buffers(mapping);
68149 +               if (!err)
68150 +                       err = err2;
68151 +               err2 = write_inode_now(inode, 1);
68152 +               if (!err)
68153 +                       err = err2;
68154 +               if (!err)
68155 +                       err = filemap_fdatawait_range(mapping, pos, end - 1);
68156 +       }
68158 +out:
68159 +       return err;
68162 +static int ntfs_truncate(struct inode *inode, loff_t new_size)
68164 +       struct super_block *sb = inode->i_sb;
68165 +       struct ntfs_inode *ni = ntfs_i(inode);
68166 +       int err, dirty = 0;
68167 +       u64 new_valid;
68169 +       if (!S_ISREG(inode->i_mode))
68170 +               return 0;
68172 +       if (is_compressed(ni)) {
68173 +               if (ni->i_valid > new_size)
68174 +                       ni->i_valid = new_size;
68175 +       } else {
68176 +               err = block_truncate_page(inode->i_mapping, new_size,
68177 +                                         ntfs_get_block);
68178 +               if (err)
68179 +                       return err;
68180 +       }
68182 +       new_valid = ntfs_up_block(sb, min_t(u64, ni->i_valid, new_size));
68184 +       ni_lock(ni);
68186 +       truncate_setsize(inode, new_size);
68188 +       down_write(&ni->file.run_lock);
68189 +       err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
68190 +                           &new_valid, true, NULL);
68191 +       up_write(&ni->file.run_lock);
68193 +       if (new_valid < ni->i_valid)
68194 +               ni->i_valid = new_valid;
68196 +       ni_unlock(ni);
68198 +       ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
68199 +       inode->i_ctime = inode->i_mtime = current_time(inode);
68200 +       if (!IS_DIRSYNC(inode)) {
68201 +               dirty = 1;
68202 +       } else {
68203 +               err = ntfs_sync_inode(inode);
68204 +               if (err)
68205 +                       return err;
68206 +       }
68208 +       if (dirty)
68209 +               mark_inode_dirty(inode);
68211 +       /*ntfs_flush_inodes(inode->i_sb, inode, NULL);*/
68213 +       return 0;
68217 + * Preallocate space for a file. This implements ntfs's fallocate file
68218 + * operation, which gets called from sys_fallocate system call. User
68219 + * space requests 'len' bytes at 'vbo'. If FALLOC_FL_KEEP_SIZE is set
68220 + * we just allocate clusters without zeroing them out. Otherwise we
68221 + * allocate and zero out clusters via an expanding truncate.
68222 + */
68223 +static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
68225 +       struct inode *inode = file->f_mapping->host;
68226 +       struct super_block *sb = inode->i_sb;
68227 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
68228 +       struct ntfs_inode *ni = ntfs_i(inode);
68229 +       loff_t end = vbo + len;
68230 +       loff_t vbo_down = round_down(vbo, PAGE_SIZE);
68231 +       loff_t i_size;
68232 +       int err;
68234 +       /* No support for dir */
68235 +       if (!S_ISREG(inode->i_mode))
68236 +               return -EOPNOTSUPP;
68238 +       /* Return error if mode is not supported */
68239 +       if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
68240 +                    FALLOC_FL_COLLAPSE_RANGE))
68241 +               return -EOPNOTSUPP;
68243 +       ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
68245 +       inode_lock(inode);
68246 +       i_size = inode->i_size;
68248 +       if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
68249 +               /* should never be here, see ntfs_file_open*/
68250 +               err = -EOPNOTSUPP;
68251 +               goto out;
68252 +       }
68254 +       if (mode & FALLOC_FL_PUNCH_HOLE) {
68255 +               if (!(mode & FALLOC_FL_KEEP_SIZE)) {
68256 +                       err = -EINVAL;
68257 +                       goto out;
68258 +               }
68260 +               if (!is_sparsed(ni) && !is_compressed(ni)) {
68261 +                       ntfs_inode_warn(
68262 +                               inode,
68263 +                               "punch_hole only for sparsed/compressed files");
68264 +                       err = -EOPNOTSUPP;
68265 +                       goto out;
68266 +               }
68268 +               err = filemap_write_and_wait_range(inode->i_mapping, vbo,
68269 +                                                  end - 1);
68270 +               if (err)
68271 +                       goto out;
68273 +               err = filemap_write_and_wait_range(inode->i_mapping, end,
68274 +                                                  LLONG_MAX);
68275 +               if (err)
68276 +                       goto out;
68278 +               truncate_pagecache(inode, vbo_down);
68280 +               ni_lock(ni);
68281 +               err = attr_punch_hole(ni, vbo, len);
68282 +               ni_unlock(ni);
68283 +       } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
68284 +               if (mode & ~FALLOC_FL_COLLAPSE_RANGE) {
68285 +                       err = -EINVAL;
68286 +                       goto out;
68287 +               }
68289 +               /*
68290 +                * Write tail of the last page before removed range since
68291 +                * it will get removed from the page cache below.
68292 +                */
68293 +               err = filemap_write_and_wait_range(inode->i_mapping, vbo_down,
68294 +                                                  vbo);
68295 +               if (err)
68296 +                       goto out;
68298 +               /*
68299 +                * Write data that will be shifted to preserve them
68300 +                * when discarding page cache below
68301 +                */
68302 +               err = filemap_write_and_wait_range(inode->i_mapping, end,
68303 +                                                  LLONG_MAX);
68304 +               if (err)
68305 +                       goto out;
68307 +               truncate_pagecache(inode, vbo_down);
68309 +               ni_lock(ni);
68310 +               err = attr_collapse_range(ni, vbo, len);
68311 +               ni_unlock(ni);
68312 +       } else {
68313 +               /*
68314 +                * normal file: allocate clusters, do not change 'valid' size
68315 +                */
68316 +               err = ntfs_set_size(inode, max(end, i_size));
68317 +               if (err)
68318 +                       goto out;
68320 +               if (is_sparsed(ni) || is_compressed(ni)) {
68321 +                       CLST vcn_v = ni->i_valid >> sbi->cluster_bits;
68322 +                       CLST vcn = vbo >> sbi->cluster_bits;
68323 +                       CLST cend = bytes_to_cluster(sbi, end);
68324 +                       CLST lcn, clen;
68325 +                       bool new;
68327 +                       /*
68328 +                        * allocate but not zero new clusters (see below comments)
68329 +                        * this breaks security (one can read unused on-disk areas)
68330 +                        * zeroing these clusters may be too long
68331 +                        * may be we should check here for root rights?
68332 +                        */
68333 +                       for (; vcn < cend; vcn += clen) {
68334 +                               err = attr_data_get_block(ni, vcn, cend - vcn,
68335 +                                                         &lcn, &clen, &new);
68336 +                               if (err)
68337 +                                       goto out;
68338 +                               if (!new || vcn >= vcn_v)
68339 +                                       continue;
68341 +                               /*
68342 +                                * Unwritten area
68343 +                                * NTFS is not able to store several unwritten areas
68344 +                                * Activate 'ntfs_sparse_cluster' to zero new allocated clusters
68345 +                                *
68346 +                                * Dangerous in case:
68347 +                                * 1G of sparsed clusters + 1 cluster of data =>
68348 +                                * valid_size == 1G + 1 cluster
68349 +                                * fallocate(1G) will zero 1G and this can be very long
68350 +                                * xfstest 016/086 will fail without 'ntfs_sparse_cluster'
68351 +                                */
68352 +                               /*ntfs_sparse_cluster(inode, NULL, vcn,
68353 +                                *                  min(vcn_v - vcn, clen));
68354 +                                */
68355 +                       }
68356 +               }
68358 +               if (mode & FALLOC_FL_KEEP_SIZE) {
68359 +                       ni_lock(ni);
68360 +                       /*true - keep preallocated*/
68361 +                       err = attr_set_size(ni, ATTR_DATA, NULL, 0,
68362 +                                           &ni->file.run, i_size, &ni->i_valid,
68363 +                                           true, NULL);
68364 +                       ni_unlock(ni);
68365 +               }
68366 +       }
68368 +       if (!err) {
68369 +               inode->i_ctime = inode->i_mtime = current_time(inode);
68370 +               mark_inode_dirty(inode);
68371 +       }
68372 +out:
68373 +       if (err == -EFBIG)
68374 +               err = -ENOSPC;
68376 +       inode_unlock(inode);
68377 +       return err;
68381 + * inode_operations::setattr
68382 + */
68383 +int ntfs3_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
68384 +                 struct iattr *attr)
68386 +       struct super_block *sb = dentry->d_sb;
68387 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
68388 +       struct inode *inode = d_inode(dentry);
68389 +       struct ntfs_inode *ni = ntfs_i(inode);
68390 +       u32 ia_valid = attr->ia_valid;
68391 +       umode_t mode = inode->i_mode;
68392 +       int err;
68394 +       if (sbi->options.no_acs_rules) {
68395 +               /* "no access rules" - force any changes of time etc. */
68396 +               attr->ia_valid |= ATTR_FORCE;
68397 +               /* and disable for editing some attributes */
68398 +               attr->ia_valid &= ~(ATTR_UID | ATTR_GID | ATTR_MODE);
68399 +               ia_valid = attr->ia_valid;
68400 +       }
68402 +       err = setattr_prepare(mnt_userns, dentry, attr);
68403 +       if (err)
68404 +               goto out;
68406 +       if (ia_valid & ATTR_SIZE) {
68407 +               loff_t oldsize = inode->i_size;
68409 +               if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
68410 +                       /* should never be here, see ntfs_file_open*/
68411 +                       err = -EOPNOTSUPP;
68412 +                       goto out;
68413 +               }
68414 +               inode_dio_wait(inode);
68416 +               if (attr->ia_size < oldsize)
68417 +                       err = ntfs_truncate(inode, attr->ia_size);
68418 +               else if (attr->ia_size > oldsize)
68419 +                       err = ntfs_extend(inode, attr->ia_size, 0, NULL);
68421 +               if (err)
68422 +                       goto out;
68424 +               ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
68425 +       }
68427 +       setattr_copy(mnt_userns, inode, attr);
68429 +       if (mode != inode->i_mode) {
68430 +               err = ntfs_acl_chmod(mnt_userns, inode);
68431 +               if (err)
68432 +                       goto out;
68434 +               /* linux 'w' -> windows 'ro' */
68435 +               if (0222 & inode->i_mode)
68436 +                       ni->std_fa &= ~FILE_ATTRIBUTE_READONLY;
68437 +               else
68438 +                       ni->std_fa |= FILE_ATTRIBUTE_READONLY;
68439 +       }
68441 +       mark_inode_dirty(inode);
68442 +out:
68443 +       return err;
68446 +static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
68448 +       ssize_t err;
68449 +       size_t count = iov_iter_count(iter);
68450 +       struct file *file = iocb->ki_filp;
68451 +       struct inode *inode = file->f_mapping->host;
68452 +       struct ntfs_inode *ni = ntfs_i(inode);
68454 +       if (is_encrypted(ni)) {
68455 +               ntfs_inode_warn(inode, "encrypted i/o not supported");
68456 +               return -EOPNOTSUPP;
68457 +       }
68459 +       if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
68460 +               ntfs_inode_warn(inode, "direct i/o + compressed not supported");
68461 +               return -EOPNOTSUPP;
68462 +       }
68464 +#ifndef CONFIG_NTFS3_LZX_XPRESS
68465 +       if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
68466 +               ntfs_inode_warn(
68467 +                       inode,
68468 +                       "activate CONFIG_NTFS3_LZX_XPRESS to read external compressed files");
68469 +               return -EOPNOTSUPP;
68470 +       }
68471 +#endif
68473 +       if (is_dedup(ni)) {
68474 +               ntfs_inode_warn(inode, "read deduplicated not supported");
68475 +               return -EOPNOTSUPP;
68476 +       }
68478 +       err = count ? generic_file_read_iter(iocb, iter) : 0;
68480 +       return err;
68483 +/* returns array of locked pages */
68484 +static int ntfs_get_frame_pages(struct address_space *mapping, pgoff_t index,
68485 +                               struct page **pages, u32 pages_per_frame,
68486 +                               bool *frame_uptodate)
68488 +       gfp_t gfp_mask = mapping_gfp_mask(mapping);
68489 +       u32 npages;
68491 +       *frame_uptodate = true;
68493 +       for (npages = 0; npages < pages_per_frame; npages++, index++) {
68494 +               struct page *page;
68496 +               page = find_or_create_page(mapping, index, gfp_mask);
68497 +               if (!page) {
68498 +                       while (npages--) {
68499 +                               page = pages[npages];
68500 +                               unlock_page(page);
68501 +                               put_page(page);
68502 +                       }
68504 +                       return -ENOMEM;
68505 +               }
68507 +               if (!PageUptodate(page))
68508 +                       *frame_uptodate = false;
68510 +               pages[npages] = page;
68511 +       }
68513 +       return 0;
68516 +/*helper for ntfs_file_write_iter (compressed files)*/
68517 +static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
68519 +       int err;
68520 +       struct file *file = iocb->ki_filp;
68521 +       size_t count = iov_iter_count(from);
68522 +       loff_t pos = iocb->ki_pos;
68523 +       struct inode *inode = file_inode(file);
68524 +       loff_t i_size = inode->i_size;
68525 +       struct address_space *mapping = inode->i_mapping;
68526 +       struct ntfs_inode *ni = ntfs_i(inode);
68527 +       u64 valid = ni->i_valid;
68528 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
68529 +       struct page *page, **pages = NULL;
68530 +       size_t written = 0;
68531 +       u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
68532 +       u32 frame_size = 1u << frame_bits;
68533 +       u32 pages_per_frame = frame_size >> PAGE_SHIFT;
68534 +       u32 ip, off;
68535 +       CLST frame;
68536 +       u64 frame_vbo;
68537 +       pgoff_t index;
68538 +       bool frame_uptodate;
68540 +       if (frame_size < PAGE_SIZE) {
68541 +               /*
68542 +                * frame_size == 8K if cluster 512
68543 +                * frame_size == 64K if cluster 4096
68544 +                */
68545 +               ntfs_inode_warn(inode, "page size is bigger than frame size");
68546 +               return -EOPNOTSUPP;
68547 +       }
68549 +       pages = ntfs_malloc(pages_per_frame * sizeof(struct page *));
68550 +       if (!pages)
68551 +               return -ENOMEM;
68553 +       current->backing_dev_info = inode_to_bdi(inode);
68554 +       err = file_remove_privs(file);
68555 +       if (err)
68556 +               goto out;
68558 +       err = file_update_time(file);
68559 +       if (err)
68560 +               goto out;
68562 +       /* zero range [valid : pos) */
68563 +       while (valid < pos) {
68564 +               CLST lcn, clen;
68566 +               frame = valid >> frame_bits;
68567 +               frame_vbo = valid & ~(frame_size - 1);
68568 +               off = valid & (frame_size - 1);
68570 +               err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 0, &lcn,
68571 +                                         &clen, NULL);
68572 +               if (err)
68573 +                       goto out;
68575 +               if (lcn == SPARSE_LCN) {
68576 +                       ni->i_valid = valid =
68577 +                               frame_vbo + ((u64)clen << sbi->cluster_bits);
68578 +                       continue;
68579 +               }
68581 +               /* Load full frame */
68582 +               err = ntfs_get_frame_pages(mapping, frame_vbo >> PAGE_SHIFT,
68583 +                                          pages, pages_per_frame,
68584 +                                          &frame_uptodate);
68585 +               if (err)
68586 +                       goto out;
68588 +               if (!frame_uptodate && off) {
68589 +                       err = ni_read_frame(ni, frame_vbo, pages,
68590 +                                           pages_per_frame);
68591 +                       if (err) {
68592 +                               for (ip = 0; ip < pages_per_frame; ip++) {
68593 +                                       page = pages[ip];
68594 +                                       unlock_page(page);
68595 +                                       put_page(page);
68596 +                               }
68597 +                               goto out;
68598 +                       }
68599 +               }
68601 +               ip = off >> PAGE_SHIFT;
68602 +               off = offset_in_page(valid);
68603 +               for (; ip < pages_per_frame; ip++, off = 0) {
68604 +                       page = pages[ip];
68605 +                       zero_user_segment(page, off, PAGE_SIZE);
68606 +                       flush_dcache_page(page);
68607 +                       SetPageUptodate(page);
68608 +               }
68610 +               ni_lock(ni);
68611 +               err = ni_write_frame(ni, pages, pages_per_frame);
68612 +               ni_unlock(ni);
68614 +               for (ip = 0; ip < pages_per_frame; ip++) {
68615 +                       page = pages[ip];
68616 +                       SetPageUptodate(page);
68617 +                       unlock_page(page);
68618 +                       put_page(page);
68619 +               }
68621 +               if (err)
68622 +                       goto out;
68624 +               ni->i_valid = valid = frame_vbo + frame_size;
68625 +       }
68627 +       /* copy user data [pos : pos + count) */
68628 +       while (count) {
68629 +               size_t copied, bytes;
68631 +               off = pos & (frame_size - 1);
68632 +               bytes = frame_size - off;
68633 +               if (bytes > count)
68634 +                       bytes = count;
68636 +               frame = pos >> frame_bits;
68637 +               frame_vbo = pos & ~(frame_size - 1);
68638 +               index = frame_vbo >> PAGE_SHIFT;
68640 +               if (unlikely(iov_iter_fault_in_readable(from, bytes))) {
68641 +                       err = -EFAULT;
68642 +                       goto out;
68643 +               }
68645 +               /* Load full frame */
68646 +               err = ntfs_get_frame_pages(mapping, index, pages,
68647 +                                          pages_per_frame, &frame_uptodate);
68648 +               if (err)
68649 +                       goto out;
68651 +               if (!frame_uptodate) {
68652 +                       loff_t to = pos + bytes;
68654 +                       if (off || (to < i_size && (to & (frame_size - 1)))) {
68655 +                               err = ni_read_frame(ni, frame_vbo, pages,
68656 +                                                   pages_per_frame);
68657 +                               if (err) {
68658 +                                       for (ip = 0; ip < pages_per_frame;
68659 +                                            ip++) {
68660 +                                               page = pages[ip];
68661 +                                               unlock_page(page);
68662 +                                               put_page(page);
68663 +                                       }
68664 +                                       goto out;
68665 +                               }
68666 +                       }
68667 +               }
68669 +               WARN_ON(!bytes);
68670 +               copied = 0;
68671 +               ip = off >> PAGE_SHIFT;
68672 +               off = offset_in_page(pos);
68674 +               /* copy user data to pages */
68675 +               for (;;) {
68676 +                       size_t cp, tail = PAGE_SIZE - off;
68678 +                       page = pages[ip];
68679 +                       cp = iov_iter_copy_from_user_atomic(page, from, off,
68680 +                                                           min(tail, bytes));
68681 +                       flush_dcache_page(page);
68682 +                       iov_iter_advance(from, cp);
68683 +                       copied += cp;
68684 +                       bytes -= cp;
68685 +                       if (!bytes || !cp)
68686 +                               break;
68688 +                       if (cp < tail) {
68689 +                               off += cp;
68690 +                       } else {
68691 +                               ip++;
68692 +                               off = 0;
68693 +                       }
68694 +               }
68696 +               ni_lock(ni);
68697 +               err = ni_write_frame(ni, pages, pages_per_frame);
68698 +               ni_unlock(ni);
68700 +               for (ip = 0; ip < pages_per_frame; ip++) {
68701 +                       page = pages[ip];
68702 +                       ClearPageDirty(page);
68703 +                       SetPageUptodate(page);
68704 +                       unlock_page(page);
68705 +                       put_page(page);
68706 +               }
68708 +               if (err)
68709 +                       goto out;
68711 +               /*
68712 +                * We can loop for a long time in here. Be nice and allow
68713 +                * us to schedule out to avoid softlocking if preempt
68714 +                * is disabled.
68715 +                */
68716 +               cond_resched();
68718 +               pos += copied;
68719 +               written += copied;
68721 +               count = iov_iter_count(from);
68722 +       }
68724 +out:
68725 +       ntfs_free(pages);
68727 +       current->backing_dev_info = NULL;
68729 +       if (err < 0)
68730 +               return err;
68732 +       iocb->ki_pos += written;
68733 +       if (iocb->ki_pos > ni->i_valid)
68734 +               ni->i_valid = iocb->ki_pos;
68736 +       return written;
68740 + * file_operations::write_iter
68741 + */
68742 +static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
68744 +       struct file *file = iocb->ki_filp;
68745 +       struct address_space *mapping = file->f_mapping;
68746 +       struct inode *inode = mapping->host;
68747 +       ssize_t ret;
68748 +       struct ntfs_inode *ni = ntfs_i(inode);
68750 +       if (is_encrypted(ni)) {
68751 +               ntfs_inode_warn(inode, "encrypted i/o not supported");
68752 +               return -EOPNOTSUPP;
68753 +       }
68755 +       if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
68756 +               ntfs_inode_warn(inode, "direct i/o + compressed not supported");
68757 +               return -EOPNOTSUPP;
68758 +       }
68760 +       if (is_dedup(ni)) {
68761 +               ntfs_inode_warn(inode, "write into deduplicated not supported");
68762 +               return -EOPNOTSUPP;
68763 +       }
68765 +       if (!inode_trylock(inode)) {
68766 +               if (iocb->ki_flags & IOCB_NOWAIT)
68767 +                       return -EAGAIN;
68768 +               inode_lock(inode);
68769 +       }
68771 +       ret = generic_write_checks(iocb, from);
68772 +       if (ret <= 0)
68773 +               goto out;
68775 +       if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
68776 +               /* should never be here, see ntfs_file_open*/
68777 +               ret = -EOPNOTSUPP;
68778 +               goto out;
68779 +       }
68781 +       ret = ntfs_extend(inode, iocb->ki_pos, ret, file);
68782 +       if (ret)
68783 +               goto out;
68785 +       ret = is_compressed(ni) ? ntfs_compress_write(iocb, from)
68786 +                               : __generic_file_write_iter(iocb, from);
68788 +out:
68789 +       inode_unlock(inode);
68791 +       if (ret > 0)
68792 +               ret = generic_write_sync(iocb, ret);
68794 +       return ret;
68798 + * file_operations::open
68799 + */
68800 +int ntfs_file_open(struct inode *inode, struct file *file)
68802 +       struct ntfs_inode *ni = ntfs_i(inode);
68804 +       if (unlikely((is_compressed(ni) || is_encrypted(ni)) &&
68805 +                    (file->f_flags & O_DIRECT))) {
68806 +               return -EOPNOTSUPP;
68807 +       }
68809 +       /* Decompress "external compressed" file if opened for rw */
68810 +       if ((ni->ni_flags & NI_FLAG_COMPRESSED_MASK) &&
68811 +           (file->f_flags & (O_WRONLY | O_RDWR | O_TRUNC))) {
68812 +#ifdef CONFIG_NTFS3_LZX_XPRESS
68813 +               int err = ni_decompress_file(ni);
68815 +               if (err)
68816 +                       return err;
68817 +#else
68818 +               ntfs_inode_warn(
68819 +                       inode,
68820 +                       "activate CONFIG_NTFS3_LZX_XPRESS to write external compressed files");
68821 +               return -EOPNOTSUPP;
68822 +#endif
68823 +       }
68825 +       return generic_file_open(inode, file);
68829 + * file_operations::release
68830 + */
68831 +static int ntfs_file_release(struct inode *inode, struct file *file)
68833 +       struct ntfs_inode *ni = ntfs_i(inode);
68834 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
68835 +       int err = 0;
68837 +       /* if we are the last writer on the inode, drop the block reservation */
68838 +       if (sbi->options.prealloc && ((file->f_mode & FMODE_WRITE) &&
68839 +                                     atomic_read(&inode->i_writecount) == 1)) {
68840 +               ni_lock(ni);
68841 +               down_write(&ni->file.run_lock);
68843 +               err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
68844 +                                   inode->i_size, &ni->i_valid, false, NULL);
68846 +               up_write(&ni->file.run_lock);
68847 +               ni_unlock(ni);
68848 +       }
68849 +       return err;
68852 +/* file_operations::fiemap */
68853 +int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
68854 +               __u64 start, __u64 len)
68856 +       int err;
68857 +       struct ntfs_inode *ni = ntfs_i(inode);
68859 +       if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR)
68860 +               return -EOPNOTSUPP;
68862 +       ni_lock(ni);
68864 +       err = ni_fiemap(ni, fieinfo, start, len);
68866 +       ni_unlock(ni);
68868 +       return err;
68871 +const struct inode_operations ntfs_file_inode_operations = {
68872 +       .getattr = ntfs_getattr,
68873 +       .setattr = ntfs3_setattr,
68874 +       .listxattr = ntfs_listxattr,
68875 +       .permission = ntfs_permission,
68876 +       .get_acl = ntfs_get_acl,
68877 +       .set_acl = ntfs_set_acl,
68878 +       .fiemap = ntfs_fiemap,
68881 +const struct file_operations ntfs_file_operations = {
68882 +       .llseek = generic_file_llseek,
68883 +       .read_iter = ntfs_file_read_iter,
68884 +       .write_iter = ntfs_file_write_iter,
68885 +       .unlocked_ioctl = ntfs_ioctl,
68886 +#ifdef CONFIG_COMPAT
68887 +       .compat_ioctl = ntfs_compat_ioctl,
68888 +#endif
68889 +       .splice_read = generic_file_splice_read,
68890 +       .mmap = ntfs_file_mmap,
68891 +       .open = ntfs_file_open,
68892 +       .fsync = generic_file_fsync,
68893 +       .splice_write = iter_file_splice_write,
68894 +       .fallocate = ntfs_fallocate,
68895 +       .release = ntfs_file_release,
68897 diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
68898 new file mode 100644
68899 index 000000000000..c3121bf9c62f
68900 --- /dev/null
68901 +++ b/fs/ntfs3/frecord.c
68902 @@ -0,0 +1,3071 @@
68903 +// SPDX-License-Identifier: GPL-2.0
68905 + *
68906 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
68907 + *
68908 + */
68910 +#include <linux/blkdev.h>
68911 +#include <linux/buffer_head.h>
68912 +#include <linux/fiemap.h>
68913 +#include <linux/fs.h>
68914 +#include <linux/nls.h>
68915 +#include <linux/vmalloc.h>
68917 +#include "debug.h"
68918 +#include "ntfs.h"
68919 +#include "ntfs_fs.h"
68920 +#ifdef CONFIG_NTFS3_LZX_XPRESS
68921 +#include "lib/lib.h"
68922 +#endif
68924 +static struct mft_inode *ni_ins_mi(struct ntfs_inode *ni, struct rb_root *tree,
68925 +                                  CLST ino, struct rb_node *ins)
68927 +       struct rb_node **p = &tree->rb_node;
68928 +       struct rb_node *pr = NULL;
68930 +       while (*p) {
68931 +               struct mft_inode *mi;
68933 +               pr = *p;
68934 +               mi = rb_entry(pr, struct mft_inode, node);
68935 +               if (mi->rno > ino)
68936 +                       p = &pr->rb_left;
68937 +               else if (mi->rno < ino)
68938 +                       p = &pr->rb_right;
68939 +               else
68940 +                       return mi;
68941 +       }
68943 +       if (!ins)
68944 +               return NULL;
68946 +       rb_link_node(ins, pr, p);
68947 +       rb_insert_color(ins, tree);
68948 +       return rb_entry(ins, struct mft_inode, node);
68952 + * ni_find_mi
68953 + *
68954 + * finds mft_inode by record number
68955 + */
68956 +static struct mft_inode *ni_find_mi(struct ntfs_inode *ni, CLST rno)
68958 +       return ni_ins_mi(ni, &ni->mi_tree, rno, NULL);
68962 + * ni_add_mi
68963 + *
68964 + * adds new mft_inode into ntfs_inode
68965 + */
68966 +static void ni_add_mi(struct ntfs_inode *ni, struct mft_inode *mi)
68968 +       ni_ins_mi(ni, &ni->mi_tree, mi->rno, &mi->node);
68972 + * ni_remove_mi
68973 + *
68974 + * removes mft_inode from ntfs_inode
68975 + */
68976 +void ni_remove_mi(struct ntfs_inode *ni, struct mft_inode *mi)
68978 +       rb_erase(&mi->node, &ni->mi_tree);
68982 + * ni_std
68983 + *
68984 + * returns pointer into std_info from primary record
68985 + */
68986 +struct ATTR_STD_INFO *ni_std(struct ntfs_inode *ni)
68988 +       const struct ATTRIB *attr;
68990 +       attr = mi_find_attr(&ni->mi, NULL, ATTR_STD, NULL, 0, NULL);
68991 +       return attr ? resident_data_ex(attr, sizeof(struct ATTR_STD_INFO))
68992 +                   : NULL;
68996 + * ni_std5
68997 + *
68998 + * returns pointer into std_info from primary record
68999 + */
69000 +struct ATTR_STD_INFO5 *ni_std5(struct ntfs_inode *ni)
69002 +       const struct ATTRIB *attr;
69004 +       attr = mi_find_attr(&ni->mi, NULL, ATTR_STD, NULL, 0, NULL);
69006 +       return attr ? resident_data_ex(attr, sizeof(struct ATTR_STD_INFO5))
69007 +                   : NULL;
69011 + * ni_clear
69012 + *
69013 + * clears resources allocated by ntfs_inode
69014 + */
69015 +void ni_clear(struct ntfs_inode *ni)
69017 +       struct rb_node *node;
69019 +       if (!ni->vfs_inode.i_nlink && is_rec_inuse(ni->mi.mrec))
69020 +               ni_delete_all(ni);
69022 +       al_destroy(ni);
69024 +       for (node = rb_first(&ni->mi_tree); node;) {
69025 +               struct rb_node *next = rb_next(node);
69026 +               struct mft_inode *mi = rb_entry(node, struct mft_inode, node);
69028 +               rb_erase(node, &ni->mi_tree);
69029 +               mi_put(mi);
69030 +               node = next;
69031 +       }
69033 +       /* bad inode always has mode == S_IFREG */
69034 +       if (ni->ni_flags & NI_FLAG_DIR)
69035 +               indx_clear(&ni->dir);
69036 +       else {
69037 +               run_close(&ni->file.run);
69038 +#ifdef CONFIG_NTFS3_LZX_XPRESS
69039 +               if (ni->file.offs_page) {
69040 +                       /* on-demand allocated page for offsets */
69041 +                       put_page(ni->file.offs_page);
69042 +                       ni->file.offs_page = NULL;
69043 +               }
69044 +#endif
69045 +       }
69047 +       mi_clear(&ni->mi);
69051 + * ni_load_mi_ex
69052 + *
69053 + * finds mft_inode by record number.
69054 + */
69055 +int ni_load_mi_ex(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi)
69057 +       int err;
69058 +       struct mft_inode *r;
69060 +       r = ni_find_mi(ni, rno);
69061 +       if (r)
69062 +               goto out;
69064 +       err = mi_get(ni->mi.sbi, rno, &r);
69065 +       if (err)
69066 +               return err;
69068 +       ni_add_mi(ni, r);
69070 +out:
69071 +       if (mi)
69072 +               *mi = r;
69073 +       return 0;
69077 + * ni_load_mi
69078 + *
69079 + * load mft_inode corresponded list_entry
69080 + */
69081 +int ni_load_mi(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le,
69082 +              struct mft_inode **mi)
69084 +       CLST rno;
69086 +       if (!le) {
69087 +               *mi = &ni->mi;
69088 +               return 0;
69089 +       }
69091 +       rno = ino_get(&le->ref);
69092 +       if (rno == ni->mi.rno) {
69093 +               *mi = &ni->mi;
69094 +               return 0;
69095 +       }
69096 +       return ni_load_mi_ex(ni, rno, mi);
69100 + * ni_find_attr
69101 + *
69102 + * returns attribute and record this attribute belongs to
69103 + */
69104 +struct ATTRIB *ni_find_attr(struct ntfs_inode *ni, struct ATTRIB *attr,
69105 +                           struct ATTR_LIST_ENTRY **le_o, enum ATTR_TYPE type,
69106 +                           const __le16 *name, u8 name_len, const CLST *vcn,
69107 +                           struct mft_inode **mi)
69109 +       struct ATTR_LIST_ENTRY *le;
69110 +       struct mft_inode *m;
69112 +       if (!ni->attr_list.size ||
69113 +           (!name_len && (type == ATTR_LIST || type == ATTR_STD))) {
69114 +               if (le_o)
69115 +                       *le_o = NULL;
69116 +               if (mi)
69117 +                       *mi = &ni->mi;
69119 +               /* Look for required attribute in primary record */
69120 +               return mi_find_attr(&ni->mi, attr, type, name, name_len, NULL);
69121 +       }
69123 +       /* first look for list entry of required type */
69124 +       le = al_find_ex(ni, le_o ? *le_o : NULL, type, name, name_len, vcn);
69125 +       if (!le)
69126 +               return NULL;
69128 +       if (le_o)
69129 +               *le_o = le;
69131 +       /* Load record that contains this attribute */
69132 +       if (ni_load_mi(ni, le, &m))
69133 +               return NULL;
69135 +       /* Look for required attribute */
69136 +       attr = mi_find_attr(m, NULL, type, name, name_len, &le->id);
69138 +       if (!attr)
69139 +               goto out;
69141 +       if (!attr->non_res) {
69142 +               if (vcn && *vcn)
69143 +                       goto out;
69144 +       } else if (!vcn) {
69145 +               if (attr->nres.svcn)
69146 +                       goto out;
69147 +       } else if (le64_to_cpu(attr->nres.svcn) > *vcn ||
69148 +                  *vcn > le64_to_cpu(attr->nres.evcn)) {
69149 +               goto out;
69150 +       }
69152 +       if (mi)
69153 +               *mi = m;
69154 +       return attr;
69156 +out:
69157 +       ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR);
69158 +       return NULL;
69162 + * ni_enum_attr_ex
69163 + *
69164 + * enumerates attributes in ntfs_inode
69165 + */
69166 +struct ATTRIB *ni_enum_attr_ex(struct ntfs_inode *ni, struct ATTRIB *attr,
69167 +                              struct ATTR_LIST_ENTRY **le,
69168 +                              struct mft_inode **mi)
69170 +       struct mft_inode *mi2;
69171 +       struct ATTR_LIST_ENTRY *le2;
69173 +       /* Do we have an attribute list? */
69174 +       if (!ni->attr_list.size) {
69175 +               *le = NULL;
69176 +               if (mi)
69177 +                       *mi = &ni->mi;
69178 +               /* Enum attributes in primary record */
69179 +               return mi_enum_attr(&ni->mi, attr);
69180 +       }
69182 +       /* get next list entry */
69183 +       le2 = *le = al_enumerate(ni, attr ? *le : NULL);
69184 +       if (!le2)
69185 +               return NULL;
69187 +       /* Load record that contains the required attribute */
69188 +       if (ni_load_mi(ni, le2, &mi2))
69189 +               return NULL;
69191 +       if (mi)
69192 +               *mi = mi2;
69194 +       /* Find attribute in loaded record */
69195 +       return rec_find_attr_le(mi2, le2);
69199 + * ni_load_attr
69200 + *
69201 + * loads attribute that contains given vcn
69202 + */
69203 +struct ATTRIB *ni_load_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
69204 +                           const __le16 *name, u8 name_len, CLST vcn,
69205 +                           struct mft_inode **pmi)
69207 +       struct ATTR_LIST_ENTRY *le;
69208 +       struct ATTRIB *attr;
69209 +       struct mft_inode *mi;
69210 +       struct ATTR_LIST_ENTRY *next;
69212 +       if (!ni->attr_list.size) {
69213 +               if (pmi)
69214 +                       *pmi = &ni->mi;
69215 +               return mi_find_attr(&ni->mi, NULL, type, name, name_len, NULL);
69216 +       }
69218 +       le = al_find_ex(ni, NULL, type, name, name_len, NULL);
69219 +       if (!le)
69220 +               return NULL;
69222 +       /*
69223 +        * Unfortunately ATTR_LIST_ENTRY contains only start vcn
69224 +        * So to find the ATTRIB segment that contains 'vcn' we should
69225 +        * enumerate some entries
69226 +        */
69227 +       if (vcn) {
69228 +               for (;; le = next) {
69229 +                       next = al_find_ex(ni, le, type, name, name_len, NULL);
69230 +                       if (!next || le64_to_cpu(next->vcn) > vcn)
69231 +                               break;
69232 +               }
69233 +       }
69235 +       if (ni_load_mi(ni, le, &mi))
69236 +               return NULL;
69238 +       if (pmi)
69239 +               *pmi = mi;
69241 +       attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
69242 +       if (!attr)
69243 +               return NULL;
69245 +       if (!attr->non_res)
69246 +               return attr;
69248 +       if (le64_to_cpu(attr->nres.svcn) <= vcn &&
69249 +           vcn <= le64_to_cpu(attr->nres.evcn))
69250 +               return attr;
69252 +       return NULL;
69256 + * ni_load_all_mi
69257 + *
69258 + * loads all subrecords
69259 + */
69260 +int ni_load_all_mi(struct ntfs_inode *ni)
69262 +       int err;
69263 +       struct ATTR_LIST_ENTRY *le;
69265 +       if (!ni->attr_list.size)
69266 +               return 0;
69268 +       le = NULL;
69270 +       while ((le = al_enumerate(ni, le))) {
69271 +               CLST rno = ino_get(&le->ref);
69273 +               if (rno == ni->mi.rno)
69274 +                       continue;
69276 +               err = ni_load_mi_ex(ni, rno, NULL);
69277 +               if (err)
69278 +                       return err;
69279 +       }
69281 +       return 0;
69285 + * ni_add_subrecord
69286 + *
69287 + * allocate + format + attach a new subrecord
69288 + */
69289 +bool ni_add_subrecord(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi)
69291 +       struct mft_inode *m;
69293 +       m = ntfs_zalloc(sizeof(struct mft_inode));
69294 +       if (!m)
69295 +               return false;
69297 +       if (mi_format_new(m, ni->mi.sbi, rno, 0, ni->mi.rno == MFT_REC_MFT)) {
69298 +               mi_put(m);
69299 +               return false;
69300 +       }
69302 +       mi_get_ref(&ni->mi, &m->mrec->parent_ref);
69304 +       ni_add_mi(ni, m);
69305 +       *mi = m;
69306 +       return true;
69310 + * ni_remove_attr
69311 + *
69312 + * removes all attributes for the given type/name/id
69313 + */
69314 +int ni_remove_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
69315 +                  const __le16 *name, size_t name_len, bool base_only,
69316 +                  const __le16 *id)
69318 +       int err;
69319 +       struct ATTRIB *attr;
69320 +       struct ATTR_LIST_ENTRY *le;
69321 +       struct mft_inode *mi;
69322 +       u32 type_in;
69323 +       int diff;
69325 +       if (base_only || type == ATTR_LIST || !ni->attr_list.size) {
69326 +               attr = mi_find_attr(&ni->mi, NULL, type, name, name_len, id);
69327 +               if (!attr)
69328 +                       return -ENOENT;
69330 +               mi_remove_attr(&ni->mi, attr);
69331 +               return 0;
69332 +       }
69334 +       type_in = le32_to_cpu(type);
69335 +       le = NULL;
69337 +       for (;;) {
69338 +               le = al_enumerate(ni, le);
69339 +               if (!le)
69340 +                       return 0;
69342 +next_le2:
69343 +               diff = le32_to_cpu(le->type) - type_in;
69344 +               if (diff < 0)
69345 +                       continue;
69347 +               if (diff > 0)
69348 +                       return 0;
69350 +               if (le->name_len != name_len)
69351 +                       continue;
69353 +               if (name_len &&
69354 +                   memcmp(le_name(le), name, name_len * sizeof(short)))
69355 +                       continue;
69357 +               if (id && le->id != *id)
69358 +                       continue;
69359 +               err = ni_load_mi(ni, le, &mi);
69360 +               if (err)
69361 +                       return err;
69363 +               al_remove_le(ni, le);
69365 +               attr = mi_find_attr(mi, NULL, type, name, name_len, id);
69366 +               if (!attr)
69367 +                       return -ENOENT;
69369 +               mi_remove_attr(mi, attr);
69371 +               if (PtrOffset(ni->attr_list.le, le) >= ni->attr_list.size)
69372 +                       return 0;
69373 +               goto next_le2;
69374 +       }
69378 + * ni_ins_new_attr
69379 + *
69380 + * inserts the attribute into record
69381 + * Returns a not fully constructed attribute, or NULL if creation is not possible
69382 + */
69383 +static struct ATTRIB *ni_ins_new_attr(struct ntfs_inode *ni,
69384 +                                     struct mft_inode *mi,
69385 +                                     struct ATTR_LIST_ENTRY *le,
69386 +                                     enum ATTR_TYPE type, const __le16 *name,
69387 +                                     u8 name_len, u32 asize, u16 name_off,
69388 +                                     CLST svcn)
69390 +       int err;
69391 +       struct ATTRIB *attr;
69392 +       bool le_added = false;
69393 +       struct MFT_REF ref;
69395 +       mi_get_ref(mi, &ref);
69397 +       if (type != ATTR_LIST && !le && ni->attr_list.size) {
69398 +               err = al_add_le(ni, type, name, name_len, svcn, cpu_to_le16(-1),
69399 +                               &ref, &le);
69400 +               if (err) {
69401 +                       /* no memory or no space */
69402 +                       return NULL;
69403 +               }
69404 +               le_added = true;
69406 +               /*
69407 +                * al_add_le -> attr_set_size (list) -> ni_expand_list
69408 +                * which moves some attributes out of primary record
69409 +                * this means that name may point into moved memory
69410 +                * reinit 'name' from le
69411 +                */
69412 +               name = le->name;
69413 +       }
69415 +       attr = mi_insert_attr(mi, type, name, name_len, asize, name_off);
69416 +       if (!attr) {
69417 +               if (le_added)
69418 +                       al_remove_le(ni, le);
69419 +               return NULL;
69420 +       }
69422 +       if (type == ATTR_LIST) {
69423 +               /*attr list is not in list entry array*/
69424 +               goto out;
69425 +       }
69427 +       if (!le)
69428 +               goto out;
69430 +       /* Update ATTRIB Id and record reference */
69431 +       le->id = attr->id;
69432 +       ni->attr_list.dirty = true;
69433 +       le->ref = ref;
69435 +out:
69436 +       return attr;
69440 + * random write access to a sparse or compressed file may result in
69441 + * suboptimally packed runs.
69442 + * Here is the place to optimize them
69443 + */
69444 +static int ni_repack(struct ntfs_inode *ni)
69446 +       int err = 0;
69447 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
69448 +       struct mft_inode *mi, *mi_p = NULL;
69449 +       struct ATTRIB *attr = NULL, *attr_p;
69450 +       struct ATTR_LIST_ENTRY *le = NULL, *le_p;
69451 +       CLST alloc = 0;
69452 +       u8 cluster_bits = sbi->cluster_bits;
69453 +       CLST svcn, evcn = 0, svcn_p, evcn_p, next_svcn;
69454 +       u32 roff, rs = sbi->record_size;
69455 +       struct runs_tree run;
69457 +       run_init(&run);
69459 +       while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi))) {
69460 +               if (!attr->non_res)
69461 +                       continue;
69463 +               svcn = le64_to_cpu(attr->nres.svcn);
69464 +               if (svcn != le64_to_cpu(le->vcn)) {
69465 +                       err = -EINVAL;
69466 +                       break;
69467 +               }
69469 +               if (!svcn) {
69470 +                       alloc = le64_to_cpu(attr->nres.alloc_size) >>
69471 +                               cluster_bits;
69472 +                       mi_p = NULL;
69473 +               } else if (svcn != evcn + 1) {
69474 +                       err = -EINVAL;
69475 +                       break;
69476 +               }
69478 +               evcn = le64_to_cpu(attr->nres.evcn);
69480 +               if (svcn > evcn + 1) {
69481 +                       err = -EINVAL;
69482 +                       break;
69483 +               }
69485 +               if (!mi_p) {
69486 +                       /* do not try if too little free space */
69487 +                       if (le32_to_cpu(mi->mrec->used) + 8 >= rs)
69488 +                               continue;
69490 +                       /* do not try if last attribute segment */
69491 +                       if (evcn + 1 == alloc)
69492 +                               continue;
69493 +                       run_close(&run);
69494 +               }
69496 +               roff = le16_to_cpu(attr->nres.run_off);
69497 +               err = run_unpack(&run, sbi, ni->mi.rno, svcn, evcn, svcn,
69498 +                                Add2Ptr(attr, roff),
69499 +                                le32_to_cpu(attr->size) - roff);
69500 +               if (err < 0)
69501 +                       break;
69503 +               if (!mi_p) {
69504 +                       mi_p = mi;
69505 +                       attr_p = attr;
69506 +                       svcn_p = svcn;
69507 +                       evcn_p = evcn;
69508 +                       le_p = le;
69509 +                       err = 0;
69510 +                       continue;
69511 +               }
69513 +               /*
69514 +                * run contains data from two records: mi_p and mi
69515 +                * try to pack in one
69516 +                */
69517 +               err = mi_pack_runs(mi_p, attr_p, &run, evcn + 1 - svcn_p);
69518 +               if (err)
69519 +                       break;
69521 +               next_svcn = le64_to_cpu(attr_p->nres.evcn) + 1;
69523 +               if (next_svcn >= evcn + 1) {
69524 +                       /* we can remove this attribute segment */
69525 +                       al_remove_le(ni, le);
69526 +                       mi_remove_attr(mi, attr);
69527 +                       le = le_p;
69528 +                       continue;
69529 +               }
69531 +               attr->nres.svcn = le->vcn = cpu_to_le64(next_svcn);
69532 +               mi->dirty = true;
69533 +               ni->attr_list.dirty = true;
69535 +               if (evcn + 1 == alloc) {
69536 +                       err = mi_pack_runs(mi, attr, &run,
69537 +                                          evcn + 1 - next_svcn);
69538 +                       if (err)
69539 +                               break;
69540 +                       mi_p = NULL;
69541 +               } else {
69542 +                       mi_p = mi;
69543 +                       attr_p = attr;
69544 +                       svcn_p = next_svcn;
69545 +                       evcn_p = evcn;
69546 +                       le_p = le;
69547 +                       run_truncate_head(&run, next_svcn);
69548 +               }
69549 +       }
69551 +       if (err) {
69552 +               ntfs_inode_warn(&ni->vfs_inode, "repack problem");
69553 +               ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
69555 +               /* Pack loaded but not packed runs */
69556 +               if (mi_p)
69557 +                       mi_pack_runs(mi_p, attr_p, &run, evcn_p + 1 - svcn_p);
69558 +       }
69560 +       run_close(&run);
69561 +       return err;
69565 + * ni_try_remove_attr_list
69566 + *
69567 + * Can we remove attribute list?
69568 + * Check the case when primary record contains enough space for all attributes
69569 + */
69570 +static int ni_try_remove_attr_list(struct ntfs_inode *ni)
69572 +       int err = 0;
69573 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
69574 +       struct ATTRIB *attr, *attr_list, *attr_ins;
69575 +       struct ATTR_LIST_ENTRY *le;
69576 +       struct mft_inode *mi;
69577 +       u32 asize, free;
69578 +       struct MFT_REF ref;
69579 +       __le16 id;
69581 +       if (!ni->attr_list.dirty)
69582 +               return 0;
69584 +       err = ni_repack(ni);
69585 +       if (err)
69586 +               return err;
69588 +       attr_list = mi_find_attr(&ni->mi, NULL, ATTR_LIST, NULL, 0, NULL);
69589 +       if (!attr_list)
69590 +               return 0;
69592 +       asize = le32_to_cpu(attr_list->size);
69594 +       /* free space in primary record without attribute list */
69595 +       free = sbi->record_size - le32_to_cpu(ni->mi.mrec->used) + asize;
69596 +       mi_get_ref(&ni->mi, &ref);
69598 +       le = NULL;
69599 +       while ((le = al_enumerate(ni, le))) {
69600 +               if (!memcmp(&le->ref, &ref, sizeof(ref)))
69601 +                       continue;
69603 +               if (le->vcn)
69604 +                       return 0;
69606 +               mi = ni_find_mi(ni, ino_get(&le->ref));
69607 +               if (!mi)
69608 +                       return 0;
69610 +               attr = mi_find_attr(mi, NULL, le->type, le_name(le),
69611 +                                   le->name_len, &le->id);
69612 +               if (!attr)
69613 +                       return 0;
69615 +               asize = le32_to_cpu(attr->size);
69616 +               if (asize > free)
69617 +                       return 0;
69619 +               free -= asize;
69620 +       }
69622 +       /* It seems that the attribute list can be removed from the primary record */
69623 +       mi_remove_attr(&ni->mi, attr_list);
69625 +       /*
69626 +        * Repeat the cycle above and move all attributes to primary record.
69627 +        * It should succeed!
69628 +        */
69629 +       le = NULL;
69630 +       while ((le = al_enumerate(ni, le))) {
69631 +               if (!memcmp(&le->ref, &ref, sizeof(ref)))
69632 +                       continue;
69634 +               mi = ni_find_mi(ni, ino_get(&le->ref));
69636 +               attr = mi_find_attr(mi, NULL, le->type, le_name(le),
69637 +                                   le->name_len, &le->id);
69638 +               asize = le32_to_cpu(attr->size);
69640 +               /* insert into primary record */
69641 +               attr_ins = mi_insert_attr(&ni->mi, le->type, le_name(le),
69642 +                                         le->name_len, asize,
69643 +                                         le16_to_cpu(attr->name_off));
69644 +               id = attr_ins->id;
69646 +               /* copy all except id */
69647 +               memcpy(attr_ins, attr, asize);
69648 +               attr_ins->id = id;
69650 +               /* remove from original record */
69651 +               mi_remove_attr(mi, attr);
69652 +       }
69654 +       run_deallocate(sbi, &ni->attr_list.run, true);
69655 +       run_close(&ni->attr_list.run);
69656 +       ni->attr_list.size = 0;
69657 +       ntfs_free(ni->attr_list.le);
69658 +       ni->attr_list.le = NULL;
69659 +       ni->attr_list.dirty = false;
69661 +       return 0;
69665 + * ni_create_attr_list
69666 + *
69667 + * generates an attribute list for this primary record
69668 + */
69669 +int ni_create_attr_list(struct ntfs_inode *ni)
69671 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
69672 +       int err;
69673 +       u32 lsize;
69674 +       struct ATTRIB *attr;
69675 +       struct ATTRIB *arr_move[7];
69676 +       struct ATTR_LIST_ENTRY *le, *le_b[7];
69677 +       struct MFT_REC *rec;
69678 +       bool is_mft;
69679 +       CLST rno = 0;
69680 +       struct mft_inode *mi;
69681 +       u32 free_b, nb, to_free, rs;
69682 +       u16 sz;
69684 +       is_mft = ni->mi.rno == MFT_REC_MFT;
69685 +       rec = ni->mi.mrec;
69686 +       rs = sbi->record_size;
69688 +       /*
69689 +        * Skip estimating exact memory requirement
69690 +        * Looks like one record_size is always enough
69691 +        */
69692 +       le = ntfs_malloc(al_aligned(rs));
69693 +       if (!le) {
69694 +               err = -ENOMEM;
69695 +               goto out;
69696 +       }
69698 +       mi_get_ref(&ni->mi, &le->ref);
69699 +       ni->attr_list.le = le;
69701 +       attr = NULL;
69702 +       nb = 0;
69703 +       free_b = 0;
69704 +       attr = NULL;
69706 +       for (; (attr = mi_enum_attr(&ni->mi, attr)); le = Add2Ptr(le, sz)) {
69707 +               sz = le_size(attr->name_len);
69708 +               le->type = attr->type;
69709 +               le->size = cpu_to_le16(sz);
69710 +               le->name_len = attr->name_len;
69711 +               le->name_off = offsetof(struct ATTR_LIST_ENTRY, name);
69712 +               le->vcn = 0;
69713 +               if (le != ni->attr_list.le)
69714 +                       le->ref = ni->attr_list.le->ref;
69715 +               le->id = attr->id;
69717 +               if (attr->name_len)
69718 +                       memcpy(le->name, attr_name(attr),
69719 +                              sizeof(short) * attr->name_len);
69720 +               else if (attr->type == ATTR_STD)
69721 +                       continue;
69722 +               else if (attr->type == ATTR_LIST)
69723 +                       continue;
69724 +               else if (is_mft && attr->type == ATTR_DATA)
69725 +                       continue;
69727 +               if (!nb || nb < ARRAY_SIZE(arr_move)) {
69728 +                       le_b[nb] = le;
69729 +                       arr_move[nb++] = attr;
69730 +                       free_b += le32_to_cpu(attr->size);
69731 +               }
69732 +       }
69734 +       lsize = PtrOffset(ni->attr_list.le, le);
69735 +       ni->attr_list.size = lsize;
69737 +       to_free = le32_to_cpu(rec->used) + lsize + SIZEOF_RESIDENT;
69738 +       if (to_free <= rs) {
69739 +               to_free = 0;
69740 +       } else {
69741 +               to_free -= rs;
69743 +               if (to_free > free_b) {
69744 +                       err = -EINVAL;
69745 +                       goto out1;
69746 +               }
69747 +       }
69749 +       /* Allocate child mft. */
69750 +       err = ntfs_look_free_mft(sbi, &rno, is_mft, ni, &mi);
69751 +       if (err)
69752 +               goto out1;
69754 +       /* Call 'mi_remove_attr' in reverse order to keep pointers 'arr_move' valid */
69755 +       while (to_free > 0) {
69756 +               struct ATTRIB *b = arr_move[--nb];
69757 +               u32 asize = le32_to_cpu(b->size);
69758 +               u16 name_off = le16_to_cpu(b->name_off);
69760 +               attr = mi_insert_attr(mi, b->type, Add2Ptr(b, name_off),
69761 +                                     b->name_len, asize, name_off);
69762 +               WARN_ON(!attr);
69764 +               mi_get_ref(mi, &le_b[nb]->ref);
69765 +               le_b[nb]->id = attr->id;
69767 +               /* copy all except id */
69768 +               memcpy(attr, b, asize);
69769 +               attr->id = le_b[nb]->id;
69771 +               WARN_ON(!mi_remove_attr(&ni->mi, b));
69773 +               if (to_free <= asize)
69774 +                       break;
69775 +               to_free -= asize;
69776 +               WARN_ON(!nb);
69777 +       }
69779 +       attr = mi_insert_attr(&ni->mi, ATTR_LIST, NULL, 0,
69780 +                             lsize + SIZEOF_RESIDENT, SIZEOF_RESIDENT);
69781 +       WARN_ON(!attr);
69783 +       attr->non_res = 0;
69784 +       attr->flags = 0;
69785 +       attr->res.data_size = cpu_to_le32(lsize);
69786 +       attr->res.data_off = SIZEOF_RESIDENT_LE;
69787 +       attr->res.flags = 0;
69788 +       attr->res.res = 0;
69790 +       memcpy(resident_data_ex(attr, lsize), ni->attr_list.le, lsize);
69792 +       ni->attr_list.dirty = false;
69794 +       mark_inode_dirty(&ni->vfs_inode);
69795 +       goto out;
69797 +out1:
69798 +       ntfs_free(ni->attr_list.le);
69799 +       ni->attr_list.le = NULL;
69800 +       ni->attr_list.size = 0;
69802 +out:
69803 +       return err;
69807 + * ni_ins_attr_ext
69808 + *
69809 + * This method adds an external attribute to the ntfs_inode.
69810 + */
69811 +static int ni_ins_attr_ext(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le,
69812 +                          enum ATTR_TYPE type, const __le16 *name, u8 name_len,
69813 +                          u32 asize, CLST svcn, u16 name_off, bool force_ext,
69814 +                          struct ATTRIB **ins_attr, struct mft_inode **ins_mi)
69816 +       struct ATTRIB *attr;
69817 +       struct mft_inode *mi;
69818 +       CLST rno;
69819 +       u64 vbo;
69820 +       struct rb_node *node;
69821 +       int err;
69822 +       bool is_mft, is_mft_data;
69823 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
69825 +       is_mft = ni->mi.rno == MFT_REC_MFT;
69826 +       is_mft_data = is_mft && type == ATTR_DATA && !name_len;
69828 +       if (asize > sbi->max_bytes_per_attr) {
69829 +               err = -EINVAL;
69830 +               goto out;
69831 +       }
69833 +       /*
69834 +        * standard information and attr_list cannot be made external.
69835 +        * The Log File cannot have any external attributes
69836 +        */
69837 +       if (type == ATTR_STD || type == ATTR_LIST ||
69838 +           ni->mi.rno == MFT_REC_LOG) {
69839 +               err = -EINVAL;
69840 +               goto out;
69841 +       }
69843 +       /* Create the attribute list if it does not already exist */
69844 +       if (!ni->attr_list.size) {
69845 +               err = ni_create_attr_list(ni);
69846 +               if (err)
69847 +                       goto out;
69848 +       }
69850 +       vbo = is_mft_data ? ((u64)svcn << sbi->cluster_bits) : 0;
69852 +       if (force_ext)
69853 +               goto insert_ext;
69855 +       /* Load all subrecords into memory. */
69856 +       err = ni_load_all_mi(ni);
69857 +       if (err)
69858 +               goto out;
69860 +       /* Check each of loaded subrecord */
69861 +       for (node = rb_first(&ni->mi_tree); node; node = rb_next(node)) {
69862 +               mi = rb_entry(node, struct mft_inode, node);
69864 +               if (is_mft_data &&
69865 +                   (mi_enum_attr(mi, NULL) ||
69866 +                    vbo <= ((u64)mi->rno << sbi->record_bits))) {
69867 +                       /* We can't accept this record because of MFT's bootstrapping */
69868 +                       continue;
69869 +               }
69870 +               if (is_mft &&
69871 +                   mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, NULL)) {
69872 +                       /*
69873 +                        * This child record already has a ATTR_DATA.
69874 +                        * So it can't accept any other records.
69875 +                        */
69876 +                       continue;
69877 +               }
69879 +               if ((type != ATTR_NAME || name_len) &&
69880 +                   mi_find_attr(mi, NULL, type, name, name_len, NULL)) {
69881 +                       /* Only indexed attributes can share same record */
69882 +                       continue;
69883 +               }
69885 +               /* Try to insert attribute into this subrecord */
69886 +               attr = ni_ins_new_attr(ni, mi, le, type, name, name_len, asize,
69887 +                                      name_off, svcn);
69888 +               if (!attr)
69889 +                       continue;
69891 +               if (ins_attr)
69892 +                       *ins_attr = attr;
69893 +               return 0;
69894 +       }
69896 +insert_ext:
69897 +       /* We have to allocate a new child subrecord*/
69898 +       err = ntfs_look_free_mft(sbi, &rno, is_mft_data, ni, &mi);
69899 +       if (err)
69900 +               goto out;
69902 +       if (is_mft_data && vbo <= ((u64)rno << sbi->record_bits)) {
69903 +               err = -EINVAL;
69904 +               goto out1;
69905 +       }
69907 +       attr = ni_ins_new_attr(ni, mi, le, type, name, name_len, asize,
69908 +                              name_off, svcn);
69909 +       if (!attr)
69910 +               goto out2;
69912 +       if (ins_attr)
69913 +               *ins_attr = attr;
69914 +       if (ins_mi)
69915 +               *ins_mi = mi;
69917 +       return 0;
69919 +out2:
69920 +       ni_remove_mi(ni, mi);
69921 +       mi_put(mi);
69922 +       err = -EINVAL;
69924 +out1:
69925 +       ntfs_mark_rec_free(sbi, rno);
69927 +out:
69928 +       return err;
69932 + * ni_insert_attr
69933 + *
69934 + * inserts an attribute into the file.
69935 + *
69936 + * If the primary record has room, it will just insert the attribute.
69937 + * If not, it may make the attribute external.
69938 + * For $MFT::Data it may make room for the attribute by
69939 + * making other attributes external.
69940 + *
69941 + * NOTE:
69942 + * The ATTR_LIST and ATTR_STD cannot be made external.
69943 + * This function does not fully fill the new attribute
69944 + * It only fills 'size'/'type'/'id'/'name_len' fields
69945 + */
69946 +static int ni_insert_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
69947 +                         const __le16 *name, u8 name_len, u32 asize,
69948 +                         u16 name_off, CLST svcn, struct ATTRIB **ins_attr,
69949 +                         struct mft_inode **ins_mi)
69951 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
69952 +       int err;
69953 +       struct ATTRIB *attr, *eattr;
69954 +       struct MFT_REC *rec;
69955 +       bool is_mft;
69956 +       struct ATTR_LIST_ENTRY *le;
69957 +       u32 list_reserve, max_free, free, used, t32;
69958 +       __le16 id;
69959 +       u16 t16;
69961 +       is_mft = ni->mi.rno == MFT_REC_MFT;
69962 +       rec = ni->mi.mrec;
69964 +       list_reserve = SIZEOF_NONRESIDENT + 3 * (1 + 2 * sizeof(u32));
69965 +       used = le32_to_cpu(rec->used);
69966 +       free = sbi->record_size - used;
69968 +       if (is_mft && type != ATTR_LIST) {
69969 +               /* Reserve space for the ATTRIB List. */
69970 +               if (free < list_reserve)
69971 +                       free = 0;
69972 +               else
69973 +                       free -= list_reserve;
69974 +       }
69976 +       if (asize <= free) {
69977 +               attr = ni_ins_new_attr(ni, &ni->mi, NULL, type, name, name_len,
69978 +                                      asize, name_off, svcn);
69979 +               if (attr) {
69980 +                       if (ins_attr)
69981 +                               *ins_attr = attr;
69982 +                       if (ins_mi)
69983 +                               *ins_mi = &ni->mi;
69984 +                       err = 0;
69985 +                       goto out;
69986 +               }
69987 +       }
69989 +       if (!is_mft || type != ATTR_DATA || svcn) {
69990 +               /* This ATTRIB will be external. */
69991 +               err = ni_ins_attr_ext(ni, NULL, type, name, name_len, asize,
69992 +                                     svcn, name_off, false, ins_attr, ins_mi);
69993 +               goto out;
69994 +       }
69996 +       /*
69997 +        * Here we have: "is_mft && type == ATTR_DATA && !svcn
69998 +        *
69999 +        * The first chunk of the $MFT::Data ATTRIB must be the base record.
70000 +        * Evict as many other attributes as possible.
70001 +        */
70002 +       max_free = free;
70004 +       /* Estimate the result of moving all possible attributes away.*/
70005 +       attr = NULL;
70007 +       while ((attr = mi_enum_attr(&ni->mi, attr))) {
70008 +               if (attr->type == ATTR_STD)
70009 +                       continue;
70010 +               if (attr->type == ATTR_LIST)
70011 +                       continue;
70012 +               max_free += le32_to_cpu(attr->size);
70013 +       }
70015 +       if (max_free < asize + list_reserve) {
70016 +               /* Impossible to insert this attribute into primary record */
70017 +               err = -EINVAL;
70018 +               goto out;
70019 +       }
70021 +       /* Start real attribute moving */
70022 +       attr = NULL;
70024 +       for (;;) {
70025 +               attr = mi_enum_attr(&ni->mi, attr);
70026 +               if (!attr) {
70027 +                       /* We should never be here 'cause we have already checked this case */
70028 +                       err = -EINVAL;
70029 +                       goto out;
70030 +               }
70032 +               /* Skip attributes that MUST be primary record */
70033 +               if (attr->type == ATTR_STD || attr->type == ATTR_LIST)
70034 +                       continue;
70036 +               le = NULL;
70037 +               if (ni->attr_list.size) {
70038 +                       le = al_find_le(ni, NULL, attr);
70039 +                       if (!le) {
70040 +                               /* Really this is a serious bug */
70041 +                               err = -EINVAL;
70042 +                               goto out;
70043 +                       }
70044 +               }
70046 +               t32 = le32_to_cpu(attr->size);
70047 +               t16 = le16_to_cpu(attr->name_off);
70048 +               err = ni_ins_attr_ext(ni, le, attr->type, Add2Ptr(attr, t16),
70049 +                                     attr->name_len, t32, attr_svcn(attr), t16,
70050 +                                     false, &eattr, NULL);
70051 +               if (err)
70052 +                       return err;
70054 +               id = eattr->id;
70055 +               memcpy(eattr, attr, t32);
70056 +               eattr->id = id;
70058 +               /* remove attrib from primary record */
70059 +               mi_remove_attr(&ni->mi, attr);
70061 +               /* attr now points to next attribute */
70062 +               if (attr->type == ATTR_END)
70063 +                       goto out;
70064 +       }
70065 +       while (asize + list_reserve > sbi->record_size - le32_to_cpu(rec->used))
70066 +               ;
70068 +       attr = ni_ins_new_attr(ni, &ni->mi, NULL, type, name, name_len, asize,
70069 +                              name_off, svcn);
70070 +       if (!attr) {
70071 +               err = -EINVAL;
70072 +               goto out;
70073 +       }
70075 +       if (ins_attr)
70076 +               *ins_attr = attr;
70077 +       if (ins_mi)
70078 +               *ins_mi = &ni->mi;
70080 +out:
70081 +       return err;
70085 + * ni_expand_mft_list
70086 + *
70087 + * This method splits ATTR_DATA of $MFT
70088 + */
70089 +static int ni_expand_mft_list(struct ntfs_inode *ni)
70091 +       int err = 0;
70092 +       struct runs_tree *run = &ni->file.run;
70093 +       u32 asize, run_size, done = 0;
70094 +       struct ATTRIB *attr;
70095 +       struct rb_node *node;
70096 +       CLST mft_min, mft_new, svcn, evcn, plen;
70097 +       struct mft_inode *mi, *mi_min, *mi_new;
70098 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
70100 +       /* Find the nearest Mft */
70101 +       mft_min = 0;
70102 +       mft_new = 0;
70103 +       mi_min = NULL;
70105 +       for (node = rb_first(&ni->mi_tree); node; node = rb_next(node)) {
70106 +               mi = rb_entry(node, struct mft_inode, node);
70108 +               attr = mi_enum_attr(mi, NULL);
70110 +               if (!attr) {
70111 +                       mft_min = mi->rno;
70112 +                       mi_min = mi;
70113 +                       break;
70114 +               }
70115 +       }
70117 +       if (ntfs_look_free_mft(sbi, &mft_new, true, ni, &mi_new)) {
70118 +               mft_new = 0;
70119 +               // really this is not critical
70120 +       } else if (mft_min > mft_new) {
70121 +               mft_min = mft_new;
70122 +               mi_min = mi_new;
70123 +       } else {
70124 +               ntfs_mark_rec_free(sbi, mft_new);
70125 +               mft_new = 0;
70126 +               ni_remove_mi(ni, mi_new);
70127 +       }
70129 +       attr = mi_find_attr(&ni->mi, NULL, ATTR_DATA, NULL, 0, NULL);
70130 +       if (!attr) {
70131 +               err = -EINVAL;
70132 +               goto out;
70133 +       }
70135 +       asize = le32_to_cpu(attr->size);
70137 +       evcn = le64_to_cpu(attr->nres.evcn);
70138 +       svcn = bytes_to_cluster(sbi, (u64)(mft_min + 1) << sbi->record_bits);
70139 +       if (evcn + 1 >= svcn) {
70140 +               err = -EINVAL;
70141 +               goto out;
70142 +       }
70144 +       /*
70145 +        * split primary attribute [0 evcn] in two parts [0 svcn) + [svcn evcn]
70146 +        *
70147 +        * Update first part of ATTR_DATA in 'primary MFT
70148 +        */
70149 +       err = run_pack(run, 0, svcn, Add2Ptr(attr, SIZEOF_NONRESIDENT),
70150 +                      asize - SIZEOF_NONRESIDENT, &plen);
70151 +       if (err < 0)
70152 +               goto out;
70154 +       run_size = QuadAlign(err);
70155 +       err = 0;
70157 +       if (plen < svcn) {
70158 +               err = -EINVAL;
70159 +               goto out;
70160 +       }
70162 +       attr->nres.evcn = cpu_to_le64(svcn - 1);
70163 +       attr->size = cpu_to_le32(run_size + SIZEOF_NONRESIDENT);
70164 +       /* 'done' - how many bytes of primary MFT becomes free */
70165 +       done = asize - run_size - SIZEOF_NONRESIDENT;
70166 +       le32_sub_cpu(&ni->mi.mrec->used, done);
70168 +       /* Estimate the size of second part: run_buf=NULL */
70169 +       err = run_pack(run, svcn, evcn + 1 - svcn, NULL, sbi->record_size,
70170 +                      &plen);
70171 +       if (err < 0)
70172 +               goto out;
70174 +       run_size = QuadAlign(err);
70175 +       err = 0;
70177 +       if (plen < evcn + 1 - svcn) {
70178 +               err = -EINVAL;
70179 +               goto out;
70180 +       }
70182 +       /*
70183 +        * This function may implicitly call expand attr_list
70184 +        * Insert second part of ATTR_DATA in 'mi_min'
70185 +        */
70186 +       attr = ni_ins_new_attr(ni, mi_min, NULL, ATTR_DATA, NULL, 0,
70187 +                              SIZEOF_NONRESIDENT + run_size,
70188 +                              SIZEOF_NONRESIDENT, svcn);
70189 +       if (!attr) {
70190 +               err = -EINVAL;
70191 +               goto out;
70192 +       }
70194 +       attr->non_res = 1;
70195 +       attr->name_off = SIZEOF_NONRESIDENT_LE;
70196 +       attr->flags = 0;
70198 +       run_pack(run, svcn, evcn + 1 - svcn, Add2Ptr(attr, SIZEOF_NONRESIDENT),
70199 +                run_size, &plen);
70201 +       attr->nres.svcn = cpu_to_le64(svcn);
70202 +       attr->nres.evcn = cpu_to_le64(evcn);
70203 +       attr->nres.run_off = cpu_to_le16(SIZEOF_NONRESIDENT);
70205 +out:
70206 +       if (mft_new) {
70207 +               ntfs_mark_rec_free(sbi, mft_new);
70208 +               ni_remove_mi(ni, mi_new);
70209 +       }
70211 +       return !err && !done ? -EOPNOTSUPP : err;
70215 + * ni_expand_list
70216 + *
70217 + * This method moves all possible attributes out of primary record
70218 + */
70219 +int ni_expand_list(struct ntfs_inode *ni)
70221 +       int err = 0;
70222 +       u32 asize, done = 0;
70223 +       struct ATTRIB *attr, *ins_attr;
70224 +       struct ATTR_LIST_ENTRY *le;
70225 +       bool is_mft = ni->mi.rno == MFT_REC_MFT;
70226 +       struct MFT_REF ref;
70228 +       mi_get_ref(&ni->mi, &ref);
70229 +       le = NULL;
70231 +       while ((le = al_enumerate(ni, le))) {
70232 +               if (le->type == ATTR_STD)
70233 +                       continue;
70235 +               if (memcmp(&ref, &le->ref, sizeof(struct MFT_REF)))
70236 +                       continue;
70238 +               if (is_mft && le->type == ATTR_DATA)
70239 +                       continue;
70241 +               /* Find attribute in primary record */
70242 +               attr = rec_find_attr_le(&ni->mi, le);
70243 +               if (!attr) {
70244 +                       err = -EINVAL;
70245 +                       goto out;
70246 +               }
70248 +               asize = le32_to_cpu(attr->size);
70250 +               /* Always insert into new record to avoid collisions (deep recursive) */
70251 +               err = ni_ins_attr_ext(ni, le, attr->type, attr_name(attr),
70252 +                                     attr->name_len, asize, attr_svcn(attr),
70253 +                                     le16_to_cpu(attr->name_off), true,
70254 +                                     &ins_attr, NULL);
70256 +               if (err)
70257 +                       goto out;
70259 +               memcpy(ins_attr, attr, asize);
70260 +               ins_attr->id = le->id;
70261 +               mi_remove_attr(&ni->mi, attr);
70263 +               done += asize;
70264 +               goto out;
70265 +       }
70267 +       if (!is_mft) {
70268 +               err = -EFBIG; /* attr list is too big(?) */
70269 +               goto out;
70270 +       }
70272 +       /* split mft data as much as possible */
70273 +       err = ni_expand_mft_list(ni);
70274 +       if (err)
70275 +               goto out;
70277 +out:
70278 +       return !err && !done ? -EOPNOTSUPP : err;
70282 + * ni_insert_nonresident
70283 + *
70284 + * inserts new nonresident attribute
70285 + */
70286 +int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type,
70287 +                         const __le16 *name, u8 name_len,
70288 +                         const struct runs_tree *run, CLST svcn, CLST len,
70289 +                         __le16 flags, struct ATTRIB **new_attr,
70290 +                         struct mft_inode **mi)
70292 +       int err;
70293 +       CLST plen;
70294 +       struct ATTRIB *attr;
70295 +       bool is_ext =
70296 +               (flags & (ATTR_FLAG_SPARSED | ATTR_FLAG_COMPRESSED)) && !svcn;
70297 +       u32 name_size = QuadAlign(name_len * sizeof(short));
70298 +       u32 name_off = is_ext ? SIZEOF_NONRESIDENT_EX : SIZEOF_NONRESIDENT;
70299 +       u32 run_off = name_off + name_size;
70300 +       u32 run_size, asize;
70301 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
70303 +       err = run_pack(run, svcn, len, NULL, sbi->max_bytes_per_attr - run_off,
70304 +                      &plen);
70305 +       if (err < 0)
70306 +               goto out;
70308 +       run_size = QuadAlign(err);
70310 +       if (plen < len) {
70311 +               err = -EINVAL;
70312 +               goto out;
70313 +       }
70315 +       asize = run_off + run_size;
70317 +       if (asize > sbi->max_bytes_per_attr) {
70318 +               err = -EINVAL;
70319 +               goto out;
70320 +       }
70322 +       err = ni_insert_attr(ni, type, name, name_len, asize, name_off, svcn,
70323 +                            &attr, mi);
70325 +       if (err)
70326 +               goto out;
70328 +       attr->non_res = 1;
70329 +       attr->name_off = cpu_to_le16(name_off);
70330 +       attr->flags = flags;
70332 +       run_pack(run, svcn, len, Add2Ptr(attr, run_off), run_size, &plen);
70334 +       attr->nres.svcn = cpu_to_le64(svcn);
70335 +       attr->nres.evcn = cpu_to_le64((u64)svcn + len - 1);
70337 +       err = 0;
70338 +       if (new_attr)
70339 +               *new_attr = attr;
70341 +       *(__le64 *)&attr->nres.run_off = cpu_to_le64(run_off);
70343 +       attr->nres.alloc_size =
70344 +               svcn ? 0 : cpu_to_le64((u64)len << ni->mi.sbi->cluster_bits);
70345 +       attr->nres.data_size = attr->nres.alloc_size;
70346 +       attr->nres.valid_size = attr->nres.alloc_size;
70348 +       if (is_ext) {
70349 +               if (flags & ATTR_FLAG_COMPRESSED)
70350 +                       attr->nres.c_unit = COMPRESSION_UNIT;
70351 +               attr->nres.total_size = attr->nres.alloc_size;
70352 +       }
70354 +out:
70355 +       return err;
70359 + * ni_insert_resident
70360 + *
70361 + * inserts new resident attribute
70362 + */
70363 +int ni_insert_resident(struct ntfs_inode *ni, u32 data_size,
70364 +                      enum ATTR_TYPE type, const __le16 *name, u8 name_len,
70365 +                      struct ATTRIB **new_attr, struct mft_inode **mi)
70367 +       int err;
70368 +       u32 name_size = QuadAlign(name_len * sizeof(short));
70369 +       u32 asize = SIZEOF_RESIDENT + name_size + QuadAlign(data_size);
70370 +       struct ATTRIB *attr;
70372 +       err = ni_insert_attr(ni, type, name, name_len, asize, SIZEOF_RESIDENT,
70373 +                            0, &attr, mi);
70374 +       if (err)
70375 +               return err;
70377 +       attr->non_res = 0;
70378 +       attr->flags = 0;
70380 +       attr->res.data_size = cpu_to_le32(data_size);
70381 +       attr->res.data_off = cpu_to_le16(SIZEOF_RESIDENT + name_size);
70382 +       if (type == ATTR_NAME)
70383 +               attr->res.flags = RESIDENT_FLAG_INDEXED;
70384 +       attr->res.res = 0;
70386 +       if (new_attr)
70387 +               *new_attr = attr;
70389 +       return 0;
70393 + * ni_remove_attr_le
70394 + *
70395 + * removes attribute from record
70396 + */
70397 +int ni_remove_attr_le(struct ntfs_inode *ni, struct ATTRIB *attr,
70398 +                     struct ATTR_LIST_ENTRY *le)
70400 +       int err;
70401 +       struct mft_inode *mi;
70403 +       err = ni_load_mi(ni, le, &mi);
70404 +       if (err)
70405 +               return err;
70407 +       mi_remove_attr(mi, attr);
70409 +       if (le)
70410 +               al_remove_le(ni, le);
70412 +       return 0;
70416 + * ni_delete_all
70417 + *
70418 + * removes all attributes and frees allocated space
70419 + * ntfs_evict_inode->ntfs_clear_inode->ni_delete_all (if no links)
70420 + */
70421 +int ni_delete_all(struct ntfs_inode *ni)
70423 +       int err;
70424 +       struct ATTR_LIST_ENTRY *le = NULL;
70425 +       struct ATTRIB *attr = NULL;
70426 +       struct rb_node *node;
70427 +       u16 roff;
70428 +       u32 asize;
70429 +       CLST svcn, evcn;
70430 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
70431 +       bool nt3 = is_ntfs3(sbi);
70432 +       struct MFT_REF ref;
70434 +       while ((attr = ni_enum_attr_ex(ni, attr, &le, NULL))) {
70435 +               if (!nt3 || attr->name_len) {
70436 +                       ;
70437 +               } else if (attr->type == ATTR_REPARSE) {
70438 +                       mi_get_ref(&ni->mi, &ref);
70439 +                       ntfs_remove_reparse(sbi, 0, &ref);
70440 +               } else if (attr->type == ATTR_ID && !attr->non_res &&
70441 +                          le32_to_cpu(attr->res.data_size) >=
70442 +                                  sizeof(struct GUID)) {
70443 +                       ntfs_objid_remove(sbi, resident_data(attr));
70444 +               }
70446 +               if (!attr->non_res)
70447 +                       continue;
70449 +               svcn = le64_to_cpu(attr->nres.svcn);
70450 +               evcn = le64_to_cpu(attr->nres.evcn);
70452 +               if (evcn + 1 <= svcn)
70453 +                       continue;
70455 +               asize = le32_to_cpu(attr->size);
70456 +               roff = le16_to_cpu(attr->nres.run_off);
70458 +               /*run==1 means unpack and deallocate*/
70459 +               run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, evcn, svcn,
70460 +                             Add2Ptr(attr, roff), asize - roff);
70461 +       }
70463 +       if (ni->attr_list.size) {
70464 +               run_deallocate(ni->mi.sbi, &ni->attr_list.run, true);
70465 +               al_destroy(ni);
70466 +       }
70468 +       /* Free all subrecords */
70469 +       for (node = rb_first(&ni->mi_tree); node;) {
70470 +               struct rb_node *next = rb_next(node);
70471 +               struct mft_inode *mi = rb_entry(node, struct mft_inode, node);
70473 +               clear_rec_inuse(mi->mrec);
70474 +               mi->dirty = true;
70475 +               mi_write(mi, 0);
70477 +               ntfs_mark_rec_free(sbi, mi->rno);
70478 +               ni_remove_mi(ni, mi);
70479 +               mi_put(mi);
70480 +               node = next;
70481 +       }
70483 +       // Free base record
70484 +       clear_rec_inuse(ni->mi.mrec);
70485 +       ni->mi.dirty = true;
70486 +       err = mi_write(&ni->mi, 0);
70488 +       ntfs_mark_rec_free(sbi, ni->mi.rno);
70490 +       return err;
70494 + * ni_fname_name
70495 + *
70496 + * returns file name attribute by its value
70497 + */
70498 +struct ATTR_FILE_NAME *ni_fname_name(struct ntfs_inode *ni,
70499 +                                    const struct cpu_str *uni,
70500 +                                    const struct MFT_REF *home_dir,
70501 +                                    struct ATTR_LIST_ENTRY **le)
70503 +       struct ATTRIB *attr = NULL;
70504 +       struct ATTR_FILE_NAME *fname;
70506 +       *le = NULL;
70508 +       /* Enumerate all names */
70509 +next:
70510 +       attr = ni_find_attr(ni, attr, le, ATTR_NAME, NULL, 0, NULL, NULL);
70511 +       if (!attr)
70512 +               return NULL;
70514 +       fname = resident_data_ex(attr, SIZEOF_ATTRIBUTE_FILENAME);
70515 +       if (!fname)
70516 +               goto next;
70518 +       if (home_dir && memcmp(home_dir, &fname->home, sizeof(*home_dir)))
70519 +               goto next;
70521 +       if (!uni)
70522 +               goto next;
70524 +       if (uni->len != fname->name_len)
70525 +               goto next;
70527 +       if (ntfs_cmp_names_cpu(uni, (struct le_str *)&fname->name_len, NULL,
70528 +                              false))
70529 +               goto next;
70531 +       return fname;
70535 + * ni_fname_type
70536 + *
70537 + * returns file name attribute with given type
70538 + */
70539 +struct ATTR_FILE_NAME *ni_fname_type(struct ntfs_inode *ni, u8 name_type,
70540 +                                    struct ATTR_LIST_ENTRY **le)
70542 +       struct ATTRIB *attr = NULL;
70543 +       struct ATTR_FILE_NAME *fname;
70545 +       *le = NULL;
70547 +       /* Enumerate all names */
70548 +       for (;;) {
70549 +               attr = ni_find_attr(ni, attr, le, ATTR_NAME, NULL, 0, NULL,
70550 +                                   NULL);
70551 +               if (!attr)
70552 +                       return NULL;
70554 +               fname = resident_data_ex(attr, SIZEOF_ATTRIBUTE_FILENAME);
70555 +               if (fname && name_type == fname->type)
70556 +                       return fname;
70557 +       }
70561 + * Process compressed/sparsed in special way
70562 + * NOTE: you need to set ni->std_fa = new_fa
70563 + * after this function to keep internal structures in consistency
70564 + */
70565 +int ni_new_attr_flags(struct ntfs_inode *ni, enum FILE_ATTRIBUTE new_fa)
70567 +       struct ATTRIB *attr;
70568 +       struct mft_inode *mi;
70569 +       __le16 new_aflags;
70570 +       u32 new_asize;
70572 +       attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
70573 +       if (!attr)
70574 +               return -EINVAL;
70576 +       new_aflags = attr->flags;
70578 +       if (new_fa & FILE_ATTRIBUTE_SPARSE_FILE)
70579 +               new_aflags |= ATTR_FLAG_SPARSED;
70580 +       else
70581 +               new_aflags &= ~ATTR_FLAG_SPARSED;
70583 +       if (new_fa & FILE_ATTRIBUTE_COMPRESSED)
70584 +               new_aflags |= ATTR_FLAG_COMPRESSED;
70585 +       else
70586 +               new_aflags &= ~ATTR_FLAG_COMPRESSED;
70588 +       if (new_aflags == attr->flags)
70589 +               return 0;
70591 +       if ((new_aflags & (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED)) ==
70592 +           (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED)) {
70593 +               ntfs_inode_warn(&ni->vfs_inode,
70594 +                               "file can't be sparsed and compressed");
70595 +               return -EOPNOTSUPP;
70596 +       }
70598 +       if (!attr->non_res)
70599 +               goto out;
70601 +       if (attr->nres.data_size) {
70602 +               ntfs_inode_warn(
70603 +                       &ni->vfs_inode,
70604 +                       "one can change sparsed/compressed only for empty files");
70605 +               return -EOPNOTSUPP;
70606 +       }
70608 +       /* resize nonresident empty attribute in-place only*/
70609 +       new_asize = (new_aflags & (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED))
70610 +                           ? (SIZEOF_NONRESIDENT_EX + 8)
70611 +                           : (SIZEOF_NONRESIDENT + 8);
70613 +       if (!mi_resize_attr(mi, attr, new_asize - le32_to_cpu(attr->size)))
70614 +               return -EOPNOTSUPP;
70616 +       if (new_aflags & ATTR_FLAG_SPARSED) {
70617 +               attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
70618 +               /* windows uses 16 clusters per frame but supports one cluster per frame too*/
70619 +               attr->nres.c_unit = 0;
70620 +               ni->vfs_inode.i_mapping->a_ops = &ntfs_aops;
70621 +       } else if (new_aflags & ATTR_FLAG_COMPRESSED) {
70622 +               attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
70623 +               /* the only allowed: 16 clusters per frame */
70624 +               attr->nres.c_unit = NTFS_LZNT_CUNIT;
70625 +               ni->vfs_inode.i_mapping->a_ops = &ntfs_aops_cmpr;
70626 +       } else {
70627 +               attr->name_off = SIZEOF_NONRESIDENT_LE;
70628 +               /* normal files */
70629 +               attr->nres.c_unit = 0;
70630 +               ni->vfs_inode.i_mapping->a_ops = &ntfs_aops;
70631 +       }
70632 +       attr->nres.run_off = attr->name_off;
70633 +out:
70634 +       attr->flags = new_aflags;
70635 +       mi->dirty = true;
70637 +       return 0;
70641 + * ni_parse_reparse
70642 + *
70643 + * buffer is at least 24 bytes
70644 + */
70645 +enum REPARSE_SIGN ni_parse_reparse(struct ntfs_inode *ni, struct ATTRIB *attr,
70646 +                                  void *buffer)
70648 +       const struct REPARSE_DATA_BUFFER *rp = NULL;
70649 +       u8 bits;
70650 +       u16 len;
70651 +       typeof(rp->CompressReparseBuffer) *cmpr;
70653 +       static_assert(sizeof(struct REPARSE_DATA_BUFFER) <= 24);
70655 +       /* Try to estimate reparse point */
70656 +       if (!attr->non_res) {
70657 +               rp = resident_data_ex(attr, sizeof(struct REPARSE_DATA_BUFFER));
70658 +       } else if (le64_to_cpu(attr->nres.data_size) >=
70659 +                  sizeof(struct REPARSE_DATA_BUFFER)) {
70660 +               struct runs_tree run;
70662 +               run_init(&run);
70664 +               if (!attr_load_runs_vcn(ni, ATTR_REPARSE, NULL, 0, &run, 0) &&
70665 +                   !ntfs_read_run_nb(ni->mi.sbi, &run, 0, buffer,
70666 +                                     sizeof(struct REPARSE_DATA_BUFFER),
70667 +                                     NULL)) {
70668 +                       rp = buffer;
70669 +               }
70671 +               run_close(&run);
70672 +       }
70674 +       if (!rp)
70675 +               return REPARSE_NONE;
70677 +       len = le16_to_cpu(rp->ReparseDataLength);
70678 +       switch (rp->ReparseTag) {
70679 +       case (IO_REPARSE_TAG_MICROSOFT | IO_REPARSE_TAG_SYMBOLIC_LINK):
70680 +               break; /* Symbolic link */
70681 +       case IO_REPARSE_TAG_MOUNT_POINT:
70682 +               break; /* Mount points and junctions */
70683 +       case IO_REPARSE_TAG_SYMLINK:
70684 +               break;
70685 +       case IO_REPARSE_TAG_COMPRESS:
70686 +               /*
70687 +                * WOF - Windows Overlay Filter - used to compress files with lzx/xpress
70688 +                * Unlike native NTFS file compression, the Windows Overlay Filter supports
70689 +                * only read operations. This means that it doesn't need to sector-align each
70690 +                * compressed chunk, so the compressed data can be packed more tightly together.
70691 +                * If you open the file for writing, the Windows Overlay Filter just decompresses
70692 +                * the entire file, turning it back into a plain file.
70693 +                *
70694 +                * ntfs3 driver decompresses the entire file only on write or change size requests
70695 +                */
70697 +               cmpr = &rp->CompressReparseBuffer;
70698 +               if (len < sizeof(*cmpr) ||
70699 +                   cmpr->WofVersion != WOF_CURRENT_VERSION ||
70700 +                   cmpr->WofProvider != WOF_PROVIDER_SYSTEM ||
70701 +                   cmpr->ProviderVer != WOF_PROVIDER_CURRENT_VERSION) {
70702 +                       return REPARSE_NONE;
70703 +               }
70705 +               switch (cmpr->CompressionFormat) {
70706 +               case WOF_COMPRESSION_XPRESS4K:
70707 +                       bits = 0xc; // 4k
70708 +                       break;
70709 +               case WOF_COMPRESSION_XPRESS8K:
70710 +                       bits = 0xd; // 8k
70711 +                       break;
70712 +               case WOF_COMPRESSION_XPRESS16K:
70713 +                       bits = 0xe; // 16k
70714 +                       break;
70715 +               case WOF_COMPRESSION_LZX32K:
70716 +                       bits = 0xf; // 32k
70717 +                       break;
70718 +               default:
70719 +                       bits = 0x10; // 64k
70720 +                       break;
70721 +               }
70722 +               ni_set_ext_compress_bits(ni, bits);
70723 +               return REPARSE_COMPRESSED;
70725 +       case IO_REPARSE_TAG_DEDUP:
70726 +               ni->ni_flags |= NI_FLAG_DEDUPLICATED;
70727 +               return REPARSE_DEDUPLICATED;
70729 +       default:
70730 +               if (rp->ReparseTag & IO_REPARSE_TAG_NAME_SURROGATE)
70731 +                       break;
70733 +               return REPARSE_NONE;
70734 +       }
70736 +       /* Looks like normal symlink */
70737 +       return REPARSE_LINK;
70741 + * helper for file_fiemap
70742 + * assumed ni_lock
70743 + * TODO: less aggressive locks
70744 + */
70745 +int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
70746 +             __u64 vbo, __u64 len)
70748 +       int err = 0;
70749 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
70750 +       u8 cluster_bits = sbi->cluster_bits;
70751 +       struct runs_tree *run;
70752 +       struct rw_semaphore *run_lock;
70753 +       struct ATTRIB *attr;
70754 +       CLST vcn = vbo >> cluster_bits;
70755 +       CLST lcn, clen;
70756 +       u64 valid = ni->i_valid;
70757 +       u64 lbo, bytes;
70758 +       u64 end, alloc_size;
70759 +       size_t idx = -1;
70760 +       u32 flags;
70761 +       bool ok;
70763 +       if (S_ISDIR(ni->vfs_inode.i_mode)) {
70764 +               run = &ni->dir.alloc_run;
70765 +               attr = ni_find_attr(ni, NULL, NULL, ATTR_ALLOC, I30_NAME,
70766 +                                   ARRAY_SIZE(I30_NAME), NULL, NULL);
70767 +               run_lock = &ni->dir.run_lock;
70768 +       } else {
70769 +               run = &ni->file.run;
70770 +               attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL,
70771 +                                   NULL);
70772 +               if (!attr) {
70773 +                       err = -EINVAL;
70774 +                       goto out;
70775 +               }
70776 +               if (is_attr_compressed(attr)) {
70777 +                       /*unfortunately cp -r incorrectly treats compressed clusters*/
70778 +                       err = -EOPNOTSUPP;
70779 +                       ntfs_inode_warn(
70780 +                               &ni->vfs_inode,
70781 +                               "fiemap is not supported for compressed file (cp -r)");
70782 +                       goto out;
70783 +               }
70784 +               run_lock = &ni->file.run_lock;
70785 +       }
70787 +       if (!attr || !attr->non_res) {
70788 +               err = fiemap_fill_next_extent(
70789 +                       fieinfo, 0, 0,
70790 +                       attr ? le32_to_cpu(attr->res.data_size) : 0,
70791 +                       FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_LAST |
70792 +                               FIEMAP_EXTENT_MERGED);
70793 +               goto out;
70794 +       }
70796 +       end = vbo + len;
70797 +       alloc_size = le64_to_cpu(attr->nres.alloc_size);
70798 +       if (end > alloc_size)
70799 +               end = alloc_size;
70801 +       down_read(run_lock);
70803 +       while (vbo < end) {
70804 +               if (idx == -1) {
70805 +                       ok = run_lookup_entry(run, vcn, &lcn, &clen, &idx);
70806 +               } else {
70807 +                       CLST vcn_next = vcn;
70809 +                       ok = run_get_entry(run, ++idx, &vcn, &lcn, &clen) &&
70810 +                            vcn == vcn_next;
70811 +                       if (!ok)
70812 +                               vcn = vcn_next;
70813 +               }
70815 +               if (!ok) {
70816 +                       up_read(run_lock);
70817 +                       down_write(run_lock);
70819 +                       err = attr_load_runs_vcn(ni, attr->type,
70820 +                                                attr_name(attr),
70821 +                                                attr->name_len, run, vcn);
70823 +                       up_write(run_lock);
70824 +                       down_read(run_lock);
70826 +                       if (err)
70827 +                               break;
70829 +                       ok = run_lookup_entry(run, vcn, &lcn, &clen, &idx);
70831 +                       if (!ok) {
70832 +                               err = -EINVAL;
70833 +                               break;
70834 +                       }
70835 +               }
70837 +               if (!clen) {
70838 +                       err = -EINVAL; // ?
70839 +                       break;
70840 +               }
70842 +               if (lcn == SPARSE_LCN) {
70843 +                       vcn += clen;
70844 +                       vbo = (u64)vcn << cluster_bits;
70845 +                       continue;
70846 +               }
70848 +               flags = FIEMAP_EXTENT_MERGED;
70849 +               if (S_ISDIR(ni->vfs_inode.i_mode)) {
70850 +                       ;
70851 +               } else if (is_attr_compressed(attr)) {
70852 +                       CLST clst_data;
70854 +                       err = attr_is_frame_compressed(
70855 +                               ni, attr, vcn >> attr->nres.c_unit, &clst_data);
70856 +                       if (err)
70857 +                               break;
70858 +                       if (clst_data < NTFS_LZNT_CLUSTERS)
70859 +                               flags |= FIEMAP_EXTENT_ENCODED;
70860 +               } else if (is_attr_encrypted(attr)) {
70861 +                       flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
70862 +               }
70864 +               vbo = (u64)vcn << cluster_bits;
70865 +               bytes = (u64)clen << cluster_bits;
70866 +               lbo = (u64)lcn << cluster_bits;
70868 +               vcn += clen;
70870 +               if (vbo + bytes >= end) {
70871 +                       bytes = end - vbo;
70872 +                       flags |= FIEMAP_EXTENT_LAST;
70873 +               }
70875 +               if (vbo + bytes <= valid) {
70876 +                       ;
70877 +               } else if (vbo >= valid) {
70878 +                       flags |= FIEMAP_EXTENT_UNWRITTEN;
70879 +               } else {
70880 +                       /* vbo < valid && valid < vbo + bytes */
70881 +                       u64 dlen = valid - vbo;
70883 +                       err = fiemap_fill_next_extent(fieinfo, vbo, lbo, dlen,
70884 +                                                     flags);
70885 +                       if (err < 0)
70886 +                               break;
70887 +                       if (err == 1) {
70888 +                               err = 0;
70889 +                               break;
70890 +                       }
70892 +                       vbo = valid;
70893 +                       bytes -= dlen;
70894 +                       if (!bytes)
70895 +                               continue;
70897 +                       lbo += dlen;
70898 +                       flags |= FIEMAP_EXTENT_UNWRITTEN;
70899 +               }
70901 +               err = fiemap_fill_next_extent(fieinfo, vbo, lbo, bytes, flags);
70902 +               if (err < 0)
70903 +                       break;
70904 +               if (err == 1) {
70905 +                       err = 0;
70906 +                       break;
70907 +               }
70909 +               vbo += bytes;
70910 +       }
70912 +       up_read(run_lock);
70914 +out:
70915 +       return err;
70919 + * When decompressing, we typically obtain more than one page per reference.
70920 + * We inject the additional pages into the page cache.
70921 + */
70922 +int ni_readpage_cmpr(struct ntfs_inode *ni, struct page *page)
70924 +       int err;
70925 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
70926 +       struct address_space *mapping = page->mapping;
70927 +       pgoff_t index = page->index;
70928 +       u64 frame_vbo, vbo = (u64)index << PAGE_SHIFT;
70929 +       struct page **pages = NULL; /*array of at most 16 pages. stack?*/
70930 +       u8 frame_bits;
70931 +       CLST frame;
70932 +       u32 i, idx, frame_size, pages_per_frame;
70933 +       gfp_t gfp_mask;
70934 +       struct page *pg;
70936 +       if (vbo >= ni->vfs_inode.i_size) {
70937 +               SetPageUptodate(page);
70938 +               err = 0;
70939 +               goto out;
70940 +       }
70942 +       if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
70943 +               /* xpress or lzx */
70944 +               frame_bits = ni_ext_compress_bits(ni);
70945 +       } else {
70946 +               /* lznt compression*/
70947 +               frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
70948 +       }
70949 +       frame_size = 1u << frame_bits;
70950 +       frame = vbo >> frame_bits;
70951 +       frame_vbo = (u64)frame << frame_bits;
70952 +       idx = (vbo - frame_vbo) >> PAGE_SHIFT;
70954 +       pages_per_frame = frame_size >> PAGE_SHIFT;
70955 +       pages = ntfs_zalloc(pages_per_frame * sizeof(struct page *));
70956 +       if (!pages) {
70957 +               err = -ENOMEM;
70958 +               goto out;
70959 +       }
70961 +       pages[idx] = page;
70962 +       index = frame_vbo >> PAGE_SHIFT;
70963 +       gfp_mask = mapping_gfp_mask(mapping);
70965 +       for (i = 0; i < pages_per_frame; i++, index++) {
70966 +               if (i == idx)
70967 +                       continue;
70969 +               pg = find_or_create_page(mapping, index, gfp_mask);
70970 +               if (!pg) {
70971 +                       err = -ENOMEM;
70972 +                       goto out1;
70973 +               }
70974 +               pages[i] = pg;
70975 +       }
70977 +       err = ni_read_frame(ni, frame_vbo, pages, pages_per_frame);
70979 +out1:
70980 +       if (err)
70981 +               SetPageError(page);
70983 +       for (i = 0; i < pages_per_frame; i++) {
70984 +               pg = pages[i];
70985 +               if (i == idx)
70986 +                       continue;
70987 +               unlock_page(pg);
70988 +               put_page(pg);
70989 +       }
70991 +out:
70992 +       /* At this point, err contains 0 or -EIO depending on the "critical" page */
70993 +       ntfs_free(pages);
70994 +       unlock_page(page);
70996 +       return err;
70999 +#ifdef CONFIG_NTFS3_LZX_XPRESS
71001 + * decompress lzx/xpress compressed file
71002 + * remove ATTR_DATA::WofCompressedData
71003 + * remove ATTR_REPARSE
71004 + */
71005 +int ni_decompress_file(struct ntfs_inode *ni)
71007 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
71008 +       struct inode *inode = &ni->vfs_inode;
71009 +       loff_t i_size = inode->i_size;
71010 +       struct address_space *mapping = inode->i_mapping;
71011 +       gfp_t gfp_mask = mapping_gfp_mask(mapping);
71012 +       struct page **pages = NULL;
71013 +       struct ATTR_LIST_ENTRY *le;
71014 +       struct ATTRIB *attr;
71015 +       CLST vcn, cend, lcn, clen, end;
71016 +       pgoff_t index;
71017 +       u64 vbo;
71018 +       u8 frame_bits;
71019 +       u32 i, frame_size, pages_per_frame, bytes;
71020 +       struct mft_inode *mi;
71021 +       int err;
71023 +       /* clusters for decompressed data*/
71024 +       cend = bytes_to_cluster(sbi, i_size);
71026 +       if (!i_size)
71027 +               goto remove_wof;
71029 +       /* check in advance */
71030 +       if (cend > wnd_zeroes(&sbi->used.bitmap)) {
71031 +               err = -ENOSPC;
71032 +               goto out;
71033 +       }
71035 +       frame_bits = ni_ext_compress_bits(ni);
71036 +       frame_size = 1u << frame_bits;
71037 +       pages_per_frame = frame_size >> PAGE_SHIFT;
71038 +       pages = ntfs_zalloc(pages_per_frame * sizeof(struct page *));
71039 +       if (!pages) {
71040 +               err = -ENOMEM;
71041 +               goto out;
71042 +       }
71044 +       /*
71045 +        * Step 1: decompress data and copy to new allocated clusters
71046 +        */
71047 +       index = 0;
71048 +       for (vbo = 0; vbo < i_size; vbo += bytes) {
71049 +               u32 nr_pages;
71050 +               bool new;
71052 +               if (vbo + frame_size > i_size) {
71053 +                       bytes = i_size - vbo;
71054 +                       nr_pages = (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
71055 +               } else {
71056 +                       nr_pages = pages_per_frame;
71057 +                       bytes = frame_size;
71058 +               }
71060 +               end = bytes_to_cluster(sbi, vbo + bytes);
71062 +               for (vcn = vbo >> sbi->cluster_bits; vcn < end; vcn += clen) {
71063 +                       err = attr_data_get_block(ni, vcn, cend - vcn, &lcn,
71064 +                                                 &clen, &new);
71065 +                       if (err)
71066 +                               goto out;
71067 +               }
71069 +               for (i = 0; i < pages_per_frame; i++, index++) {
71070 +                       struct page *pg;
71072 +                       pg = find_or_create_page(mapping, index, gfp_mask);
71073 +                       if (!pg) {
71074 +                               while (i--) {
71075 +                                       unlock_page(pages[i]);
71076 +                                       put_page(pages[i]);
71077 +                               }
71078 +                               err = -ENOMEM;
71079 +                               goto out;
71080 +                       }
71081 +                       pages[i] = pg;
71082 +               }
71084 +               err = ni_read_frame(ni, vbo, pages, pages_per_frame);
71086 +               if (!err) {
71087 +                       down_read(&ni->file.run_lock);
71088 +                       err = ntfs_bio_pages(sbi, &ni->file.run, pages,
71089 +                                            nr_pages, vbo, bytes,
71090 +                                            REQ_OP_WRITE);
71091 +                       up_read(&ni->file.run_lock);
71092 +               }
71094 +               for (i = 0; i < pages_per_frame; i++) {
71095 +                       unlock_page(pages[i]);
71096 +                       put_page(pages[i]);
71097 +               }
71099 +               if (err)
71100 +                       goto out;
71102 +               cond_resched();
71103 +       }
71105 +remove_wof:
71106 +       /*
71107 +        * Step 2: deallocate attributes ATTR_DATA::WofCompressedData and ATTR_REPARSE
71108 +        */
71109 +       attr = NULL;
71110 +       le = NULL;
71111 +       while ((attr = ni_enum_attr_ex(ni, attr, &le, NULL))) {
71112 +               CLST svcn, evcn;
71113 +               u32 asize, roff;
71115 +               if (attr->type == ATTR_REPARSE) {
71116 +                       struct MFT_REF ref;
71118 +                       mi_get_ref(&ni->mi, &ref);
71119 +                       ntfs_remove_reparse(sbi, 0, &ref);
71120 +               }
71122 +               if (!attr->non_res)
71123 +                       continue;
71125 +               if (attr->type != ATTR_REPARSE &&
71126 +                   (attr->type != ATTR_DATA ||
71127 +                    attr->name_len != ARRAY_SIZE(WOF_NAME) ||
71128 +                    memcmp(attr_name(attr), WOF_NAME, sizeof(WOF_NAME))))
71129 +                       continue;
71131 +               svcn = le64_to_cpu(attr->nres.svcn);
71132 +               evcn = le64_to_cpu(attr->nres.evcn);
71134 +               if (evcn + 1 <= svcn)
71135 +                       continue;
71137 +               asize = le32_to_cpu(attr->size);
71138 +               roff = le16_to_cpu(attr->nres.run_off);
71140 +               /*run==1 means unpack and deallocate*/
71141 +               run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, evcn, svcn,
71142 +                             Add2Ptr(attr, roff), asize - roff);
71143 +       }
71145 +       /*
71146 +        * Step 3: remove attribute ATTR_DATA::WofCompressedData
71147 +        */
71148 +       err = ni_remove_attr(ni, ATTR_DATA, WOF_NAME, ARRAY_SIZE(WOF_NAME),
71149 +                            false, NULL);
71150 +       if (err)
71151 +               goto out;
71153 +       /*
71154 +        * Step 4: remove ATTR_REPARSE
71155 +        */
71156 +       err = ni_remove_attr(ni, ATTR_REPARSE, NULL, 0, false, NULL);
71157 +       if (err)
71158 +               goto out;
71160 +       /*
71161 +        * Step 5: remove sparse flag from data attribute
71162 +        */
71163 +       attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
71164 +       if (!attr) {
71165 +               err = -EINVAL;
71166 +               goto out;
71167 +       }
71169 +       if (attr->non_res && is_attr_sparsed(attr)) {
71170 +               /* sparsed attribute header is 8 bytes bigger than normal*/
71171 +               struct MFT_REC *rec = mi->mrec;
71172 +               u32 used = le32_to_cpu(rec->used);
71173 +               u32 asize = le32_to_cpu(attr->size);
71174 +               u16 roff = le16_to_cpu(attr->nres.run_off);
71175 +               char *rbuf = Add2Ptr(attr, roff);
71177 +               memmove(rbuf - 8, rbuf, used - PtrOffset(rec, rbuf));
71178 +               attr->size = cpu_to_le32(asize - 8);
71179 +               attr->flags &= ~ATTR_FLAG_SPARSED;
71180 +               attr->nres.run_off = cpu_to_le16(roff - 8);
71181 +               attr->nres.c_unit = 0;
71182 +               rec->used = cpu_to_le32(used - 8);
71183 +               mi->dirty = true;
71184 +               ni->std_fa &= ~(FILE_ATTRIBUTE_SPARSE_FILE |
71185 +                               FILE_ATTRIBUTE_REPARSE_POINT);
71187 +               mark_inode_dirty(inode);
71188 +       }
71190 +       /* clear cached flag */
71191 +       ni->ni_flags &= ~NI_FLAG_COMPRESSED_MASK;
71192 +       if (ni->file.offs_page) {
71193 +               put_page(ni->file.offs_page);
71194 +               ni->file.offs_page = NULL;
71195 +       }
71196 +       mapping->a_ops = &ntfs_aops;
71198 +out:
71199 +       ntfs_free(pages);
71200 +       if (err) {
71201 +               make_bad_inode(inode);
71202 +               ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
71203 +       }
71205 +       return err;
71208 +/* external compression lzx/xpress */
71209 +static int decompress_lzx_xpress(struct ntfs_sb_info *sbi, const char *cmpr,
71210 +                                size_t cmpr_size, void *unc, size_t unc_size,
71211 +                                u32 frame_size)
71213 +       int err;
71214 +       void *ctx;
71216 +       if (cmpr_size == unc_size) {
71217 +               /* frame not compressed */
71218 +               memcpy(unc, cmpr, unc_size);
71219 +               return 0;
71220 +       }
71222 +       err = 0;
71223 +       if (frame_size == 0x8000) {
71224 +               mutex_lock(&sbi->compress.mtx_lzx);
71225 +               /* LZX: frame compressed */
71226 +               ctx = sbi->compress.lzx;
71227 +               if (!ctx) {
71228 +                       /* Lazy initialize lzx decompress context */
71229 +                       ctx = lzx_allocate_decompressor();
71230 +                       if (!ctx) {
71231 +                               err = -ENOMEM;
71232 +                               goto out1;
71233 +                       }
71235 +                       sbi->compress.lzx = ctx;
71236 +               }
71238 +               if (lzx_decompress(ctx, cmpr, cmpr_size, unc, unc_size)) {
71239 +                       /* treat all errors as "invalid argument" */
71240 +                       err = -EINVAL;
71241 +               }
71242 +out1:
71243 +               mutex_unlock(&sbi->compress.mtx_lzx);
71244 +       } else {
71245 +               /* XPRESS: frame compressed */
71246 +               mutex_lock(&sbi->compress.mtx_xpress);
71247 +               ctx = sbi->compress.xpress;
71248 +               if (!ctx) {
71249 +                       /* Lazy initialize xpress decompress context */
71250 +                       ctx = xpress_allocate_decompressor();
71251 +                       if (!ctx) {
71252 +                               err = -ENOMEM;
71253 +                               goto out2;
71254 +                       }
71256 +                       sbi->compress.xpress = ctx;
71257 +               }
71259 +               if (xpress_decompress(ctx, cmpr, cmpr_size, unc, unc_size)) {
71260 +                       /* treat all errors as "invalid argument" */
71261 +                       err = -EINVAL;
71262 +               }
71263 +out2:
71264 +               mutex_unlock(&sbi->compress.mtx_xpress);
71265 +       }
71266 +       return err;
71268 +#endif
71271 + * ni_read_frame
71272 + *
71273 + * pages - array of locked pages
71274 + */
71275 +int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
71276 +                 u32 pages_per_frame)
71278 +       int err;
71279 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
71280 +       u8 cluster_bits = sbi->cluster_bits;
71281 +       char *frame_ondisk = NULL;
71282 +       char *frame_mem = NULL;
71283 +       struct page **pages_disk = NULL;
71284 +       struct ATTR_LIST_ENTRY *le = NULL;
71285 +       struct runs_tree *run = &ni->file.run;
71286 +       u64 valid_size = ni->i_valid;
71287 +       u64 vbo_disk;
71288 +       size_t unc_size;
71289 +       u32 frame_size, i, npages_disk, ondisk_size;
71290 +       struct page *pg;
71291 +       struct ATTRIB *attr;
71292 +       CLST frame, clst_data;
71294 +       /*
71295 +        * To simplify decompress algorithm do vmap for source and target pages
71296 +        */
71297 +       for (i = 0; i < pages_per_frame; i++)
71298 +               kmap(pages[i]);
71300 +       frame_size = pages_per_frame << PAGE_SHIFT;
71301 +       frame_mem = vmap(pages, pages_per_frame, VM_MAP, PAGE_KERNEL);
71302 +       if (!frame_mem) {
71303 +               err = -ENOMEM;
71304 +               goto out;
71305 +       }
71307 +       attr = ni_find_attr(ni, NULL, &le, ATTR_DATA, NULL, 0, NULL, NULL);
71308 +       if (!attr) {
71309 +               err = -ENOENT;
71310 +               goto out1;
71311 +       }
71313 +       if (!attr->non_res) {
71314 +               u32 data_size = le32_to_cpu(attr->res.data_size);
71316 +               memset(frame_mem, 0, frame_size);
71317 +               if (frame_vbo < data_size) {
71318 +                       ondisk_size = data_size - frame_vbo;
71319 +                       memcpy(frame_mem, resident_data(attr) + frame_vbo,
71320 +                              min(ondisk_size, frame_size));
71321 +               }
71322 +               err = 0;
71323 +               goto out1;
71324 +       }
71326 +       if (frame_vbo >= valid_size) {
71327 +               memset(frame_mem, 0, frame_size);
71328 +               err = 0;
71329 +               goto out1;
71330 +       }
71332 +       if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
71333 +#ifndef CONFIG_NTFS3_LZX_XPRESS
71334 +               err = -EOPNOTSUPP;
71335 +               goto out1;
71336 +#else
71337 +               u32 frame_bits = ni_ext_compress_bits(ni);
71338 +               u64 frame64 = frame_vbo >> frame_bits;
71339 +               u64 frames, vbo_data;
71341 +               if (frame_size != (1u << frame_bits)) {
71342 +                       err = -EINVAL;
71343 +                       goto out1;
71344 +               }
71345 +               switch (frame_size) {
71346 +               case 0x1000:
71347 +               case 0x2000:
71348 +               case 0x4000:
71349 +               case 0x8000:
71350 +                       break;
71351 +               default:
71352 +                       /* unknown compression */
71353 +                       err = -EOPNOTSUPP;
71354 +                       goto out1;
71355 +               }
71357 +               attr = ni_find_attr(ni, attr, &le, ATTR_DATA, WOF_NAME,
71358 +                                   ARRAY_SIZE(WOF_NAME), NULL, NULL);
71359 +               if (!attr) {
71360 +                       ntfs_inode_err(
71361 +                               &ni->vfs_inode,
71362 +                               "external compressed file should contains data attribute \"WofCompressedData\"");
71363 +                       err = -EINVAL;
71364 +                       goto out1;
71365 +               }
71367 +               if (!attr->non_res) {
71368 +                       run = NULL;
71369 +               } else {
71370 +                       run = run_alloc();
71371 +                       if (!run) {
71372 +                               err = -ENOMEM;
71373 +                               goto out1;
71374 +                       }
71375 +               }
71377 +               frames = (ni->vfs_inode.i_size - 1) >> frame_bits;
71379 +               err = attr_wof_frame_info(ni, attr, run, frame64, frames,
71380 +                                         frame_bits, &ondisk_size, &vbo_data);
71381 +               if (err)
71382 +                       goto out2;
71384 +               if (frame64 == frames) {
71385 +                       unc_size = 1 + ((ni->vfs_inode.i_size - 1) &
71386 +                                       (frame_size - 1));
71387 +                       ondisk_size = attr_size(attr) - vbo_data;
71388 +               } else {
71389 +                       unc_size = frame_size;
71390 +               }
71392 +               if (ondisk_size > frame_size) {
71393 +                       err = -EINVAL;
71394 +                       goto out2;
71395 +               }
71397 +               if (!attr->non_res) {
71398 +                       if (vbo_data + ondisk_size >
71399 +                           le32_to_cpu(attr->res.data_size)) {
71400 +                               err = -EINVAL;
71401 +                               goto out1;
71402 +                       }
71404 +                       err = decompress_lzx_xpress(
71405 +                               sbi, Add2Ptr(resident_data(attr), vbo_data),
71406 +                               ondisk_size, frame_mem, unc_size, frame_size);
71407 +                       goto out1;
71408 +               }
71409 +               vbo_disk = vbo_data;
71410 +               /* load all runs to read [vbo_disk-vbo_to) */
71411 +               err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
71412 +                                          ARRAY_SIZE(WOF_NAME), run, vbo_disk,
71413 +                                          vbo_data + ondisk_size);
71414 +               if (err)
71415 +                       goto out2;
71416 +               npages_disk = (ondisk_size + (vbo_disk & (PAGE_SIZE - 1)) +
71417 +                              PAGE_SIZE - 1) >>
71418 +                             PAGE_SHIFT;
71419 +#endif
71420 +       } else if (is_attr_compressed(attr)) {
71421 +               /* lznt compression*/
71422 +               if (sbi->cluster_size > NTFS_LZNT_MAX_CLUSTER) {
71423 +                       err = -EOPNOTSUPP;
71424 +                       goto out1;
71425 +               }
71427 +               if (attr->nres.c_unit != NTFS_LZNT_CUNIT) {
71428 +                       err = -EOPNOTSUPP;
71429 +                       goto out1;
71430 +               }
71432 +               down_write(&ni->file.run_lock);
71433 +               run_truncate_around(run, le64_to_cpu(attr->nres.svcn));
71434 +               frame = frame_vbo >> (cluster_bits + NTFS_LZNT_CUNIT);
71435 +               err = attr_is_frame_compressed(ni, attr, frame, &clst_data);
71436 +               up_write(&ni->file.run_lock);
71437 +               if (err)
71438 +                       goto out1;
71440 +               if (!clst_data) {
71441 +                       memset(frame_mem, 0, frame_size);
71442 +                       goto out1;
71443 +               }
71445 +               frame_size = sbi->cluster_size << NTFS_LZNT_CUNIT;
71446 +               ondisk_size = clst_data << cluster_bits;
71448 +               if (clst_data >= NTFS_LZNT_CLUSTERS) {
71449 +                       /* frame is not compressed */
71450 +                       down_read(&ni->file.run_lock);
71451 +                       err = ntfs_bio_pages(sbi, run, pages, pages_per_frame,
71452 +                                            frame_vbo, ondisk_size,
71453 +                                            REQ_OP_READ);
71454 +                       up_read(&ni->file.run_lock);
71455 +                       goto out1;
71456 +               }
71457 +               vbo_disk = frame_vbo;
71458 +               npages_disk = (ondisk_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
71459 +       } else {
71460 +               __builtin_unreachable();
71461 +               err = -EINVAL;
71462 +               goto out1;
71463 +       }
71465 +       pages_disk = ntfs_zalloc(npages_disk * sizeof(struct page *));
71466 +       if (!pages_disk) {
71467 +               err = -ENOMEM;
71468 +               goto out2;
71469 +       }
71471 +       for (i = 0; i < npages_disk; i++) {
71472 +               pg = alloc_page(GFP_KERNEL);
71473 +               if (!pg) {
71474 +                       err = -ENOMEM;
71475 +                       goto out3;
71476 +               }
71477 +               pages_disk[i] = pg;
71478 +               lock_page(pg);
71479 +               kmap(pg);
71480 +       }
71482 +       /* read 'ondisk_size' bytes from disk */
71483 +       down_read(&ni->file.run_lock);
71484 +       err = ntfs_bio_pages(sbi, run, pages_disk, npages_disk, vbo_disk,
71485 +                            ondisk_size, REQ_OP_READ);
71486 +       up_read(&ni->file.run_lock);
71487 +       if (err)
71488 +               goto out3;
71490 +       /*
71491 +        * To simplify decompress algorithm do vmap for source and target pages
71492 +        */
71493 +       frame_ondisk = vmap(pages_disk, npages_disk, VM_MAP, PAGE_KERNEL_RO);
71494 +       if (!frame_ondisk) {
71495 +               err = -ENOMEM;
71496 +               goto out3;
71497 +       }
71499 +       /* decompress: frame_ondisk -> frame_mem */
71500 +#ifdef CONFIG_NTFS3_LZX_XPRESS
71501 +       if (run != &ni->file.run) {
71502 +               /* LZX or XPRESS */
71503 +               err = decompress_lzx_xpress(
71504 +                       sbi, frame_ondisk + (vbo_disk & (PAGE_SIZE - 1)),
71505 +                       ondisk_size, frame_mem, unc_size, frame_size);
71506 +       } else
71507 +#endif
71508 +       {
71509 +               /* LZNT - native ntfs compression */
71510 +               unc_size = decompress_lznt(frame_ondisk, ondisk_size, frame_mem,
71511 +                                          frame_size);
71512 +               if ((ssize_t)unc_size < 0)
71513 +                       err = unc_size;
71514 +               else if (!unc_size || unc_size > frame_size)
71515 +                       err = -EINVAL;
71516 +       }
71517 +       if (!err && valid_size < frame_vbo + frame_size) {
71518 +               size_t ok = valid_size - frame_vbo;
71520 +               memset(frame_mem + ok, 0, frame_size - ok);
71521 +       }
71523 +       vunmap(frame_ondisk);
71525 +out3:
71526 +       for (i = 0; i < npages_disk; i++) {
71527 +               pg = pages_disk[i];
71528 +               if (pg) {
71529 +                       kunmap(pg);
71530 +                       unlock_page(pg);
71531 +                       put_page(pg);
71532 +               }
71533 +       }
71534 +       ntfs_free(pages_disk);
71536 +out2:
71537 +#ifdef CONFIG_NTFS3_LZX_XPRESS
71538 +       if (run != &ni->file.run)
71539 +               run_free(run);
71540 +#endif
71541 +out1:
71542 +       vunmap(frame_mem);
71543 +out:
71544 +       for (i = 0; i < pages_per_frame; i++) {
71545 +               pg = pages[i];
71546 +               kunmap(pg);
71547 +               ClearPageError(pg);
71548 +               SetPageUptodate(pg);
71549 +       }
71551 +       return err;
71555 + * ni_write_frame
71556 + *
71557 + * pages - array of locked pages
71558 + */
71559 +int ni_write_frame(struct ntfs_inode *ni, struct page **pages,
71560 +                  u32 pages_per_frame)
71562 +       int err;
71563 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
71564 +       u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
71565 +       u32 frame_size = sbi->cluster_size << NTFS_LZNT_CUNIT;
71566 +       u64 frame_vbo = (u64)pages[0]->index << PAGE_SHIFT;
71567 +       CLST frame = frame_vbo >> frame_bits;
71568 +       char *frame_ondisk = NULL;
71569 +       struct page **pages_disk = NULL;
71570 +       struct ATTR_LIST_ENTRY *le = NULL;
71571 +       char *frame_mem;
71572 +       struct ATTRIB *attr;
71573 +       struct mft_inode *mi;
71574 +       u32 i;
71575 +       struct page *pg;
71576 +       size_t compr_size, ondisk_size;
71577 +       struct lznt *lznt;
71579 +       attr = ni_find_attr(ni, NULL, &le, ATTR_DATA, NULL, 0, NULL, &mi);
71580 +       if (!attr) {
71581 +               err = -ENOENT;
71582 +               goto out;
71583 +       }
71585 +       if (WARN_ON(!is_attr_compressed(attr))) {
71586 +               err = -EINVAL;
71587 +               goto out;
71588 +       }
71590 +       if (sbi->cluster_size > NTFS_LZNT_MAX_CLUSTER) {
71591 +               err = -EOPNOTSUPP;
71592 +               goto out;
71593 +       }
71595 +       if (!attr->non_res) {
71596 +               down_write(&ni->file.run_lock);
71597 +               err = attr_make_nonresident(ni, attr, le, mi,
71598 +                                           le32_to_cpu(attr->res.data_size),
71599 +                                           &ni->file.run, &attr, pages[0]);
71600 +               up_write(&ni->file.run_lock);
71601 +               if (err)
71602 +                       goto out;
71603 +       }
71605 +       if (attr->nres.c_unit != NTFS_LZNT_CUNIT) {
71606 +               err = -EOPNOTSUPP;
71607 +               goto out;
71608 +       }
71610 +       pages_disk = ntfs_zalloc(pages_per_frame * sizeof(struct page *));
71611 +       if (!pages_disk) {
71612 +               err = -ENOMEM;
71613 +               goto out;
71614 +       }
71616 +       for (i = 0; i < pages_per_frame; i++) {
71617 +               pg = alloc_page(GFP_KERNEL);
71618 +               if (!pg) {
71619 +                       err = -ENOMEM;
71620 +                       goto out1;
71621 +               }
71622 +               pages_disk[i] = pg;
71623 +               lock_page(pg);
71624 +               kmap(pg);
71625 +       }
71627 +       /*
71628 +        * To simplify compress algorithm do vmap for source and target pages
71629 +        */
71630 +       frame_ondisk = vmap(pages_disk, pages_per_frame, VM_MAP, PAGE_KERNEL);
71631 +       if (!frame_ondisk) {
71632 +               err = -ENOMEM;
71633 +               goto out1;
71634 +       }
71636 +       for (i = 0; i < pages_per_frame; i++)
71637 +               kmap(pages[i]);
71639 +       /* map in-memory frame for read-only */
71640 +       frame_mem = vmap(pages, pages_per_frame, VM_MAP, PAGE_KERNEL_RO);
71641 +       if (!frame_mem) {
71642 +               err = -ENOMEM;
71643 +               goto out2;
71644 +       }
71646 +       mutex_lock(&sbi->compress.mtx_lznt);
71647 +       lznt = NULL;
71648 +       if (!sbi->compress.lznt) {
71649 +               /*
71650 +                * lznt implements two levels of compression:
71651 +                * 0 - standard compression
71652 +                * 1 - best compression, requires a lot of cpu
71653 +                * use mount option?
71654 +                */
71655 +               lznt = get_lznt_ctx(0);
71656 +               if (!lznt) {
71657 +                       mutex_unlock(&sbi->compress.mtx_lznt);
71658 +                       err = -ENOMEM;
71659 +                       goto out3;
71660 +               }
71662 +               sbi->compress.lznt = lznt;
71663 +               lznt = NULL;
71664 +       }
71666 +       /* compress: frame_mem -> frame_ondisk */
71667 +       compr_size = compress_lznt(frame_mem, frame_size, frame_ondisk,
71668 +                                  frame_size, sbi->compress.lznt);
71669 +       mutex_unlock(&sbi->compress.mtx_lznt);
71670 +       ntfs_free(lznt);
71672 +       if (compr_size + sbi->cluster_size > frame_size) {
71673 +               /* frame is not compressed */
71674 +               compr_size = frame_size;
71675 +               ondisk_size = frame_size;
71676 +       } else if (compr_size) {
71677 +               /* frame is compressed */
71678 +               ondisk_size = ntfs_up_cluster(sbi, compr_size);
71679 +               memset(frame_ondisk + compr_size, 0, ondisk_size - compr_size);
71680 +       } else {
71681 +               /* frame is sparsed */
71682 +               ondisk_size = 0;
71683 +       }
71685 +       down_write(&ni->file.run_lock);
71686 +       run_truncate_around(&ni->file.run, le64_to_cpu(attr->nres.svcn));
71687 +       err = attr_allocate_frame(ni, frame, compr_size, ni->i_valid);
71688 +       up_write(&ni->file.run_lock);
71689 +       if (err)
71690 +               goto out2;
71692 +       if (!ondisk_size)
71693 +               goto out2;
71695 +       down_read(&ni->file.run_lock);
71696 +       err = ntfs_bio_pages(sbi, &ni->file.run,
71697 +                            ondisk_size < frame_size ? pages_disk : pages,
71698 +                            pages_per_frame, frame_vbo, ondisk_size,
71699 +                            REQ_OP_WRITE);
71700 +       up_read(&ni->file.run_lock);
71702 +out3:
71703 +       vunmap(frame_mem);
71705 +out2:
71706 +       for (i = 0; i < pages_per_frame; i++)
71707 +               kunmap(pages[i]);
71709 +       vunmap(frame_ondisk);
71710 +out1:
71711 +       for (i = 0; i < pages_per_frame; i++) {
71712 +               pg = pages_disk[i];
71713 +               if (pg) {
71714 +                       kunmap(pg);
71715 +                       unlock_page(pg);
71716 +                       put_page(pg);
71717 +               }
71718 +       }
71719 +       ntfs_free(pages_disk);
71720 +out:
71721 +       return err;
71725 + * update duplicate info of ATTR_FILE_NAME in MFT and in parent directories
71726 + */
71727 +static bool ni_update_parent(struct ntfs_inode *ni, struct NTFS_DUP_INFO *dup,
71728 +                            int sync)
71730 +       struct ATTRIB *attr;
71731 +       struct mft_inode *mi;
71732 +       struct ATTR_LIST_ENTRY *le = NULL;
71733 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
71734 +       struct super_block *sb = sbi->sb;
71735 +       bool re_dirty = false;
71736 +       bool active = sb->s_flags & SB_ACTIVE;
71737 +       bool upd_parent = ni->ni_flags & NI_FLAG_UPDATE_PARENT;
71739 +       if (ni->mi.mrec->flags & RECORD_FLAG_DIR) {
71740 +               dup->fa |= FILE_ATTRIBUTE_DIRECTORY;
71741 +               attr = NULL;
71742 +               dup->alloc_size = 0;
71743 +               dup->data_size = 0;
71744 +       } else {
71745 +               dup->fa &= ~FILE_ATTRIBUTE_DIRECTORY;
71747 +               attr = ni_find_attr(ni, NULL, &le, ATTR_DATA, NULL, 0, NULL,
71748 +                                   &mi);
71749 +               if (!attr) {
71750 +                       dup->alloc_size = dup->data_size = 0;
71751 +               } else if (!attr->non_res) {
71752 +                       u32 data_size = le32_to_cpu(attr->res.data_size);
71754 +                       dup->alloc_size = cpu_to_le64(QuadAlign(data_size));
71755 +                       dup->data_size = cpu_to_le64(data_size);
71756 +               } else {
71757 +                       u64 new_valid = ni->i_valid;
71758 +                       u64 data_size = le64_to_cpu(attr->nres.data_size);
71759 +                       __le64 valid_le;
71761 +                       dup->alloc_size = is_attr_ext(attr)
71762 +                                                 ? attr->nres.total_size
71763 +                                                 : attr->nres.alloc_size;
71764 +                       dup->data_size = attr->nres.data_size;
71766 +                       if (new_valid > data_size)
71767 +                               new_valid = data_size;
71769 +                       valid_le = cpu_to_le64(new_valid);
71770 +                       if (valid_le != attr->nres.valid_size) {
71771 +                               attr->nres.valid_size = valid_le;
71772 +                               mi->dirty = true;
71773 +                       }
71774 +               }
71775 +       }
71777 +       /* TODO: fill reparse info */
71778 +       dup->reparse = 0;
71779 +       dup->ea_size = 0;
71781 +       if (ni->ni_flags & NI_FLAG_EA) {
71782 +               attr = ni_find_attr(ni, attr, &le, ATTR_EA_INFO, NULL, 0, NULL,
71783 +                                   NULL);
71784 +               if (attr) {
71785 +                       const struct EA_INFO *info;
71787 +                       info = resident_data_ex(attr, sizeof(struct EA_INFO));
71788 +                       dup->ea_size = info->size_pack;
71789 +               }
71790 +       }
71792 +       attr = NULL;
71793 +       le = NULL;
71795 +       while ((attr = ni_find_attr(ni, attr, &le, ATTR_NAME, NULL, 0, NULL,
71796 +                                   &mi))) {
71797 +               struct inode *dir;
71798 +               struct ATTR_FILE_NAME *fname;
71800 +               fname = resident_data_ex(attr, SIZEOF_ATTRIBUTE_FILENAME);
71801 +               if (!fname)
71802 +                       continue;
71804 +               if (memcmp(&fname->dup, dup, sizeof(fname->dup))) {
71805 +                       memcpy(&fname->dup, dup, sizeof(fname->dup));
71806 +                       mi->dirty = true;
71807 +               } else if (!upd_parent) {
71808 +                       continue;
71809 +               }
71811 +               if (!active)
71812 +                       continue; /*avoid __wait_on_freeing_inode(inode); */
71814 +               /*ntfs_iget5 may sleep*/
71815 +               dir = ntfs_iget5(sb, &fname->home, NULL);
71816 +               if (IS_ERR(dir)) {
71817 +                       ntfs_inode_warn(
71818 +                               &ni->vfs_inode,
71819 +                               "failed to open parent directory r=%lx to update",
71820 +                               (long)ino_get(&fname->home));
71821 +                       continue;
71822 +               }
71824 +               if (!is_bad_inode(dir)) {
71825 +                       struct ntfs_inode *dir_ni = ntfs_i(dir);
71827 +                       if (!ni_trylock(dir_ni)) {
71828 +                               re_dirty = true;
71829 +                       } else {
71830 +                               indx_update_dup(dir_ni, sbi, fname, dup, sync);
71831 +                               ni_unlock(dir_ni);
71832 +                       }
71833 +               }
71834 +               iput(dir);
71835 +       }
71837 +       return re_dirty;
71841 + * ni_write_inode
71842 + *
71843 + * write mft base record and all subrecords to disk
71844 + */
71845 +int ni_write_inode(struct inode *inode, int sync, const char *hint)
71847 +       int err = 0, err2;
71848 +       struct ntfs_inode *ni = ntfs_i(inode);
71849 +       struct super_block *sb = inode->i_sb;
71850 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
71851 +       bool re_dirty = false;
71852 +       struct ATTR_STD_INFO *std;
71853 +       struct rb_node *node, *next;
71854 +       struct NTFS_DUP_INFO dup;
71856 +       if (is_bad_inode(inode) || sb_rdonly(sb))
71857 +               return 0;
71859 +       if (!ni_trylock(ni)) {
71860 +               /* 'ni' is under modification, skip for now */
71861 +               mark_inode_dirty_sync(inode);
71862 +               return 0;
71863 +       }
71865 +       if (is_rec_inuse(ni->mi.mrec) &&
71866 +           !(sbi->flags & NTFS_FLAGS_LOG_REPLAYING) && inode->i_nlink) {
71867 +               bool modified = false;
71869 +               /* update times in standard attribute */
71870 +               std = ni_std(ni);
71871 +               if (!std) {
71872 +                       err = -EINVAL;
71873 +                       goto out;
71874 +               }
71876 +               /* Update the access times if they have changed. */
71877 +               dup.m_time = kernel2nt(&inode->i_mtime);
71878 +               if (std->m_time != dup.m_time) {
71879 +                       std->m_time = dup.m_time;
71880 +                       modified = true;
71881 +               }
71883 +               dup.c_time = kernel2nt(&inode->i_ctime);
71884 +               if (std->c_time != dup.c_time) {
71885 +                       std->c_time = dup.c_time;
71886 +                       modified = true;
71887 +               }
71889 +               dup.a_time = kernel2nt(&inode->i_atime);
71890 +               if (std->a_time != dup.a_time) {
71891 +                       std->a_time = dup.a_time;
71892 +                       modified = true;
71893 +               }
71895 +               dup.fa = ni->std_fa;
71896 +               if (std->fa != dup.fa) {
71897 +                       std->fa = dup.fa;
71898 +                       modified = true;
71899 +               }
71901 +               if (modified)
71902 +                       ni->mi.dirty = true;
71904 +               if (!ntfs_is_meta_file(sbi, inode->i_ino) &&
71905 +                   (modified || (ni->ni_flags & NI_FLAG_UPDATE_PARENT))) {
71906 +                       dup.cr_time = std->cr_time;
71907 +                       /* Not critical if this function fail */
71908 +                       re_dirty = ni_update_parent(ni, &dup, sync);
71910 +                       if (re_dirty)
71911 +                               ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
71912 +                       else
71913 +                               ni->ni_flags &= ~NI_FLAG_UPDATE_PARENT;
71914 +               }
71916 +               /* update attribute list */
71917 +               if (ni->attr_list.size && ni->attr_list.dirty) {
71918 +                       if (inode->i_ino != MFT_REC_MFT || sync) {
71919 +                               err = ni_try_remove_attr_list(ni);
71920 +                               if (err)
71921 +                                       goto out;
71922 +                       }
71924 +                       err = al_update(ni);
71925 +                       if (err)
71926 +                               goto out;
71927 +               }
71928 +       }
71930 +       for (node = rb_first(&ni->mi_tree); node; node = next) {
71931 +               struct mft_inode *mi = rb_entry(node, struct mft_inode, node);
71932 +               bool is_empty;
71934 +               next = rb_next(node);
71936 +               if (!mi->dirty)
71937 +                       continue;
71939 +               is_empty = !mi_enum_attr(mi, NULL);
71941 +               if (is_empty)
71942 +                       clear_rec_inuse(mi->mrec);
71944 +               err2 = mi_write(mi, sync);
71945 +               if (!err && err2)
71946 +                       err = err2;
71948 +               if (is_empty) {
71949 +                       ntfs_mark_rec_free(sbi, mi->rno);
71950 +                       rb_erase(node, &ni->mi_tree);
71951 +                       mi_put(mi);
71952 +               }
71953 +       }
71955 +       if (ni->mi.dirty) {
71956 +               err2 = mi_write(&ni->mi, sync);
71957 +               if (!err && err2)
71958 +                       err = err2;
71959 +       }
71960 +out:
71961 +       ni_unlock(ni);
71963 +       if (err) {
71964 +               ntfs_err(sb, "%s r=%lx failed, %d.", hint, inode->i_ino, err);
71965 +               ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
71966 +               return err;
71967 +       }
71969 +       if (re_dirty && (sb->s_flags & SB_ACTIVE))
71970 +               mark_inode_dirty_sync(inode);
71972 +       return 0;
71974 diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c
71975 new file mode 100644
71976 index 000000000000..53da12252408
71977 --- /dev/null
71978 +++ b/fs/ntfs3/fslog.c
71979 @@ -0,0 +1,5181 @@
71980 +// SPDX-License-Identifier: GPL-2.0
71982 + *
71983 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
71984 + *
71985 + */
71987 +#include <linux/blkdev.h>
71988 +#include <linux/buffer_head.h>
71989 +#include <linux/fs.h>
71990 +#include <linux/hash.h>
71991 +#include <linux/nls.h>
71992 +#include <linux/random.h>
71993 +#include <linux/ratelimit.h>
71994 +#include <linux/slab.h>
71996 +#include "debug.h"
71997 +#include "ntfs.h"
71998 +#include "ntfs_fs.h"
72001 + * LOG FILE structs
72002 + */
72004 +// clang-format off
72006 +#define MaxLogFileSize     0x100000000ull
72007 +#define DefaultLogPageSize 4096
72008 +#define MinLogRecordPages  0x30
72010 +struct RESTART_HDR {
72011 +       struct NTFS_RECORD_HEADER rhdr; // 'RSTR'
72012 +       __le32 sys_page_size; // 0x10: Page size of the system which initialized the log
72013 +       __le32 page_size;     // 0x14: Log page size used for this log file
72014 +       __le16 ra_off;        // 0x18:
72015 +       __le16 minor_ver;     // 0x1A:
72016 +       __le16 major_ver;     // 0x1C:
72017 +       __le16 fixups[];
72020 +#define LFS_NO_CLIENT 0xffff
72021 +#define LFS_NO_CLIENT_LE cpu_to_le16(0xffff)
72023 +struct CLIENT_REC {
72024 +       __le64 oldest_lsn;
72025 +       __le64 restart_lsn; // 0x08:
72026 +       __le16 prev_client; // 0x10:
72027 +       __le16 next_client; // 0x12:
72028 +       __le16 seq_num;     // 0x14:
72029 +       u8 align[6];        // 0x16
72030 +       __le32 name_bytes;  // 0x1C: in bytes
72031 +       __le16 name[32];    // 0x20: name of client
72034 +static_assert(sizeof(struct CLIENT_REC) == 0x60);
72036 +/* Two copies of these will exist at the beginning of the log file */
72037 +struct RESTART_AREA {
72038 +       __le64 current_lsn;    // 0x00: Current logical end of log file
72039 +       __le16 log_clients;    // 0x08: Maximum number of clients
72040 +       __le16 client_idx[2];  // 0x0A: free/use index into the client record arrays
72041 +       __le16 flags;          // 0x0E: See RESTART_SINGLE_PAGE_IO
72042 +       __le32 seq_num_bits;   // 0x10: the number of bits in sequence number.
72043 +       __le16 ra_len;         // 0x14:
72044 +       __le16 client_off;     // 0x16:
72045 +       __le64 l_size;         // 0x18: Usable log file size.
72046 +       __le32 last_lsn_data_len; // 0x20:
72047 +       __le16 rec_hdr_len;    // 0x24: log page data offset
72048 +       __le16 data_off;       // 0x26: log page data length
72049 +       __le32 open_log_count; // 0x28:
72050 +       __le32 align[5];       // 0x2C:
72051 +       struct CLIENT_REC clients[]; // 0x40:
72054 +struct LOG_REC_HDR {
72055 +       __le16 redo_op;      // 0x00:  NTFS_LOG_OPERATION
72056 +       __le16 undo_op;      // 0x02:  NTFS_LOG_OPERATION
72057 +       __le16 redo_off;     // 0x04:  Offset to Redo record
72058 +       __le16 redo_len;     // 0x06:  Redo length
72059 +       __le16 undo_off;     // 0x08:  Offset to Undo record
72060 +       __le16 undo_len;     // 0x0A:  Undo length
72061 +       __le16 target_attr;  // 0x0C:
72062 +       __le16 lcns_follow;  // 0x0E:
72063 +       __le16 record_off;   // 0x10:
72064 +       __le16 attr_off;     // 0x12:
72065 +       __le16 cluster_off;  // 0x14:
72066 +       __le16 reserved;     // 0x16:
72067 +       __le64 target_vcn;   // 0x18:
72068 +       __le64 page_lcns[];  // 0x20:
72071 +static_assert(sizeof(struct LOG_REC_HDR) == 0x20);
72073 +#define RESTART_ENTRY_ALLOCATED    0xFFFFFFFF
72074 +#define RESTART_ENTRY_ALLOCATED_LE cpu_to_le32(0xFFFFFFFF)
72076 +struct RESTART_TABLE {
72077 +       __le16 size;       // 0x00:  In bytes
72078 +       __le16 used;       // 0x02: entries
72079 +       __le16 total;      // 0x04: entries
72080 +       __le16 res[3];     // 0x06:
72081 +       __le32 free_goal;  // 0x0C:
72082 +       __le32 first_free; // 0x10
72083 +       __le32 last_free;  // 0x14
72087 +static_assert(sizeof(struct RESTART_TABLE) == 0x18);
72089 +struct ATTR_NAME_ENTRY {
72090 +       __le16 off; // offset in the Open attribute Table
72091 +       __le16 name_bytes;
72092 +       __le16 name[];
72095 +struct OPEN_ATTR_ENRTY {
72096 +       __le32 next;            // 0x00: RESTART_ENTRY_ALLOCATED if allocated
72097 +       __le32 bytes_per_index; // 0x04:
72098 +       enum ATTR_TYPE type;    // 0x08:
72099 +       u8 is_dirty_pages;      // 0x0C:
72100 +       u8 is_attr_name;        // 0x0B: Faked field to manage 'ptr'
72101 +       u8 name_len;            // 0x0C: Faked field to manage 'ptr'
72102 +       u8 res;
72103 +       struct MFT_REF ref; // 0x10: File Reference of file containing attribute
72104 +       __le64 open_record_lsn; // 0x18:
72105 +       void *ptr;              // 0x20:
72108 +/* 32 bit version of 'struct OPEN_ATTR_ENRTY' */
72109 +struct OPEN_ATTR_ENRTY_32 {
72110 +       __le32 next;            // 0x00: RESTART_ENTRY_ALLOCATED if allocated
72111 +       __le32 ptr;             // 0x04:
72112 +       struct MFT_REF ref;     // 0x08:
72113 +       __le64 open_record_lsn; // 0x10:
72114 +       u8 is_dirty_pages;      // 0x18:
72115 +       u8 is_attr_name;        // 0x19
72116 +       u8 res1[2];
72117 +       enum ATTR_TYPE type;    // 0x1C:
72118 +       u8 name_len;            // 0x20:  in wchar
72119 +       u8 res2[3];
72120 +       __le32 AttributeName;   // 0x24:
72121 +       __le32 bytes_per_index; // 0x28:
72124 +#define SIZEOF_OPENATTRIBUTEENTRY0 0x2c
72125 +// static_assert( 0x2C == sizeof(struct OPEN_ATTR_ENRTY_32) );
72126 +static_assert(sizeof(struct OPEN_ATTR_ENRTY) < SIZEOF_OPENATTRIBUTEENTRY0);
72129 + * One entry exists in the Dirty Pages Table for each page which is dirty at the
72130 + * time the Restart Area is written
72131 + */
72132 +struct DIR_PAGE_ENTRY {
72133 +       __le32 next;         // 0x00:  RESTART_ENTRY_ALLOCATED if allocated
72134 +       __le32 target_attr;  // 0x04:  Index into the Open attribute Table
72135 +       __le32 transfer_len; // 0x08:
72136 +       __le32 lcns_follow;  // 0x0C:
72137 +       __le64 vcn;          // 0x10:  Vcn of dirty page
72138 +       __le64 oldest_lsn;   // 0x18:
72139 +       __le64 page_lcns[];  // 0x20:
72142 +static_assert(sizeof(struct DIR_PAGE_ENTRY) == 0x20);
72144 +/* 32 bit version of 'struct DIR_PAGE_ENTRY' */
72145 +struct DIR_PAGE_ENTRY_32 {
72146 +       __le32 next;         // 0x00:  RESTART_ENTRY_ALLOCATED if allocated
72147 +       __le32 target_attr;  // 0x04:  Index into the Open attribute Table
72148 +       __le32 transfer_len; // 0x08:
72149 +       __le32 lcns_follow;  // 0x0C:
72150 +       __le32 reserved;     // 0x10:
72151 +       __le32 vcn_low;      // 0x14:  Vcn of dirty page
72152 +       __le32 vcn_hi;       // 0x18:  Vcn of dirty page
72153 +       __le32 oldest_lsn_low; // 0x1C:
72154 +       __le32 oldest_lsn_hi; // 0x1C:
72155 +       __le32 page_lcns_low; // 0x24:
72156 +       __le32 page_lcns_hi; // 0x24:
72159 +static_assert(offsetof(struct DIR_PAGE_ENTRY_32, vcn_low) == 0x14);
72160 +static_assert(sizeof(struct DIR_PAGE_ENTRY_32) == 0x2c);
72162 +enum transact_state {
72163 +       TransactionUninitialized = 0,
72164 +       TransactionActive,
72165 +       TransactionPrepared,
72166 +       TransactionCommitted
72169 +struct TRANSACTION_ENTRY {
72170 +       __le32 next;          // 0x00: RESTART_ENTRY_ALLOCATED if allocated
72171 +       u8 transact_state;    // 0x04:
72172 +       u8 reserved[3];       // 0x05:
72173 +       __le64 first_lsn;     // 0x08:
72174 +       __le64 prev_lsn;      // 0x10:
72175 +       __le64 undo_next_lsn; // 0x18:
72176 +       __le32 undo_records;  // 0x20: Number of undo log records pending abort
72177 +       __le32 undo_len;      // 0x24: Total undo size
72180 +static_assert(sizeof(struct TRANSACTION_ENTRY) == 0x28);
72182 +struct NTFS_RESTART {
72183 +       __le32 major_ver;             // 0x00:
72184 +       __le32 minor_ver;             // 0x04:
72185 +       __le64 check_point_start;     // 0x08:
72186 +       __le64 open_attr_table_lsn;   // 0x10:
72187 +       __le64 attr_names_lsn;        // 0x18:
72188 +       __le64 dirty_pages_table_lsn; // 0x20:
72189 +       __le64 transact_table_lsn;    // 0x28:
72190 +       __le32 open_attr_len;         // 0x30: In bytes
72191 +       __le32 attr_names_len;        // 0x34: In bytes
72192 +       __le32 dirty_pages_len;       // 0x38: In bytes
72193 +       __le32 transact_table_len;    // 0x3C: In bytes
72196 +static_assert(sizeof(struct NTFS_RESTART) == 0x40);
72198 +struct NEW_ATTRIBUTE_SIZES {
72199 +       __le64 alloc_size;
72200 +       __le64 valid_size;
72201 +       __le64 data_size;
72202 +       __le64 total_size;
72205 +struct BITMAP_RANGE {
72206 +       __le32 bitmap_off;
72207 +       __le32 bits;
72210 +struct LCN_RANGE {
72211 +       __le64 lcn;
72212 +       __le64 len;
72215 +/* The following type defines the different log record types */
72216 +#define LfsClientRecord  cpu_to_le32(1)
72217 +#define LfsClientRestart cpu_to_le32(2)
72219 +/* This is used to uniquely identify a client for a particular log file */
72220 +struct CLIENT_ID {
72221 +       __le16 seq_num;
72222 +       __le16 client_idx;
72225 +/* This is the header that begins every Log Record in the log file */
72226 +struct LFS_RECORD_HDR {
72227 +       __le64 this_lsn;    // 0x00:
72228 +       __le64 client_prev_lsn;  // 0x08:
72229 +       __le64 client_undo_next_lsn; // 0x10:
72230 +       __le32 client_data_len;  // 0x18:
72231 +       struct CLIENT_ID client; // 0x1C: Owner of this log record
72232 +       __le32 record_type; // 0x20: LfsClientRecord or LfsClientRestart
72233 +       __le32 transact_id; // 0x24:
72234 +       __le16 flags;       // 0x28:    LOG_RECORD_MULTI_PAGE
72235 +       u8 align[6];        // 0x2A:
72238 +#define LOG_RECORD_MULTI_PAGE cpu_to_le16(1)
72240 +static_assert(sizeof(struct LFS_RECORD_HDR) == 0x30);
72242 +struct LFS_RECORD {
72243 +       __le16 next_record_off; // 0x00: Offset of the free space in the page
72244 +       u8 align[6];         // 0x02:
72245 +       __le64 last_end_lsn; // 0x08: lsn for the last log record which ends on the page
72248 +static_assert(sizeof(struct LFS_RECORD) == 0x10);
72250 +struct RECORD_PAGE_HDR {
72251 +       struct NTFS_RECORD_HEADER rhdr; // 'RCRD'
72252 +       __le32 rflags;     // 0x10:  See LOG_PAGE_LOG_RECORD_END
72253 +       __le16 page_count; // 0x14:
72254 +       __le16 page_pos;   // 0x16:
72255 +       struct LFS_RECORD record_hdr; // 0x18
72256 +       __le16 fixups[10]; // 0x28
72257 +       __le32 file_off;   // 0x3c: used when major version >= 2
72260 +// clang-format on
72262 +// Page contains the end of a log record
72263 +#define LOG_PAGE_LOG_RECORD_END cpu_to_le32(0x00000001)
72265 +static inline bool is_log_record_end(const struct RECORD_PAGE_HDR *hdr)
72267 +       return hdr->rflags & LOG_PAGE_LOG_RECORD_END;
72270 +static_assert(offsetof(struct RECORD_PAGE_HDR, file_off) == 0x3c);
72273 + * END of NTFS LOG structures
72274 + */
72276 +/* Define some tuning parameters to keep the restart tables a reasonable size */
72277 +#define INITIAL_NUMBER_TRANSACTIONS 5
72279 +enum NTFS_LOG_OPERATION {
72281 +       Noop = 0x00,
72282 +       CompensationLogRecord = 0x01,
72283 +       InitializeFileRecordSegment = 0x02,
72284 +       DeallocateFileRecordSegment = 0x03,
72285 +       WriteEndOfFileRecordSegment = 0x04,
72286 +       CreateAttribute = 0x05,
72287 +       DeleteAttribute = 0x06,
72288 +       UpdateResidentValue = 0x07,
72289 +       UpdateNonresidentValue = 0x08,
72290 +       UpdateMappingPairs = 0x09,
72291 +       DeleteDirtyClusters = 0x0A,
72292 +       SetNewAttributeSizes = 0x0B,
72293 +       AddIndexEntryRoot = 0x0C,
72294 +       DeleteIndexEntryRoot = 0x0D,
72295 +       AddIndexEntryAllocation = 0x0E,
72296 +       DeleteIndexEntryAllocation = 0x0F,
72297 +       WriteEndOfIndexBuffer = 0x10,
72298 +       SetIndexEntryVcnRoot = 0x11,
72299 +       SetIndexEntryVcnAllocation = 0x12,
72300 +       UpdateFileNameRoot = 0x13,
72301 +       UpdateFileNameAllocation = 0x14,
72302 +       SetBitsInNonresidentBitMap = 0x15,
72303 +       ClearBitsInNonresidentBitMap = 0x16,
72304 +       HotFix = 0x17,
72305 +       EndTopLevelAction = 0x18,
72306 +       PrepareTransaction = 0x19,
72307 +       CommitTransaction = 0x1A,
72308 +       ForgetTransaction = 0x1B,
72309 +       OpenNonresidentAttribute = 0x1C,
72310 +       OpenAttributeTableDump = 0x1D,
72311 +       AttributeNamesDump = 0x1E,
72312 +       DirtyPageTableDump = 0x1F,
72313 +       TransactionTableDump = 0x20,
72314 +       UpdateRecordDataRoot = 0x21,
72315 +       UpdateRecordDataAllocation = 0x22,
72317 +       UpdateRelativeDataInIndex =
72318 +               0x23, // NtOfsRestartUpdateRelativeDataInIndex
72319 +       UpdateRelativeDataInIndex2 = 0x24,
72320 +       ZeroEndOfFileRecord = 0x25,
72324 + * Array for log records which require a target attribute
72325 + * A true indicates that the corresponding restart operation requires a target attribute
72326 + */
72327 +static const u8 AttributeRequired[] = {
72328 +       0xFC, 0xFB, 0xFF, 0x10, 0x06,
72331 +static inline bool is_target_required(u16 op)
72333 +       bool ret = op <= UpdateRecordDataAllocation &&
72334 +                  (AttributeRequired[op >> 3] >> (op & 7) & 1);
72335 +       return ret;
72338 +static inline bool can_skip_action(enum NTFS_LOG_OPERATION op)
72340 +       switch (op) {
72341 +       case Noop:
72342 +       case DeleteDirtyClusters:
72343 +       case HotFix:
72344 +       case EndTopLevelAction:
72345 +       case PrepareTransaction:
72346 +       case CommitTransaction:
72347 +       case ForgetTransaction:
72348 +       case CompensationLogRecord:
72349 +       case OpenNonresidentAttribute:
72350 +       case OpenAttributeTableDump:
72351 +       case AttributeNamesDump:
72352 +       case DirtyPageTableDump:
72353 +       case TransactionTableDump:
72354 +               return true;
72355 +       default:
72356 +               return false;
72357 +       }
72360 +enum { lcb_ctx_undo_next, lcb_ctx_prev, lcb_ctx_next };
72362 +/* bytes per restart table */
72363 +static inline u32 bytes_per_rt(const struct RESTART_TABLE *rt)
72365 +       return le16_to_cpu(rt->used) * le16_to_cpu(rt->size) +
72366 +              sizeof(struct RESTART_TABLE);
72369 +/* log record length */
72370 +static inline u32 lrh_length(const struct LOG_REC_HDR *lr)
72372 +       u16 t16 = le16_to_cpu(lr->lcns_follow);
72374 +       return struct_size(lr, page_lcns, max_t(u16, 1, t16));
72377 +struct lcb {
72378 +       struct LFS_RECORD_HDR *lrh; // Log record header of the current lsn
72379 +       struct LOG_REC_HDR *log_rec;
72380 +       u32 ctx_mode; // lcb_ctx_undo_next/lcb_ctx_prev/lcb_ctx_next
72381 +       struct CLIENT_ID client;
72382 +       bool alloc; // if true the we should deallocate 'log_rec'
72385 +static void lcb_put(struct lcb *lcb)
72387 +       if (lcb->alloc)
72388 +               ntfs_free(lcb->log_rec);
72389 +       ntfs_free(lcb->lrh);
72390 +       ntfs_free(lcb);
72394 + * oldest_client_lsn
72395 + *
72396 + * find the oldest lsn from active clients.
72397 + */
72398 +static inline void oldest_client_lsn(const struct CLIENT_REC *ca,
72399 +                                    __le16 next_client, u64 *oldest_lsn)
72401 +       while (next_client != LFS_NO_CLIENT_LE) {
72402 +               const struct CLIENT_REC *cr = ca + le16_to_cpu(next_client);
72403 +               u64 lsn = le64_to_cpu(cr->oldest_lsn);
72405 +               /* ignore this block if it's oldest lsn is 0 */
72406 +               if (lsn && lsn < *oldest_lsn)
72407 +                       *oldest_lsn = lsn;
72409 +               next_client = cr->next_client;
72410 +       }
72413 +static inline bool is_rst_page_hdr_valid(u32 file_off,
72414 +                                        const struct RESTART_HDR *rhdr)
72416 +       u32 sys_page = le32_to_cpu(rhdr->sys_page_size);
72417 +       u32 page_size = le32_to_cpu(rhdr->page_size);
72418 +       u32 end_usa;
72419 +       u16 ro;
72421 +       if (sys_page < SECTOR_SIZE || page_size < SECTOR_SIZE ||
72422 +           sys_page & (sys_page - 1) || page_size & (page_size - 1)) {
72423 +               return false;
72424 +       }
72426 +       /* Check that if the file offset isn't 0, it is the system page size */
72427 +       if (file_off && file_off != sys_page)
72428 +               return false;
72430 +       /* Check support version 1.1+ */
72431 +       if (le16_to_cpu(rhdr->major_ver) <= 1 && !rhdr->minor_ver)
72432 +               return false;
72434 +       if (le16_to_cpu(rhdr->major_ver) > 2)
72435 +               return false;
72437 +       ro = le16_to_cpu(rhdr->ra_off);
72438 +       if (!IsQuadAligned(ro) || ro > sys_page)
72439 +               return false;
72441 +       end_usa = ((sys_page >> SECTOR_SHIFT) + 1) * sizeof(short);
72442 +       end_usa += le16_to_cpu(rhdr->rhdr.fix_off);
72444 +       if (ro < end_usa)
72445 +               return false;
72447 +       return true;
72450 +static inline bool is_rst_area_valid(const struct RESTART_HDR *rhdr)
72452 +       const struct RESTART_AREA *ra;
72453 +       u16 cl, fl, ul;
72454 +       u32 off, l_size, file_dat_bits, file_size_round;
72455 +       u16 ro = le16_to_cpu(rhdr->ra_off);
72456 +       u32 sys_page = le32_to_cpu(rhdr->sys_page_size);
72458 +       if (ro + offsetof(struct RESTART_AREA, l_size) >
72459 +           SECTOR_SIZE - sizeof(short))
72460 +               return false;
72462 +       ra = Add2Ptr(rhdr, ro);
72463 +       cl = le16_to_cpu(ra->log_clients);
72465 +       if (cl > 1)
72466 +               return false;
72468 +       off = le16_to_cpu(ra->client_off);
72470 +       if (!IsQuadAligned(off) || ro + off > SECTOR_SIZE - sizeof(short))
72471 +               return false;
72473 +       off += cl * sizeof(struct CLIENT_REC);
72475 +       if (off > sys_page)
72476 +               return false;
72478 +       /*
72479 +        * Check the restart length field and whether the entire
72480 +        * restart area is contained that length
72481 +        */
72482 +       if (le16_to_cpu(rhdr->ra_off) + le16_to_cpu(ra->ra_len) > sys_page ||
72483 +           off > le16_to_cpu(ra->ra_len)) {
72484 +               return false;
72485 +       }
72487 +       /*
72488 +        * As a final check make sure that the use list and the free list
72489 +        * are either empty or point to a valid client
72490 +        */
72491 +       fl = le16_to_cpu(ra->client_idx[0]);
72492 +       ul = le16_to_cpu(ra->client_idx[1]);
72493 +       if ((fl != LFS_NO_CLIENT && fl >= cl) ||
72494 +           (ul != LFS_NO_CLIENT && ul >= cl))
72495 +               return false;
72497 +       /* Make sure the sequence number bits match the log file size */
72498 +       l_size = le64_to_cpu(ra->l_size);
72500 +       file_dat_bits = sizeof(u64) * 8 - le32_to_cpu(ra->seq_num_bits);
72501 +       file_size_round = 1u << (file_dat_bits + 3);
72502 +       if (file_size_round != l_size &&
72503 +           (file_size_round < l_size || (file_size_round / 2) > l_size)) {
72504 +               return false;
72505 +       }
72507 +       /* The log page data offset and record header length must be quad-aligned */
72508 +       if (!IsQuadAligned(le16_to_cpu(ra->data_off)) ||
72509 +           !IsQuadAligned(le16_to_cpu(ra->rec_hdr_len)))
72510 +               return false;
72512 +       return true;
72515 +static inline bool is_client_area_valid(const struct RESTART_HDR *rhdr,
72516 +                                       bool usa_error)
72518 +       u16 ro = le16_to_cpu(rhdr->ra_off);
72519 +       const struct RESTART_AREA *ra = Add2Ptr(rhdr, ro);
72520 +       u16 ra_len = le16_to_cpu(ra->ra_len);
72521 +       const struct CLIENT_REC *ca;
72522 +       u32 i;
72524 +       if (usa_error && ra_len + ro > SECTOR_SIZE - sizeof(short))
72525 +               return false;
72527 +       /* Find the start of the client array */
72528 +       ca = Add2Ptr(ra, le16_to_cpu(ra->client_off));
72530 +       /*
72531 +        * Start with the free list
72532 +        * Check that all the clients are valid and that there isn't a cycle
72533 +        * Do the in-use list on the second pass
72534 +        */
72535 +       for (i = 0; i < 2; i++) {
72536 +               u16 client_idx = le16_to_cpu(ra->client_idx[i]);
72537 +               bool first_client = true;
72538 +               u16 clients = le16_to_cpu(ra->log_clients);
72540 +               while (client_idx != LFS_NO_CLIENT) {
72541 +                       const struct CLIENT_REC *cr;
72543 +                       if (!clients ||
72544 +                           client_idx >= le16_to_cpu(ra->log_clients))
72545 +                               return false;
72547 +                       clients -= 1;
72548 +                       cr = ca + client_idx;
72550 +                       client_idx = le16_to_cpu(cr->next_client);
72552 +                       if (first_client) {
72553 +                               first_client = false;
72554 +                               if (cr->prev_client != LFS_NO_CLIENT_LE)
72555 +                                       return false;
72556 +                       }
72557 +               }
72558 +       }
72560 +       return true;
72564 + * remove_client
72565 + *
72566 + * remove a client record from a client record list an restart area
72567 + */
72568 +static inline void remove_client(struct CLIENT_REC *ca,
72569 +                                const struct CLIENT_REC *cr, __le16 *head)
72571 +       if (cr->prev_client == LFS_NO_CLIENT_LE)
72572 +               *head = cr->next_client;
72573 +       else
72574 +               ca[le16_to_cpu(cr->prev_client)].next_client = cr->next_client;
72576 +       if (cr->next_client != LFS_NO_CLIENT_LE)
72577 +               ca[le16_to_cpu(cr->next_client)].prev_client = cr->prev_client;
72581 + * add_client
72582 + *
72583 + * add a client record to the start of a list
72584 + */
72585 +static inline void add_client(struct CLIENT_REC *ca, u16 index, __le16 *head)
72587 +       struct CLIENT_REC *cr = ca + index;
72589 +       cr->prev_client = LFS_NO_CLIENT_LE;
72590 +       cr->next_client = *head;
72592 +       if (*head != LFS_NO_CLIENT_LE)
72593 +               ca[le16_to_cpu(*head)].prev_client = cpu_to_le16(index);
72595 +       *head = cpu_to_le16(index);
72599 + * enum_rstbl
72600 + *
72601 + */
72602 +static inline void *enum_rstbl(struct RESTART_TABLE *t, void *c)
72604 +       __le32 *e;
72605 +       u32 bprt;
72606 +       u16 rsize = t ? le16_to_cpu(t->size) : 0;
72608 +       if (!c) {
72609 +               if (!t || !t->total)
72610 +                       return NULL;
72611 +               e = Add2Ptr(t, sizeof(struct RESTART_TABLE));
72612 +       } else {
72613 +               e = Add2Ptr(c, rsize);
72614 +       }
72616 +       /* Loop until we hit the first one allocated, or the end of the list */
72617 +       for (bprt = bytes_per_rt(t); PtrOffset(t, e) < bprt;
72618 +            e = Add2Ptr(e, rsize)) {
72619 +               if (*e == RESTART_ENTRY_ALLOCATED_LE)
72620 +                       return e;
72621 +       }
72622 +       return NULL;
72626 + * find_dp
72627 + *
72628 + * searches for a 'vcn' in Dirty Page Table,
72629 + */
72630 +static inline struct DIR_PAGE_ENTRY *find_dp(struct RESTART_TABLE *dptbl,
72631 +                                            u32 target_attr, u64 vcn)
72633 +       __le32 ta = cpu_to_le32(target_attr);
72634 +       struct DIR_PAGE_ENTRY *dp = NULL;
72636 +       while ((dp = enum_rstbl(dptbl, dp))) {
72637 +               u64 dp_vcn = le64_to_cpu(dp->vcn);
72639 +               if (dp->target_attr == ta && vcn >= dp_vcn &&
72640 +                   vcn < dp_vcn + le32_to_cpu(dp->lcns_follow)) {
72641 +                       return dp;
72642 +               }
72643 +       }
72644 +       return NULL;
72647 +static inline u32 norm_file_page(u32 page_size, u32 *l_size, bool use_default)
72649 +       if (use_default)
72650 +               page_size = DefaultLogPageSize;
72652 +       /* Round the file size down to a system page boundary */
72653 +       *l_size &= ~(page_size - 1);
72655 +       /* File should contain at least 2 restart pages and MinLogRecordPages pages */
72656 +       if (*l_size < (MinLogRecordPages + 2) * page_size)
72657 +               return 0;
72659 +       return page_size;
72662 +static bool check_log_rec(const struct LOG_REC_HDR *lr, u32 bytes, u32 tr,
72663 +                         u32 bytes_per_attr_entry)
72665 +       u16 t16;
72667 +       if (bytes < sizeof(struct LOG_REC_HDR))
72668 +               return false;
72669 +       if (!tr)
72670 +               return false;
72672 +       if ((tr - sizeof(struct RESTART_TABLE)) %
72673 +           sizeof(struct TRANSACTION_ENTRY))
72674 +               return false;
72676 +       if (le16_to_cpu(lr->redo_off) & 7)
72677 +               return false;
72679 +       if (le16_to_cpu(lr->undo_off) & 7)
72680 +               return false;
72682 +       if (lr->target_attr)
72683 +               goto check_lcns;
72685 +       if (is_target_required(le16_to_cpu(lr->redo_op)))
72686 +               return false;
72688 +       if (is_target_required(le16_to_cpu(lr->undo_op)))
72689 +               return false;
72691 +check_lcns:
72692 +       if (!lr->lcns_follow)
72693 +               goto check_length;
72695 +       t16 = le16_to_cpu(lr->target_attr);
72696 +       if ((t16 - sizeof(struct RESTART_TABLE)) % bytes_per_attr_entry)
72697 +               return false;
72699 +check_length:
72700 +       if (bytes < lrh_length(lr))
72701 +               return false;
72703 +       return true;
72706 +static bool check_rstbl(const struct RESTART_TABLE *rt, size_t bytes)
72708 +       u32 ts;
72709 +       u32 i, off;
72710 +       u16 rsize = le16_to_cpu(rt->size);
72711 +       u16 ne = le16_to_cpu(rt->used);
72712 +       u32 ff = le32_to_cpu(rt->first_free);
72713 +       u32 lf = le32_to_cpu(rt->last_free);
72715 +       ts = rsize * ne + sizeof(struct RESTART_TABLE);
72717 +       if (!rsize || rsize > bytes ||
72718 +           rsize + sizeof(struct RESTART_TABLE) > bytes || bytes < ts ||
72719 +           le16_to_cpu(rt->total) > ne || ff > ts || lf > ts ||
72720 +           (ff && ff < sizeof(struct RESTART_TABLE)) ||
72721 +           (lf && lf < sizeof(struct RESTART_TABLE))) {
72722 +               return false;
72723 +       }
72725 +       /* Verify each entry is either allocated or points
72726 +        * to a valid offset the table
72727 +        */
72728 +       for (i = 0; i < ne; i++) {
72729 +               off = le32_to_cpu(*(__le32 *)Add2Ptr(
72730 +                       rt, i * rsize + sizeof(struct RESTART_TABLE)));
72732 +               if (off != RESTART_ENTRY_ALLOCATED && off &&
72733 +                   (off < sizeof(struct RESTART_TABLE) ||
72734 +                    ((off - sizeof(struct RESTART_TABLE)) % rsize))) {
72735 +                       return false;
72736 +               }
72737 +       }
72739 +       /* Walk through the list headed by the first entry to make
72740 +        * sure none of the entries are currently being used
72741 +        */
72742 +       for (off = ff; off;) {
72743 +               if (off == RESTART_ENTRY_ALLOCATED)
72744 +                       return false;
72746 +               off = le32_to_cpu(*(__le32 *)Add2Ptr(rt, off));
72747 +       }
72749 +       return true;
72753 + * free_rsttbl_idx
72754 + *
72755 + * frees a previously allocated index a Restart Table.
72756 + */
72757 +static inline void free_rsttbl_idx(struct RESTART_TABLE *rt, u32 off)
72759 +       __le32 *e;
72760 +       u32 lf = le32_to_cpu(rt->last_free);
72761 +       __le32 off_le = cpu_to_le32(off);
72763 +       e = Add2Ptr(rt, off);
72765 +       if (off < le32_to_cpu(rt->free_goal)) {
72766 +               *e = rt->first_free;
72767 +               rt->first_free = off_le;
72768 +               if (!lf)
72769 +                       rt->last_free = off_le;
72770 +       } else {
72771 +               if (lf)
72772 +                       *(__le32 *)Add2Ptr(rt, lf) = off_le;
72773 +               else
72774 +                       rt->first_free = off_le;
72776 +               rt->last_free = off_le;
72777 +               *e = 0;
72778 +       }
72780 +       le16_sub_cpu(&rt->total, 1);
72783 +static inline struct RESTART_TABLE *init_rsttbl(u16 esize, u16 used)
72785 +       __le32 *e, *last_free;
72786 +       u32 off;
72787 +       u32 bytes = esize * used + sizeof(struct RESTART_TABLE);
72788 +       u32 lf = sizeof(struct RESTART_TABLE) + (used - 1) * esize;
72789 +       struct RESTART_TABLE *t = ntfs_zalloc(bytes);
72791 +       t->size = cpu_to_le16(esize);
72792 +       t->used = cpu_to_le16(used);
72793 +       t->free_goal = cpu_to_le32(~0u);
72794 +       t->first_free = cpu_to_le32(sizeof(struct RESTART_TABLE));
72795 +       t->last_free = cpu_to_le32(lf);
72797 +       e = (__le32 *)(t + 1);
72798 +       last_free = Add2Ptr(t, lf);
72800 +       for (off = sizeof(struct RESTART_TABLE) + esize; e < last_free;
72801 +            e = Add2Ptr(e, esize), off += esize) {
72802 +               *e = cpu_to_le32(off);
72803 +       }
72804 +       return t;
72807 +static inline struct RESTART_TABLE *extend_rsttbl(struct RESTART_TABLE *tbl,
72808 +                                                 u32 add, u32 free_goal)
72810 +       u16 esize = le16_to_cpu(tbl->size);
72811 +       __le32 osize = cpu_to_le32(bytes_per_rt(tbl));
72812 +       u32 used = le16_to_cpu(tbl->used);
72813 +       struct RESTART_TABLE *rt = init_rsttbl(esize, used + add);
72815 +       memcpy(rt + 1, tbl + 1, esize * used);
72817 +       rt->free_goal = free_goal == ~0u
72818 +                               ? cpu_to_le32(~0u)
72819 +                               : cpu_to_le32(sizeof(struct RESTART_TABLE) +
72820 +                                             free_goal * esize);
72822 +       if (tbl->first_free) {
72823 +               rt->first_free = tbl->first_free;
72824 +               *(__le32 *)Add2Ptr(rt, le32_to_cpu(tbl->last_free)) = osize;
72825 +       } else {
72826 +               rt->first_free = osize;
72827 +       }
72829 +       rt->total = tbl->total;
72831 +       ntfs_free(tbl);
72832 +       return rt;
72836 + * alloc_rsttbl_idx
72837 + *
72838 + * allocates an index from within a previously initialized Restart Table
72839 + */
72840 +static inline void *alloc_rsttbl_idx(struct RESTART_TABLE **tbl)
72842 +       u32 off;
72843 +       __le32 *e;
72844 +       struct RESTART_TABLE *t = *tbl;
72846 +       if (!t->first_free)
72847 +               *tbl = t = extend_rsttbl(t, 16, ~0u);
72849 +       off = le32_to_cpu(t->first_free);
72851 +       /* Dequeue this entry and zero it. */
72852 +       e = Add2Ptr(t, off);
72854 +       t->first_free = *e;
72856 +       memset(e, 0, le16_to_cpu(t->size));
72858 +       *e = RESTART_ENTRY_ALLOCATED_LE;
72860 +       /* If list is going empty, then we fix the last_free as well. */
72861 +       if (!t->first_free)
72862 +               t->last_free = 0;
72864 +       le16_add_cpu(&t->total, 1);
72866 +       return Add2Ptr(t, off);
72870 + * alloc_rsttbl_from_idx
72871 + *
72872 + * allocates a specific index from within a previously initialized Restart Table
72873 + */
72874 +static inline void *alloc_rsttbl_from_idx(struct RESTART_TABLE **tbl, u32 vbo)
72876 +       u32 off;
72877 +       __le32 *e;
72878 +       struct RESTART_TABLE *rt = *tbl;
72879 +       u32 bytes = bytes_per_rt(rt);
72880 +       u16 esize = le16_to_cpu(rt->size);
72882 +       /* If the entry is not the table, we will have to extend the table */
72883 +       if (vbo >= bytes) {
72884 +               /*
72885 +                * extend the size by computing the number of entries between
72886 +                * the existing size and the desired index and adding
72887 +                * 1 to that
72888 +                */
72889 +               u32 bytes2idx = vbo - bytes;
72891 +               /* There should always be an integral number of entries being added */
72892 +               /* Now extend the table */
72893 +               *tbl = rt = extend_rsttbl(rt, bytes2idx / esize + 1, bytes);
72894 +               if (!rt)
72895 +                       return NULL;
72896 +       }
72898 +       /* see if the entry is already allocated, and just return if it is. */
72899 +       e = Add2Ptr(rt, vbo);
72901 +       if (*e == RESTART_ENTRY_ALLOCATED_LE)
72902 +               return e;
72904 +       /*
72905 +        * Walk through the table, looking for the entry we're
72906 +        * interested and the previous entry
72907 +        */
72908 +       off = le32_to_cpu(rt->first_free);
72909 +       e = Add2Ptr(rt, off);
72911 +       if (off == vbo) {
72912 +               /* this is a match */
72913 +               rt->first_free = *e;
72914 +               goto skip_looking;
72915 +       }
72917 +       /*
72918 +        * need to walk through the list looking for the predecessor of our entry
72919 +        */
72920 +       for (;;) {
72921 +               /* Remember the entry just found */
72922 +               u32 last_off = off;
72923 +               __le32 *last_e = e;
72925 +               /* should never run of entries. */
72927 +               /* Lookup up the next entry the list */
72928 +               off = le32_to_cpu(*last_e);
72929 +               e = Add2Ptr(rt, off);
72931 +               /* If this is our match we are done */
72932 +               if (off == vbo) {
72933 +                       *last_e = *e;
72935 +                       /* If this was the last entry, we update that the table as well */
72936 +                       if (le32_to_cpu(rt->last_free) == off)
72937 +                               rt->last_free = cpu_to_le32(last_off);
72938 +                       break;
72939 +               }
72940 +       }
72942 +skip_looking:
72943 +       /* If the list is now empty, we fix the last_free as well */
72944 +       if (!rt->first_free)
72945 +               rt->last_free = 0;
72947 +       /* Zero this entry */
72948 +       memset(e, 0, esize);
72949 +       *e = RESTART_ENTRY_ALLOCATED_LE;
72951 +       le16_add_cpu(&rt->total, 1);
72953 +       return e;
72956 +#define RESTART_SINGLE_PAGE_IO cpu_to_le16(0x0001)
72958 +#define NTFSLOG_WRAPPED 0x00000001
72959 +#define NTFSLOG_MULTIPLE_PAGE_IO 0x00000002
72960 +#define NTFSLOG_NO_LAST_LSN 0x00000004
72961 +#define NTFSLOG_REUSE_TAIL 0x00000010
72962 +#define NTFSLOG_NO_OLDEST_LSN 0x00000020
72965 + * Helper struct to work with NTFS LogFile
72966 + */
72967 +struct ntfs_log {
72968 +       struct ntfs_inode *ni;
72970 +       u32 l_size;
72971 +       u32 sys_page_size;
72972 +       u32 sys_page_mask;
72973 +       u32 page_size;
72974 +       u32 page_mask; // page_size - 1
72975 +       u8 page_bits;
72976 +       struct RECORD_PAGE_HDR *one_page_buf;
72978 +       struct RESTART_TABLE *open_attr_tbl;
72979 +       u32 transaction_id;
72980 +       u32 clst_per_page;
72982 +       u32 first_page;
72983 +       u32 next_page;
72984 +       u32 ra_off;
72985 +       u32 data_off;
72986 +       u32 restart_size;
72987 +       u32 data_size;
72988 +       u16 record_header_len;
72989 +       u64 seq_num;
72990 +       u32 seq_num_bits;
72991 +       u32 file_data_bits;
72992 +       u32 seq_num_mask; /* (1 << file_data_bits) - 1 */
72994 +       struct RESTART_AREA *ra; /* in-memory image of the next restart area */
72995 +       u32 ra_size; /* the usable size of the restart area */
72997 +       /*
72998 +        * If true, then the in-memory restart area is to be written
72999 +        * to the first position on the disk
73000 +        */
73001 +       bool init_ra;
73002 +       bool set_dirty; /* true if we need to set dirty flag */
73004 +       u64 oldest_lsn;
73006 +       u32 oldest_lsn_off;
73007 +       u64 last_lsn;
73009 +       u32 total_avail;
73010 +       u32 total_avail_pages;
73011 +       u32 total_undo_commit;
73012 +       u32 max_current_avail;
73013 +       u32 current_avail;
73014 +       u32 reserved;
73016 +       short major_ver;
73017 +       short minor_ver;
73019 +       u32 l_flags; /* See NTFSLOG_XXX */
73020 +       u32 current_openlog_count; /* On-disk value for open_log_count */
73022 +       struct CLIENT_ID client_id;
73023 +       u32 client_undo_commit;
73026 +static inline u32 lsn_to_vbo(struct ntfs_log *log, const u64 lsn)
73028 +       u32 vbo = (lsn << log->seq_num_bits) >> (log->seq_num_bits - 3);
73030 +       return vbo;
73033 +/* compute the offset in the log file of the next log page */
73034 +static inline u32 next_page_off(struct ntfs_log *log, u32 off)
73036 +       off = (off & ~log->sys_page_mask) + log->page_size;
73037 +       return off >= log->l_size ? log->first_page : off;
73040 +static inline u32 lsn_to_page_off(struct ntfs_log *log, u64 lsn)
73042 +       return (((u32)lsn) << 3) & log->page_mask;
73045 +static inline u64 vbo_to_lsn(struct ntfs_log *log, u32 off, u64 Seq)
73047 +       return (off >> 3) + (Seq << log->file_data_bits);
73050 +static inline bool is_lsn_in_file(struct ntfs_log *log, u64 lsn)
73052 +       return lsn >= log->oldest_lsn &&
73053 +              lsn <= le64_to_cpu(log->ra->current_lsn);
73056 +static inline u32 hdr_file_off(struct ntfs_log *log,
73057 +                              struct RECORD_PAGE_HDR *hdr)
73059 +       if (log->major_ver < 2)
73060 +               return le64_to_cpu(hdr->rhdr.lsn);
73062 +       return le32_to_cpu(hdr->file_off);
73065 +static inline u64 base_lsn(struct ntfs_log *log,
73066 +                          const struct RECORD_PAGE_HDR *hdr, u64 lsn)
73068 +       u64 h_lsn = le64_to_cpu(hdr->rhdr.lsn);
73069 +       u64 ret = (((h_lsn >> log->file_data_bits) +
73070 +                   (lsn < (lsn_to_vbo(log, h_lsn) & ~log->page_mask) ? 1 : 0))
73071 +                  << log->file_data_bits) +
73072 +                 ((((is_log_record_end(hdr) &&
73073 +                     h_lsn <= le64_to_cpu(hdr->record_hdr.last_end_lsn))
73074 +                            ? le16_to_cpu(hdr->record_hdr.next_record_off)
73075 +                            : log->page_size) +
73076 +                   lsn) >>
73077 +                  3);
73079 +       return ret;
73082 +static inline bool verify_client_lsn(struct ntfs_log *log,
73083 +                                    const struct CLIENT_REC *client, u64 lsn)
73085 +       return lsn >= le64_to_cpu(client->oldest_lsn) &&
73086 +              lsn <= le64_to_cpu(log->ra->current_lsn) && lsn;
73089 +struct restart_info {
73090 +       u64 last_lsn;
73091 +       struct RESTART_HDR *r_page;
73092 +       u32 vbo;
73093 +       bool chkdsk_was_run;
73094 +       bool valid_page;
73095 +       bool initialized;
73096 +       bool restart;
73099 +static int read_log_page(struct ntfs_log *log, u32 vbo,
73100 +                        struct RECORD_PAGE_HDR **buffer, bool *usa_error)
73102 +       int err = 0;
73103 +       u32 page_idx = vbo >> log->page_bits;
73104 +       u32 page_off = vbo & log->page_mask;
73105 +       u32 bytes = log->page_size - page_off;
73106 +       void *to_free = NULL;
73107 +       u32 page_vbo = page_idx << log->page_bits;
73108 +       struct RECORD_PAGE_HDR *page_buf;
73109 +       struct ntfs_inode *ni = log->ni;
73110 +       bool bBAAD;
73112 +       if (vbo >= log->l_size)
73113 +               return -EINVAL;
73115 +       if (!*buffer) {
73116 +               to_free = ntfs_malloc(bytes);
73117 +               if (!to_free)
73118 +                       return -ENOMEM;
73119 +               *buffer = to_free;
73120 +       }
73122 +       page_buf = page_off ? log->one_page_buf : *buffer;
73124 +       err = ntfs_read_run_nb(ni->mi.sbi, &ni->file.run, page_vbo, page_buf,
73125 +                              log->page_size, NULL);
73126 +       if (err)
73127 +               goto out;
73129 +       if (page_buf->rhdr.sign != NTFS_FFFF_SIGNATURE)
73130 +               ntfs_fix_post_read(&page_buf->rhdr, PAGE_SIZE, false);
73132 +       if (page_buf != *buffer)
73133 +               memcpy(*buffer, Add2Ptr(page_buf, page_off), bytes);
73135 +       bBAAD = page_buf->rhdr.sign == NTFS_BAAD_SIGNATURE;
73137 +       if (usa_error)
73138 +               *usa_error = bBAAD;
73139 +       /* Check that the update sequence array for this page is valid */
73140 +       /* If we don't allow errors, raise an error status */
73141 +       else if (bBAAD)
73142 +               err = -EINVAL;
73144 +out:
73145 +       if (err && to_free) {
73146 +               ntfs_free(to_free);
73147 +               *buffer = NULL;
73148 +       }
73150 +       return err;
73154 + * log_read_rst
73155 + *
73156 + * it walks through 512 blocks of the file looking for a valid restart page header
73157 + * It will stop the first time we find a valid page header
73158 + */
73159 +static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first,
73160 +                       struct restart_info *info)
73162 +       u32 skip, vbo;
73163 +       struct RESTART_HDR *r_page = ntfs_malloc(DefaultLogPageSize);
73165 +       if (!r_page)
73166 +               return -ENOMEM;
73168 +       memset(info, 0, sizeof(struct restart_info));
73170 +       /* Determine which restart area we are looking for */
73171 +       if (first) {
73172 +               vbo = 0;
73173 +               skip = 512;
73174 +       } else {
73175 +               vbo = 512;
73176 +               skip = 0;
73177 +       }
73179 +       /* loop continuously until we succeed */
73180 +       for (; vbo < l_size; vbo = 2 * vbo + skip, skip = 0) {
73181 +               bool usa_error;
73182 +               u32 sys_page_size;
73183 +               bool brst, bchk;
73184 +               struct RESTART_AREA *ra;
73186 +               /* Read a page header at the current offset */
73187 +               if (read_log_page(log, vbo, (struct RECORD_PAGE_HDR **)&r_page,
73188 +                                 &usa_error)) {
73189 +                       /* ignore any errors */
73190 +                       continue;
73191 +               }
73193 +               /* exit if the signature is a log record page */
73194 +               if (r_page->rhdr.sign == NTFS_RCRD_SIGNATURE) {
73195 +                       info->initialized = true;
73196 +                       break;
73197 +               }
73199 +               brst = r_page->rhdr.sign == NTFS_RSTR_SIGNATURE;
73200 +               bchk = r_page->rhdr.sign == NTFS_CHKD_SIGNATURE;
73202 +               if (!bchk && !brst) {
73203 +                       if (r_page->rhdr.sign != NTFS_FFFF_SIGNATURE) {
73204 +                               /*
73205 +                                * Remember if the signature does not
73206 +                                * indicate uninitialized file
73207 +                                */
73208 +                               info->initialized = true;
73209 +                       }
73210 +                       continue;
73211 +               }
73213 +               ra = NULL;
73214 +               info->valid_page = false;
73215 +               info->initialized = true;
73216 +               info->vbo = vbo;
73218 +               /* Let's check the restart area if this is a valid page */
73219 +               if (!is_rst_page_hdr_valid(vbo, r_page))
73220 +                       goto check_result;
73221 +               ra = Add2Ptr(r_page, le16_to_cpu(r_page->ra_off));
73223 +               if (!is_rst_area_valid(r_page))
73224 +                       goto check_result;
73226 +               /*
73227 +                * We have a valid restart page header and restart area.
73228 +                * If chkdsk was run or we have no clients then we have
73229 +                * no more checking to do
73230 +                */
73231 +               if (bchk || ra->client_idx[1] == LFS_NO_CLIENT_LE) {
73232 +                       info->valid_page = true;
73233 +                       goto check_result;
73234 +               }
73236 +               /* Read the entire restart area */
73237 +               sys_page_size = le32_to_cpu(r_page->sys_page_size);
73238 +               if (DefaultLogPageSize != sys_page_size) {
73239 +                       ntfs_free(r_page);
73240 +                       r_page = ntfs_zalloc(sys_page_size);
73241 +                       if (!r_page)
73242 +                               return -ENOMEM;
73244 +                       if (read_log_page(log, vbo,
73245 +                                         (struct RECORD_PAGE_HDR **)&r_page,
73246 +                                         &usa_error)) {
73247 +                               /* ignore any errors */
73248 +                               ntfs_free(r_page);
73249 +                               r_page = NULL;
73250 +                               continue;
73251 +                       }
73252 +               }
73254 +               if (is_client_area_valid(r_page, usa_error)) {
73255 +                       info->valid_page = true;
73256 +                       ra = Add2Ptr(r_page, le16_to_cpu(r_page->ra_off));
73257 +               }
73259 +check_result:
73260 +               /* If chkdsk was run then update the caller's values and return */
73261 +               if (r_page->rhdr.sign == NTFS_CHKD_SIGNATURE) {
73262 +                       info->chkdsk_was_run = true;
73263 +                       info->last_lsn = le64_to_cpu(r_page->rhdr.lsn);
73264 +                       info->restart = true;
73265 +                       info->r_page = r_page;
73266 +                       return 0;
73267 +               }
73269 +               /* If we have a valid page then copy the values we need from it */
73270 +               if (info->valid_page) {
73271 +                       info->last_lsn = le64_to_cpu(ra->current_lsn);
73272 +                       info->restart = true;
73273 +                       info->r_page = r_page;
73274 +                       return 0;
73275 +               }
73276 +       }
73278 +       ntfs_free(r_page);
73280 +       return 0;
73284 + * log_init_pg_hdr
73285 + *
73286 + * init "log' from restart page header
73287 + */
73288 +static void log_init_pg_hdr(struct ntfs_log *log, u32 sys_page_size,
73289 +                           u32 page_size, u16 major_ver, u16 minor_ver)
73291 +       log->sys_page_size = sys_page_size;
73292 +       log->sys_page_mask = sys_page_size - 1;
73293 +       log->page_size = page_size;
73294 +       log->page_mask = page_size - 1;
73295 +       log->page_bits = blksize_bits(page_size);
73297 +       log->clst_per_page = log->page_size >> log->ni->mi.sbi->cluster_bits;
73298 +       if (!log->clst_per_page)
73299 +               log->clst_per_page = 1;
73301 +       log->first_page = major_ver >= 2
73302 +                                 ? 0x22 * page_size
73303 +                                 : ((sys_page_size << 1) + (page_size << 1));
73304 +       log->major_ver = major_ver;
73305 +       log->minor_ver = minor_ver;
73309 + * log_create
73310 + *
73311 + * init "log" in cases when we don't have a restart area to use
73312 + */
73313 +static void log_create(struct ntfs_log *log, u32 l_size, const u64 last_lsn,
73314 +                      u32 open_log_count, bool wrapped, bool use_multi_page)
73316 +       log->l_size = l_size;
73317 +       /* All file offsets must be quadword aligned */
73318 +       log->file_data_bits = blksize_bits(l_size) - 3;
73319 +       log->seq_num_mask = (8 << log->file_data_bits) - 1;
73320 +       log->seq_num_bits = sizeof(u64) * 8 - log->file_data_bits;
73321 +       log->seq_num = (last_lsn >> log->file_data_bits) + 2;
73322 +       log->next_page = log->first_page;
73323 +       log->oldest_lsn = log->seq_num << log->file_data_bits;
73324 +       log->oldest_lsn_off = 0;
73325 +       log->last_lsn = log->oldest_lsn;
73327 +       log->l_flags |= NTFSLOG_NO_LAST_LSN | NTFSLOG_NO_OLDEST_LSN;
73329 +       /* Set the correct flags for the I/O and indicate if we have wrapped */
73330 +       if (wrapped)
73331 +               log->l_flags |= NTFSLOG_WRAPPED;
73333 +       if (use_multi_page)
73334 +               log->l_flags |= NTFSLOG_MULTIPLE_PAGE_IO;
73336 +       /* Compute the log page values */
73337 +       log->data_off = QuadAlign(
73338 +               offsetof(struct RECORD_PAGE_HDR, fixups) +
73339 +               sizeof(short) * ((log->page_size >> SECTOR_SHIFT) + 1));
73340 +       log->data_size = log->page_size - log->data_off;
73341 +       log->record_header_len = sizeof(struct LFS_RECORD_HDR);
73343 +       /* Remember the different page sizes for reservation */
73344 +       log->reserved = log->data_size - log->record_header_len;
73346 +       /* Compute the restart page values. */
73347 +       log->ra_off = QuadAlign(
73348 +               offsetof(struct RESTART_HDR, fixups) +
73349 +               sizeof(short) * ((log->sys_page_size >> SECTOR_SHIFT) + 1));
73350 +       log->restart_size = log->sys_page_size - log->ra_off;
73351 +       log->ra_size = struct_size(log->ra, clients, 1);
73352 +       log->current_openlog_count = open_log_count;
73354 +       /*
73355 +        * The total available log file space is the number of
73356 +        * log file pages times the space available on each page
73357 +        */
73358 +       log->total_avail_pages = log->l_size - log->first_page;
73359 +       log->total_avail = log->total_avail_pages >> log->page_bits;
73361 +       /*
73362 +        * We assume that we can't use the end of the page less than
73363 +        * the file record size
73364 +        * Then we won't need to reserve more than the caller asks for
73365 +        */
73366 +       log->max_current_avail = log->total_avail * log->reserved;
73367 +       log->total_avail = log->total_avail * log->data_size;
73368 +       log->current_avail = log->max_current_avail;
73372 + * log_create_ra
73373 + *
73374 + * This routine is called to fill a restart area from the values stored in 'log'
73375 + */
73376 +static struct RESTART_AREA *log_create_ra(struct ntfs_log *log)
73378 +       struct CLIENT_REC *cr;
73379 +       struct RESTART_AREA *ra = ntfs_zalloc(log->restart_size);
73381 +       if (!ra)
73382 +               return NULL;
73384 +       ra->current_lsn = cpu_to_le64(log->last_lsn);
73385 +       ra->log_clients = cpu_to_le16(1);
73386 +       ra->client_idx[1] = LFS_NO_CLIENT_LE;
73387 +       if (log->l_flags & NTFSLOG_MULTIPLE_PAGE_IO)
73388 +               ra->flags = RESTART_SINGLE_PAGE_IO;
73389 +       ra->seq_num_bits = cpu_to_le32(log->seq_num_bits);
73390 +       ra->ra_len = cpu_to_le16(log->ra_size);
73391 +       ra->client_off = cpu_to_le16(offsetof(struct RESTART_AREA, clients));
73392 +       ra->l_size = cpu_to_le64(log->l_size);
73393 +       ra->rec_hdr_len = cpu_to_le16(log->record_header_len);
73394 +       ra->data_off = cpu_to_le16(log->data_off);
73395 +       ra->open_log_count = cpu_to_le32(log->current_openlog_count + 1);
73397 +       cr = ra->clients;
73399 +       cr->prev_client = LFS_NO_CLIENT_LE;
73400 +       cr->next_client = LFS_NO_CLIENT_LE;
73402 +       return ra;
73405 +static u32 final_log_off(struct ntfs_log *log, u64 lsn, u32 data_len)
73407 +       u32 base_vbo = lsn << 3;
73408 +       u32 final_log_off = (base_vbo & log->seq_num_mask) & ~log->page_mask;
73409 +       u32 page_off = base_vbo & log->page_mask;
73410 +       u32 tail = log->page_size - page_off;
73412 +       page_off -= 1;
73414 +       /* Add the length of the header */
73415 +       data_len += log->record_header_len;
73417 +       /*
73418 +        * If this lsn is contained this log page we are done
73419 +        * Otherwise we need to walk through several log pages
73420 +        */
73421 +       if (data_len > tail) {
73422 +               data_len -= tail;
73423 +               tail = log->data_size;
73424 +               page_off = log->data_off - 1;
73426 +               for (;;) {
73427 +                       final_log_off = next_page_off(log, final_log_off);
73429 +                       /* We are done if the remaining bytes fit on this page */
73430 +                       if (data_len <= tail)
73431 +                               break;
73432 +                       data_len -= tail;
73433 +               }
73434 +       }
73436 +       /*
73437 +        * We add the remaining bytes to our starting position on this page
73438 +        * and then add that value to the file offset of this log page
73439 +        */
73440 +       return final_log_off + data_len + page_off;
73443 +static int next_log_lsn(struct ntfs_log *log, const struct LFS_RECORD_HDR *rh,
73444 +                       u64 *lsn)
73446 +       int err;
73447 +       u64 this_lsn = le64_to_cpu(rh->this_lsn);
73448 +       u32 vbo = lsn_to_vbo(log, this_lsn);
73449 +       u32 end =
73450 +               final_log_off(log, this_lsn, le32_to_cpu(rh->client_data_len));
73451 +       u32 hdr_off = end & ~log->sys_page_mask;
73452 +       u64 seq = this_lsn >> log->file_data_bits;
73453 +       struct RECORD_PAGE_HDR *page = NULL;
73455 +       /* Remember if we wrapped */
73456 +       if (end <= vbo)
73457 +               seq += 1;
73459 +       /* log page header for this page */
73460 +       err = read_log_page(log, hdr_off, &page, NULL);
73461 +       if (err)
73462 +               return err;
73464 +       /*
73465 +        * If the lsn we were given was not the last lsn on this page,
73466 +        * then the starting offset for the next lsn is on a quad word
73467 +        * boundary following the last file offset for the current lsn
73468 +        * Otherwise the file offset is the start of the data on the next page
73469 +        */
73470 +       if (this_lsn == le64_to_cpu(page->rhdr.lsn)) {
73471 +               /* If we wrapped, we need to increment the sequence number */
73472 +               hdr_off = next_page_off(log, hdr_off);
73473 +               if (hdr_off == log->first_page)
73474 +                       seq += 1;
73476 +               vbo = hdr_off + log->data_off;
73477 +       } else {
73478 +               vbo = QuadAlign(end);
73479 +       }
73481 +       /* Compute the lsn based on the file offset and the sequence count */
73482 +       *lsn = vbo_to_lsn(log, vbo, seq);
73484 +       /*
73485 +        * If this lsn is within the legal range for the file, we return true
73486 +        * Otherwise false indicates that there are no more lsn's
73487 +        */
73488 +       if (!is_lsn_in_file(log, *lsn))
73489 +               *lsn = 0;
73491 +       ntfs_free(page);
73493 +       return 0;
73497 + * current_log_avail
73498 + *
73499 + * calculate the number of bytes available for log records
73500 + */
73501 +static u32 current_log_avail(struct ntfs_log *log)
73503 +       u32 oldest_off, next_free_off, free_bytes;
73505 +       if (log->l_flags & NTFSLOG_NO_LAST_LSN) {
73506 +               /* The entire file is available */
73507 +               return log->max_current_avail;
73508 +       }
73510 +       /*
73511 +        * If there is a last lsn the restart area then we know that we will
73512 +        * have to compute the free range
73513 +        * If there is no oldest lsn then start at the first page of the file
73514 +        */
73515 +       oldest_off = (log->l_flags & NTFSLOG_NO_OLDEST_LSN)
73516 +                            ? log->first_page
73517 +                            : (log->oldest_lsn_off & ~log->sys_page_mask);
73519 +       /*
73520 +        * We will use the next log page offset to compute the next free page\
73521 +        * If we are going to reuse this page go to the next page
73522 +        * If we are at the first page then use the end of the file
73523 +        */
73524 +       next_free_off = (log->l_flags & NTFSLOG_REUSE_TAIL)
73525 +                               ? log->next_page + log->page_size
73526 +                       : log->next_page == log->first_page ? log->l_size
73527 +                                                           : log->next_page;
73529 +       /* If the two offsets are the same then there is no available space */
73530 +       if (oldest_off == next_free_off)
73531 +               return 0;
73532 +       /*
73533 +        * If the free offset follows the oldest offset then subtract
73534 +        * this range from the total available pages
73535 +        */
73536 +       free_bytes =
73537 +               oldest_off < next_free_off
73538 +                       ? log->total_avail_pages - (next_free_off - oldest_off)
73539 +                       : oldest_off - next_free_off;
73541 +       free_bytes >>= log->page_bits;
73542 +       return free_bytes * log->reserved;
73545 +static bool check_subseq_log_page(struct ntfs_log *log,
73546 +                                 const struct RECORD_PAGE_HDR *rp, u32 vbo,
73547 +                                 u64 seq)
73549 +       u64 lsn_seq;
73550 +       const struct NTFS_RECORD_HEADER *rhdr = &rp->rhdr;
73551 +       u64 lsn = le64_to_cpu(rhdr->lsn);
73553 +       if (rhdr->sign == NTFS_FFFF_SIGNATURE || !rhdr->sign)
73554 +               return false;
73556 +       /*
73557 +        * If the last lsn on the page occurs was written after the page
73558 +        * that caused the original error then we have a fatal error
73559 +        */
73560 +       lsn_seq = lsn >> log->file_data_bits;
73562 +       /*
73563 +        * If the sequence number for the lsn the page is equal or greater
73564 +        * than lsn we expect, then this is a subsequent write
73565 +        */
73566 +       return lsn_seq >= seq ||
73567 +              (lsn_seq == seq - 1 && log->first_page == vbo &&
73568 +               vbo != (lsn_to_vbo(log, lsn) & ~log->page_mask));
73572 + * last_log_lsn
73573 + *
73574 + * This routine walks through the log pages for a file, searching for the
73575 + * last log page written to the file
73576 + */
73577 +static int last_log_lsn(struct ntfs_log *log)
73579 +       int err;
73580 +       bool usa_error = false;
73581 +       bool replace_page = false;
73582 +       bool reuse_page = log->l_flags & NTFSLOG_REUSE_TAIL;
73583 +       bool wrapped_file, wrapped;
73585 +       u32 page_cnt = 1, page_pos = 1;
73586 +       u32 page_off = 0, page_off1 = 0, saved_off = 0;
73587 +       u32 final_off, second_off, final_off_prev = 0, second_off_prev = 0;
73588 +       u32 first_file_off = 0, second_file_off = 0;
73589 +       u32 part_io_count = 0;
73590 +       u32 tails = 0;
73591 +       u32 this_off, curpage_off, nextpage_off, remain_pages;
73593 +       u64 expected_seq, seq_base = 0, lsn_base = 0;
73594 +       u64 best_lsn, best_lsn1, best_lsn2;
73595 +       u64 lsn_cur, lsn1, lsn2;
73596 +       u64 last_ok_lsn = reuse_page ? log->last_lsn : 0;
73598 +       u16 cur_pos, best_page_pos;
73600 +       struct RECORD_PAGE_HDR *page = NULL;
73601 +       struct RECORD_PAGE_HDR *tst_page = NULL;
73602 +       struct RECORD_PAGE_HDR *first_tail = NULL;
73603 +       struct RECORD_PAGE_HDR *second_tail = NULL;
73604 +       struct RECORD_PAGE_HDR *tail_page = NULL;
73605 +       struct RECORD_PAGE_HDR *second_tail_prev = NULL;
73606 +       struct RECORD_PAGE_HDR *first_tail_prev = NULL;
73607 +       struct RECORD_PAGE_HDR *page_bufs = NULL;
73608 +       struct RECORD_PAGE_HDR *best_page;
73610 +       if (log->major_ver >= 2) {
73611 +               final_off = 0x02 * log->page_size;
73612 +               second_off = 0x12 * log->page_size;
73614 +               // 0x10 == 0x12 - 0x2
73615 +               page_bufs = ntfs_malloc(log->page_size * 0x10);
73616 +               if (!page_bufs)
73617 +                       return -ENOMEM;
73618 +       } else {
73619 +               second_off = log->first_page - log->page_size;
73620 +               final_off = second_off - log->page_size;
73621 +       }
73623 +next_tail:
73624 +       /* Read second tail page (at pos 3/0x12000) */
73625 +       if (read_log_page(log, second_off, &second_tail, &usa_error) ||
73626 +           usa_error || second_tail->rhdr.sign != NTFS_RCRD_SIGNATURE) {
73627 +               ntfs_free(second_tail);
73628 +               second_tail = NULL;
73629 +               second_file_off = 0;
73630 +               lsn2 = 0;
73631 +       } else {
73632 +               second_file_off = hdr_file_off(log, second_tail);
73633 +               lsn2 = le64_to_cpu(second_tail->record_hdr.last_end_lsn);
73634 +       }
73636 +       /* Read first tail page (at pos 2/0x2000 ) */
73637 +       if (read_log_page(log, final_off, &first_tail, &usa_error) ||
73638 +           usa_error || first_tail->rhdr.sign != NTFS_RCRD_SIGNATURE) {
73639 +               ntfs_free(first_tail);
73640 +               first_tail = NULL;
73641 +               first_file_off = 0;
73642 +               lsn1 = 0;
73643 +       } else {
73644 +               first_file_off = hdr_file_off(log, first_tail);
73645 +               lsn1 = le64_to_cpu(first_tail->record_hdr.last_end_lsn);
73646 +       }
73648 +       if (log->major_ver < 2) {
73649 +               int best_page;
73651 +               first_tail_prev = first_tail;
73652 +               final_off_prev = first_file_off;
73653 +               second_tail_prev = second_tail;
73654 +               second_off_prev = second_file_off;
73655 +               tails = 1;
73657 +               if (!first_tail && !second_tail)
73658 +                       goto tail_read;
73660 +               if (first_tail && second_tail)
73661 +                       best_page = lsn1 < lsn2 ? 1 : 0;
73662 +               else if (first_tail)
73663 +                       best_page = 0;
73664 +               else
73665 +                       best_page = 1;
73667 +               page_off = best_page ? second_file_off : first_file_off;
73668 +               seq_base = (best_page ? lsn2 : lsn1) >> log->file_data_bits;
73669 +               goto tail_read;
73670 +       }
73672 +       best_lsn1 = first_tail ? base_lsn(log, first_tail, first_file_off) : 0;
73673 +       best_lsn2 =
73674 +               second_tail ? base_lsn(log, second_tail, second_file_off) : 0;
73676 +       if (first_tail && second_tail) {
73677 +               if (best_lsn1 > best_lsn2) {
73678 +                       best_lsn = best_lsn1;
73679 +                       best_page = first_tail;
73680 +                       this_off = first_file_off;
73681 +               } else {
73682 +                       best_lsn = best_lsn2;
73683 +                       best_page = second_tail;
73684 +                       this_off = second_file_off;
73685 +               }
73686 +       } else if (first_tail) {
73687 +               best_lsn = best_lsn1;
73688 +               best_page = first_tail;
73689 +               this_off = first_file_off;
73690 +       } else if (second_tail) {
73691 +               best_lsn = best_lsn2;
73692 +               best_page = second_tail;
73693 +               this_off = second_file_off;
73694 +       } else {
73695 +               goto tail_read;
73696 +       }
73698 +       best_page_pos = le16_to_cpu(best_page->page_pos);
73700 +       if (!tails) {
73701 +               if (best_page_pos == page_pos) {
73702 +                       seq_base = best_lsn >> log->file_data_bits;
73703 +                       saved_off = page_off = le32_to_cpu(best_page->file_off);
73704 +                       lsn_base = best_lsn;
73706 +                       memmove(page_bufs, best_page, log->page_size);
73708 +                       page_cnt = le16_to_cpu(best_page->page_count);
73709 +                       if (page_cnt > 1)
73710 +                               page_pos += 1;
73712 +                       tails = 1;
73713 +               }
73714 +       } else if (seq_base == (best_lsn >> log->file_data_bits) &&
73715 +                  saved_off + log->page_size == this_off &&
73716 +                  lsn_base < best_lsn &&
73717 +                  (page_pos != page_cnt || best_page_pos == page_pos ||
73718 +                   best_page_pos == 1) &&
73719 +                  (page_pos >= page_cnt || best_page_pos == page_pos)) {
73720 +               u16 bppc = le16_to_cpu(best_page->page_count);
73722 +               saved_off += log->page_size;
73723 +               lsn_base = best_lsn;
73725 +               memmove(Add2Ptr(page_bufs, tails * log->page_size), best_page,
73726 +                       log->page_size);
73728 +               tails += 1;
73730 +               if (best_page_pos != bppc) {
73731 +                       page_cnt = bppc;
73732 +                       page_pos = best_page_pos;
73734 +                       if (page_cnt > 1)
73735 +                               page_pos += 1;
73736 +               } else {
73737 +                       page_pos = page_cnt = 1;
73738 +               }
73739 +       } else {
73740 +               ntfs_free(first_tail);
73741 +               ntfs_free(second_tail);
73742 +               goto tail_read;
73743 +       }
73745 +       ntfs_free(first_tail_prev);
73746 +       first_tail_prev = first_tail;
73747 +       final_off_prev = first_file_off;
73748 +       first_tail = NULL;
73750 +       ntfs_free(second_tail_prev);
73751 +       second_tail_prev = second_tail;
73752 +       second_off_prev = second_file_off;
73753 +       second_tail = NULL;
73755 +       final_off += log->page_size;
73756 +       second_off += log->page_size;
73758 +       if (tails < 0x10)
73759 +               goto next_tail;
73760 +tail_read:
73761 +       first_tail = first_tail_prev;
73762 +       final_off = final_off_prev;
73764 +       second_tail = second_tail_prev;
73765 +       second_off = second_off_prev;
73767 +       page_cnt = page_pos = 1;
73769 +       curpage_off = seq_base == log->seq_num ? min(log->next_page, page_off)
73770 +                                              : log->next_page;
73772 +       wrapped_file =
73773 +               curpage_off == log->first_page &&
73774 +               !(log->l_flags & (NTFSLOG_NO_LAST_LSN | NTFSLOG_REUSE_TAIL));
73776 +       expected_seq = wrapped_file ? (log->seq_num + 1) : log->seq_num;
73778 +       nextpage_off = curpage_off;
73780 +next_page:
73781 +       tail_page = NULL;
73782 +       /* Read the next log page */
73783 +       err = read_log_page(log, curpage_off, &page, &usa_error);
73785 +       /* Compute the next log page offset in the file */
73786 +       nextpage_off = next_page_off(log, curpage_off);
73787 +       wrapped = nextpage_off == log->first_page;
73789 +       if (tails > 1) {
73790 +               struct RECORD_PAGE_HDR *cur_page =
73791 +                       Add2Ptr(page_bufs, curpage_off - page_off);
73793 +               if (curpage_off == saved_off) {
73794 +                       tail_page = cur_page;
73795 +                       goto use_tail_page;
73796 +               }
73798 +               if (page_off > curpage_off || curpage_off >= saved_off)
73799 +                       goto use_tail_page;
73801 +               if (page_off1)
73802 +                       goto use_cur_page;
73804 +               if (!err && !usa_error &&
73805 +                   page->rhdr.sign == NTFS_RCRD_SIGNATURE &&
73806 +                   cur_page->rhdr.lsn == page->rhdr.lsn &&
73807 +                   cur_page->record_hdr.next_record_off ==
73808 +                           page->record_hdr.next_record_off &&
73809 +                   ((page_pos == page_cnt &&
73810 +                     le16_to_cpu(page->page_pos) == 1) ||
73811 +                    (page_pos != page_cnt &&
73812 +                     le16_to_cpu(page->page_pos) == page_pos + 1 &&
73813 +                     le16_to_cpu(page->page_count) == page_cnt))) {
73814 +                       cur_page = NULL;
73815 +                       goto use_tail_page;
73816 +               }
73818 +               page_off1 = page_off;
73820 +use_cur_page:
73822 +               lsn_cur = le64_to_cpu(cur_page->rhdr.lsn);
73824 +               if (last_ok_lsn !=
73825 +                           le64_to_cpu(cur_page->record_hdr.last_end_lsn) &&
73826 +                   ((lsn_cur >> log->file_data_bits) +
73827 +                    ((curpage_off <
73828 +                      (lsn_to_vbo(log, lsn_cur) & ~log->page_mask))
73829 +                             ? 1
73830 +                             : 0)) != expected_seq) {
73831 +                       goto check_tail;
73832 +               }
73834 +               if (!is_log_record_end(cur_page)) {
73835 +                       tail_page = NULL;
73836 +                       last_ok_lsn = lsn_cur;
73837 +                       goto next_page_1;
73838 +               }
73840 +               log->seq_num = expected_seq;
73841 +               log->l_flags &= ~NTFSLOG_NO_LAST_LSN;
73842 +               log->last_lsn = le64_to_cpu(cur_page->record_hdr.last_end_lsn);
73843 +               log->ra->current_lsn = cur_page->record_hdr.last_end_lsn;
73845 +               if (log->record_header_len <=
73846 +                   log->page_size -
73847 +                           le16_to_cpu(cur_page->record_hdr.next_record_off)) {
73848 +                       log->l_flags |= NTFSLOG_REUSE_TAIL;
73849 +                       log->next_page = curpage_off;
73850 +               } else {
73851 +                       log->l_flags &= ~NTFSLOG_REUSE_TAIL;
73852 +                       log->next_page = nextpage_off;
73853 +               }
73855 +               if (wrapped_file)
73856 +                       log->l_flags |= NTFSLOG_WRAPPED;
73858 +               last_ok_lsn = le64_to_cpu(cur_page->record_hdr.last_end_lsn);
73859 +               goto next_page_1;
73860 +       }
73862 +       /*
73863 +        * If we are at the expected first page of a transfer check to see
73864 +        * if either tail copy is at this offset
73865 +        * If this page is the last page of a transfer, check if we wrote
73866 +        * a subsequent tail copy
73867 +        */
73868 +       if (page_cnt == page_pos || page_cnt == page_pos + 1) {
73869 +               /*
73870 +                * Check if the offset matches either the first or second
73871 +                * tail copy. It is possible it will match both
73872 +                */
73873 +               if (curpage_off == final_off)
73874 +                       tail_page = first_tail;
73876 +               /*
73877 +                * If we already matched on the first page then
73878 +                * check the ending lsn's.
73879 +                */
73880 +               if (curpage_off == second_off) {
73881 +                       if (!tail_page ||
73882 +                           (second_tail &&
73883 +                            le64_to_cpu(second_tail->record_hdr.last_end_lsn) >
73884 +                                    le64_to_cpu(first_tail->record_hdr
73885 +                                                        .last_end_lsn))) {
73886 +                               tail_page = second_tail;
73887 +                       }
73888 +               }
73889 +       }
73891 +use_tail_page:
73892 +       if (tail_page) {
73893 +               /* we have a candidate for a tail copy */
73894 +               lsn_cur = le64_to_cpu(tail_page->record_hdr.last_end_lsn);
73896 +               if (last_ok_lsn < lsn_cur) {
73897 +                       /*
73898 +                        * If the sequence number is not expected,
73899 +                        * then don't use the tail copy
73900 +                        */
73901 +                       if (expected_seq != (lsn_cur >> log->file_data_bits))
73902 +                               tail_page = NULL;
73903 +               } else if (last_ok_lsn > lsn_cur) {
73904 +                       /*
73905 +                        * If the last lsn is greater than the one on
73906 +                        * this page then forget this tail
73907 +                        */
73908 +                       tail_page = NULL;
73909 +               }
73910 +       }
73912 +       /* If we have an error on the current page, we will break out of this loop */
73913 +       if (err || usa_error)
73914 +               goto check_tail;
73916 +       /*
73917 +        * Done if the last lsn on this page doesn't match the previous known
73918 +        * last lsn or the sequence number is not expected
73919 +        */
73920 +       lsn_cur = le64_to_cpu(page->rhdr.lsn);
73921 +       if (last_ok_lsn != lsn_cur &&
73922 +           expected_seq != (lsn_cur >> log->file_data_bits)) {
73923 +               goto check_tail;
73924 +       }
73926 +       /*
73927 +        * Check that the page position and page count values are correct
73928 +        * If this is the first page of a transfer the position must be 1
73929 +        * and the count will be unknown
73930 +        */
73931 +       if (page_cnt == page_pos) {
73932 +               if (page->page_pos != cpu_to_le16(1) &&
73933 +                   (!reuse_page || page->page_pos != page->page_count)) {
73934 +                       /*
73935 +                        * If the current page is the first page we are
73936 +                        * looking at and we are reusing this page then
73937 +                        * it can be either the first or last page of a
73938 +                        * transfer. Otherwise it can only be the first.
73939 +                        */
73940 +                       goto check_tail;
73941 +               }
73942 +       } else if (le16_to_cpu(page->page_count) != page_cnt ||
73943 +                  le16_to_cpu(page->page_pos) != page_pos + 1) {
73944 +               /*
73945 +                * The page position better be 1 more than the last page
73946 +                * position and the page count better match
73947 +                */
73948 +               goto check_tail;
73949 +       }
73951 +       /*
73952 +        * We have a valid page in the file and may have a valid page
73953 +        * in the tail copy area
73954 +        * If the tail page was written after the page in the file then
73955 +        * break out of the loop
73956 +        */
73957 +       if (tail_page &&
73958 +           le64_to_cpu(tail_page->record_hdr.last_end_lsn) > lsn_cur) {
73959 +               /* Remember if we will replace the page */
73960 +               replace_page = true;
73961 +               goto check_tail;
73962 +       }
73964 +       tail_page = NULL;
73966 +       if (is_log_record_end(page)) {
73967 +               /*
73968 +                * Since we have read this page we know the sequence number
73969 +                * is the same as our expected value
73970 +                */
73971 +               log->seq_num = expected_seq;
73972 +               log->last_lsn = le64_to_cpu(page->record_hdr.last_end_lsn);
73973 +               log->ra->current_lsn = page->record_hdr.last_end_lsn;
73974 +               log->l_flags &= ~NTFSLOG_NO_LAST_LSN;
73976 +               /*
73977 +                * If there is room on this page for another header then
73978 +                * remember we want to reuse the page
73979 +                */
73980 +               if (log->record_header_len <=
73981 +                   log->page_size -
73982 +                           le16_to_cpu(page->record_hdr.next_record_off)) {
73983 +                       log->l_flags |= NTFSLOG_REUSE_TAIL;
73984 +                       log->next_page = curpage_off;
73985 +               } else {
73986 +                       log->l_flags &= ~NTFSLOG_REUSE_TAIL;
73987 +                       log->next_page = nextpage_off;
73988 +               }
73990 +               /* Remember if we wrapped the log file */
73991 +               if (wrapped_file)
73992 +                       log->l_flags |= NTFSLOG_WRAPPED;
73993 +       }
73995 +       /*
73996 +        * Remember the last page count and position.
73997 +        * Also remember the last known lsn
73998 +        */
73999 +       page_cnt = le16_to_cpu(page->page_count);
74000 +       page_pos = le16_to_cpu(page->page_pos);
74001 +       last_ok_lsn = le64_to_cpu(page->rhdr.lsn);
74003 +next_page_1:
74005 +       if (wrapped) {
74006 +               expected_seq += 1;
74007 +               wrapped_file = 1;
74008 +       }
74010 +       curpage_off = nextpage_off;
74011 +       ntfs_free(page);
74012 +       page = NULL;
74013 +       reuse_page = 0;
74014 +       goto next_page;
74016 +check_tail:
74017 +       if (tail_page) {
74018 +               log->seq_num = expected_seq;
74019 +               log->last_lsn = le64_to_cpu(tail_page->record_hdr.last_end_lsn);
74020 +               log->ra->current_lsn = tail_page->record_hdr.last_end_lsn;
74021 +               log->l_flags &= ~NTFSLOG_NO_LAST_LSN;
74023 +               if (log->page_size -
74024 +                           le16_to_cpu(
74025 +                                   tail_page->record_hdr.next_record_off) >=
74026 +                   log->record_header_len) {
74027 +                       log->l_flags |= NTFSLOG_REUSE_TAIL;
74028 +                       log->next_page = curpage_off;
74029 +               } else {
74030 +                       log->l_flags &= ~NTFSLOG_REUSE_TAIL;
74031 +                       log->next_page = nextpage_off;
74032 +               }
74034 +               if (wrapped)
74035 +                       log->l_flags |= NTFSLOG_WRAPPED;
74036 +       }
74038 +       /* Remember that the partial IO will start at the next page */
74039 +       second_off = nextpage_off;
74041 +       /*
74042 +        * If the next page is the first page of the file then update
74043 +        * the sequence number for log records which begin on the next page
74044 +        */
74045 +       if (wrapped)
74046 +               expected_seq += 1;
74048 +       /*
74049 +        * If we have a tail copy or are performing single page I/O we can
74050 +        * immediately look at the next page
74051 +        */
74052 +       if (replace_page || (log->ra->flags & RESTART_SINGLE_PAGE_IO)) {
74053 +               page_cnt = 2;
74054 +               page_pos = 1;
74055 +               goto check_valid;
74056 +       }
74058 +       if (page_pos != page_cnt)
74059 +               goto check_valid;
74060 +       /*
74061 +        * If the next page causes us to wrap to the beginning of the log
74062 +        * file then we know which page to check next.
74063 +        */
74064 +       if (wrapped) {
74065 +               page_cnt = 2;
74066 +               page_pos = 1;
74067 +               goto check_valid;
74068 +       }
74070 +       cur_pos = 2;
74072 +next_test_page:
74073 +       ntfs_free(tst_page);
74074 +       tst_page = NULL;
74076 +       /* Walk through the file, reading log pages */
74077 +       err = read_log_page(log, nextpage_off, &tst_page, &usa_error);
74079 +       /*
74080 +        * If we get a USA error then assume that we correctly found
74081 +        * the end of the original transfer
74082 +        */
74083 +       if (usa_error)
74084 +               goto file_is_valid;
74086 +       /*
74087 +        * If we were able to read the page, we examine it to see if it
74088 +        * is the same or different Io block
74089 +        */
74090 +       if (err)
74091 +               goto next_test_page_1;
74093 +       if (le16_to_cpu(tst_page->page_pos) == cur_pos &&
74094 +           check_subseq_log_page(log, tst_page, nextpage_off, expected_seq)) {
74095 +               page_cnt = le16_to_cpu(tst_page->page_count) + 1;
74096 +               page_pos = le16_to_cpu(tst_page->page_pos);
74097 +               goto check_valid;
74098 +       } else {
74099 +               goto file_is_valid;
74100 +       }
74102 +next_test_page_1:
74104 +       nextpage_off = next_page_off(log, curpage_off);
74105 +       wrapped = nextpage_off == log->first_page;
74107 +       if (wrapped) {
74108 +               expected_seq += 1;
74109 +               page_cnt = 2;
74110 +               page_pos = 1;
74111 +       }
74113 +       cur_pos += 1;
74114 +       part_io_count += 1;
74115 +       if (!wrapped)
74116 +               goto next_test_page;
74118 +check_valid:
74119 +       /* Skip over the remaining pages this transfer */
74120 +       remain_pages = page_cnt - page_pos - 1;
74121 +       part_io_count += remain_pages;
74123 +       while (remain_pages--) {
74124 +               nextpage_off = next_page_off(log, curpage_off);
74125 +               wrapped = nextpage_off == log->first_page;
74127 +               if (wrapped)
74128 +                       expected_seq += 1;
74129 +       }
74131 +       /* Call our routine to check this log page */
74132 +       ntfs_free(tst_page);
74133 +       tst_page = NULL;
74135 +       err = read_log_page(log, nextpage_off, &tst_page, &usa_error);
74136 +       if (!err && !usa_error &&
74137 +           check_subseq_log_page(log, tst_page, nextpage_off, expected_seq)) {
74138 +               err = -EINVAL;
74139 +               goto out;
74140 +       }
74142 +file_is_valid:
74144 +       /* We have a valid file */
74145 +       if (page_off1 || tail_page) {
74146 +               struct RECORD_PAGE_HDR *tmp_page;
74148 +               if (sb_rdonly(log->ni->mi.sbi->sb)) {
74149 +                       err = -EROFS;
74150 +                       goto out;
74151 +               }
74153 +               if (page_off1) {
74154 +                       tmp_page = Add2Ptr(page_bufs, page_off1 - page_off);
74155 +                       tails -= (page_off1 - page_off) / log->page_size;
74156 +                       if (!tail_page)
74157 +                               tails -= 1;
74158 +               } else {
74159 +                       tmp_page = tail_page;
74160 +                       tails = 1;
74161 +               }
74163 +               while (tails--) {
74164 +                       u64 off = hdr_file_off(log, tmp_page);
74166 +                       if (!page) {
74167 +                               page = ntfs_malloc(log->page_size);
74168 +                               if (!page)
74169 +                                       return -ENOMEM;
74170 +                       }
74172 +                       /*
74173 +                        * Correct page and copy the data from this page
74174 +                        * into it and flush it to disk
74175 +                        */
74176 +                       memcpy(page, tmp_page, log->page_size);
74178 +                       /* Fill last flushed lsn value flush the page */
74179 +                       if (log->major_ver < 2)
74180 +                               page->rhdr.lsn = page->record_hdr.last_end_lsn;
74181 +                       else
74182 +                               page->file_off = 0;
74184 +                       page->page_pos = page->page_count = cpu_to_le16(1);
74186 +                       ntfs_fix_pre_write(&page->rhdr, log->page_size);
74188 +                       err = ntfs_sb_write_run(log->ni->mi.sbi,
74189 +                                               &log->ni->file.run, off, page,
74190 +                                               log->page_size);
74192 +                       if (err)
74193 +                               goto out;
74195 +                       if (part_io_count && second_off == off) {
74196 +                               second_off += log->page_size;
74197 +                               part_io_count -= 1;
74198 +                       }
74200 +                       tmp_page = Add2Ptr(tmp_page, log->page_size);
74201 +               }
74202 +       }
74204 +       if (part_io_count) {
74205 +               if (sb_rdonly(log->ni->mi.sbi->sb)) {
74206 +                       err = -EROFS;
74207 +                       goto out;
74208 +               }
74209 +       }
74211 +out:
74212 +       ntfs_free(second_tail);
74213 +       ntfs_free(first_tail);
74214 +       ntfs_free(page);
74215 +       ntfs_free(tst_page);
74216 +       ntfs_free(page_bufs);
74218 +       return err;
74222 + * read_log_rec_buf
74223 + *
74224 + * copies a log record from the file to a buffer
74225 + * The log record may span several log pages and may even wrap the file
74226 + */
74227 +static int read_log_rec_buf(struct ntfs_log *log,
74228 +                           const struct LFS_RECORD_HDR *rh, void *buffer)
74230 +       int err;
74231 +       struct RECORD_PAGE_HDR *ph = NULL;
74232 +       u64 lsn = le64_to_cpu(rh->this_lsn);
74233 +       u32 vbo = lsn_to_vbo(log, lsn) & ~log->page_mask;
74234 +       u32 off = lsn_to_page_off(log, lsn) + log->record_header_len;
74235 +       u32 data_len = le32_to_cpu(rh->client_data_len);
74237 +       /*
74238 +        * While there are more bytes to transfer,
74239 +        * we continue to attempt to perform the read
74240 +        */
74241 +       for (;;) {
74242 +               bool usa_error;
74243 +               u32 tail = log->page_size - off;
74245 +               if (tail >= data_len)
74246 +                       tail = data_len;
74248 +               data_len -= tail;
74250 +               err = read_log_page(log, vbo, &ph, &usa_error);
74251 +               if (err)
74252 +                       goto out;
74254 +               /*
74255 +                * The last lsn on this page better be greater or equal
74256 +                * to the lsn we are copying
74257 +                */
74258 +               if (lsn > le64_to_cpu(ph->rhdr.lsn)) {
74259 +                       err = -EINVAL;
74260 +                       goto out;
74261 +               }
74263 +               memcpy(buffer, Add2Ptr(ph, off), tail);
74265 +               /* If there are no more bytes to transfer, we exit the loop */
74266 +               if (!data_len) {
74267 +                       if (!is_log_record_end(ph) ||
74268 +                           lsn > le64_to_cpu(ph->record_hdr.last_end_lsn)) {
74269 +                               err = -EINVAL;
74270 +                               goto out;
74271 +                       }
74272 +                       break;
74273 +               }
74275 +               if (ph->rhdr.lsn == ph->record_hdr.last_end_lsn ||
74276 +                   lsn > le64_to_cpu(ph->rhdr.lsn)) {
74277 +                       err = -EINVAL;
74278 +                       goto out;
74279 +               }
74281 +               vbo = next_page_off(log, vbo);
74282 +               off = log->data_off;
74284 +               /*
74285 +                * adjust our pointer into the user's buffer to transfer
74286 +                * the next block to
74287 +                */
74288 +               buffer = Add2Ptr(buffer, tail);
74289 +       }
74291 +out:
74292 +       ntfs_free(ph);
74293 +       return err;
74296 +static int read_rst_area(struct ntfs_log *log, struct NTFS_RESTART **rst_,
74297 +                        u64 *lsn)
74299 +       int err;
74300 +       struct LFS_RECORD_HDR *rh = NULL;
74301 +       const struct CLIENT_REC *cr =
74302 +               Add2Ptr(log->ra, le16_to_cpu(log->ra->client_off));
74303 +       u64 lsnr, lsnc = le64_to_cpu(cr->restart_lsn);
74304 +       u32 len;
74305 +       struct NTFS_RESTART *rst;
74307 +       *lsn = 0;
74308 +       *rst_ = NULL;
74310 +       /* If the client doesn't have a restart area, go ahead and exit now */
74311 +       if (!lsnc)
74312 +               return 0;
74314 +       err = read_log_page(log, lsn_to_vbo(log, lsnc),
74315 +                           (struct RECORD_PAGE_HDR **)&rh, NULL);
74316 +       if (err)
74317 +               return err;
74319 +       rst = NULL;
74320 +       lsnr = le64_to_cpu(rh->this_lsn);
74322 +       if (lsnc != lsnr) {
74323 +               /* If the lsn values don't match, then the disk is corrupt */
74324 +               err = -EINVAL;
74325 +               goto out;
74326 +       }
74328 +       *lsn = lsnr;
74329 +       len = le32_to_cpu(rh->client_data_len);
74331 +       if (!len) {
74332 +               err = 0;
74333 +               goto out;
74334 +       }
74336 +       if (len < sizeof(struct NTFS_RESTART)) {
74337 +               err = -EINVAL;
74338 +               goto out;
74339 +       }
74341 +       rst = ntfs_malloc(len);
74342 +       if (!rst) {
74343 +               err = -ENOMEM;
74344 +               goto out;
74345 +       }
74347 +       /* Copy the data into the 'rst' buffer */
74348 +       err = read_log_rec_buf(log, rh, rst);
74349 +       if (err)
74350 +               goto out;
74352 +       *rst_ = rst;
74353 +       rst = NULL;
74355 +out:
74356 +       ntfs_free(rh);
74357 +       ntfs_free(rst);
74359 +       return err;
74362 +static int find_log_rec(struct ntfs_log *log, u64 lsn, struct lcb *lcb)
74364 +       int err;
74365 +       struct LFS_RECORD_HDR *rh = lcb->lrh;
74366 +       u32 rec_len, len;
74368 +       /* Read the record header for this lsn */
74369 +       if (!rh) {
74370 +               err = read_log_page(log, lsn_to_vbo(log, lsn),
74371 +                                   (struct RECORD_PAGE_HDR **)&rh, NULL);
74373 +               lcb->lrh = rh;
74374 +               if (err)
74375 +                       return err;
74376 +       }
74378 +       /*
74379 +        * If the lsn of the log record doesn't match the desired
74380 +        * lsn then the disk is corrupt
74381 +        */
74382 +       if (lsn != le64_to_cpu(rh->this_lsn))
74383 +               return -EINVAL;
74385 +       len = le32_to_cpu(rh->client_data_len);
74387 +       /*
74388 +        * check that the length field isn't greater than the total
74389 +        * available space in the log file
74390 +        */
74391 +       rec_len = len + log->record_header_len;
74392 +       if (rec_len >= log->total_avail)
74393 +               return -EINVAL;
74395 +       /*
74396 +        * If the entire log record is on this log page,
74397 +        * put a pointer to the log record in the context block
74398 +        */
74399 +       if (rh->flags & LOG_RECORD_MULTI_PAGE) {
74400 +               void *lr = ntfs_malloc(len);
74402 +               if (!lr)
74403 +                       return -ENOMEM;
74405 +               lcb->log_rec = lr;
74406 +               lcb->alloc = true;
74408 +               /* Copy the data into the buffer returned */
74409 +               err = read_log_rec_buf(log, rh, lr);
74410 +               if (err)
74411 +                       return err;
74412 +       } else {
74413 +               /* If beyond the end of the current page -> an error */
74414 +               u32 page_off = lsn_to_page_off(log, lsn);
74416 +               if (page_off + len + log->record_header_len > log->page_size)
74417 +                       return -EINVAL;
74419 +               lcb->log_rec = Add2Ptr(rh, sizeof(struct LFS_RECORD_HDR));
74420 +               lcb->alloc = false;
74421 +       }
74423 +       return 0;
74427 + * read_log_rec_lcb
74428 + *
74429 + * initiates the query operation.
74430 + */
74431 +static int read_log_rec_lcb(struct ntfs_log *log, u64 lsn, u32 ctx_mode,
74432 +                           struct lcb **lcb_)
74434 +       int err;
74435 +       const struct CLIENT_REC *cr;
74436 +       struct lcb *lcb;
74438 +       switch (ctx_mode) {
74439 +       case lcb_ctx_undo_next:
74440 +       case lcb_ctx_prev:
74441 +       case lcb_ctx_next:
74442 +               break;
74443 +       default:
74444 +               return -EINVAL;
74445 +       }
74447 +       /* check that the given lsn is the legal range for this client */
74448 +       cr = Add2Ptr(log->ra, le16_to_cpu(log->ra->client_off));
74450 +       if (!verify_client_lsn(log, cr, lsn))
74451 +               return -EINVAL;
74453 +       lcb = ntfs_zalloc(sizeof(struct lcb));
74454 +       if (!lcb)
74455 +               return -ENOMEM;
74456 +       lcb->client = log->client_id;
74457 +       lcb->ctx_mode = ctx_mode;
74459 +       /* Find the log record indicated by the given lsn */
74460 +       err = find_log_rec(log, lsn, lcb);
74461 +       if (err)
74462 +               goto out;
74464 +       *lcb_ = lcb;
74465 +       return 0;
74467 +out:
74468 +       lcb_put(lcb);
74469 +       *lcb_ = NULL;
74470 +       return err;
74474 + * find_client_next_lsn
74475 + *
74476 + * attempt to find the next lsn to return to a client based on the context mode.
74477 + */
74478 +static int find_client_next_lsn(struct ntfs_log *log, struct lcb *lcb, u64 *lsn)
74480 +       int err;
74481 +       u64 next_lsn;
74482 +       struct LFS_RECORD_HDR *hdr;
74484 +       hdr = lcb->lrh;
74485 +       *lsn = 0;
74487 +       if (lcb_ctx_next != lcb->ctx_mode)
74488 +               goto check_undo_next;
74490 +       /* Loop as long as another lsn can be found */
74491 +       for (;;) {
74492 +               u64 current_lsn;
74494 +               err = next_log_lsn(log, hdr, &current_lsn);
74495 +               if (err)
74496 +                       goto out;
74498 +               if (!current_lsn)
74499 +                       break;
74501 +               if (hdr != lcb->lrh)
74502 +                       ntfs_free(hdr);
74504 +               hdr = NULL;
74505 +               err = read_log_page(log, lsn_to_vbo(log, current_lsn),
74506 +                                   (struct RECORD_PAGE_HDR **)&hdr, NULL);
74507 +               if (err)
74508 +                       goto out;
74510 +               if (memcmp(&hdr->client, &lcb->client,
74511 +                          sizeof(struct CLIENT_ID))) {
74512 +                       /*err = -EINVAL; */
74513 +               } else if (LfsClientRecord == hdr->record_type) {
74514 +                       ntfs_free(lcb->lrh);
74515 +                       lcb->lrh = hdr;
74516 +                       *lsn = current_lsn;
74517 +                       return 0;
74518 +               }
74519 +       }
74521 +out:
74522 +       if (hdr != lcb->lrh)
74523 +               ntfs_free(hdr);
74524 +       return err;
74526 +check_undo_next:
74527 +       if (lcb_ctx_undo_next == lcb->ctx_mode)
74528 +               next_lsn = le64_to_cpu(hdr->client_undo_next_lsn);
74529 +       else if (lcb_ctx_prev == lcb->ctx_mode)
74530 +               next_lsn = le64_to_cpu(hdr->client_prev_lsn);
74531 +       else
74532 +               return 0;
74534 +       if (!next_lsn)
74535 +               return 0;
74537 +       if (!verify_client_lsn(
74538 +                   log, Add2Ptr(log->ra, le16_to_cpu(log->ra->client_off)),
74539 +                   next_lsn))
74540 +               return 0;
74542 +       hdr = NULL;
74543 +       err = read_log_page(log, lsn_to_vbo(log, next_lsn),
74544 +                           (struct RECORD_PAGE_HDR **)&hdr, NULL);
74545 +       if (err)
74546 +               return err;
74547 +       ntfs_free(lcb->lrh);
74548 +       lcb->lrh = hdr;
74550 +       *lsn = next_lsn;
74552 +       return 0;
74555 +static int read_next_log_rec(struct ntfs_log *log, struct lcb *lcb, u64 *lsn)
74557 +       int err;
74559 +       err = find_client_next_lsn(log, lcb, lsn);
74560 +       if (err)
74561 +               return err;
74563 +       if (!*lsn)
74564 +               return 0;
74566 +       if (lcb->alloc)
74567 +               ntfs_free(lcb->log_rec);
74569 +       lcb->log_rec = NULL;
74570 +       lcb->alloc = false;
74571 +       ntfs_free(lcb->lrh);
74572 +       lcb->lrh = NULL;
74574 +       return find_log_rec(log, *lsn, lcb);
74577 +static inline bool check_index_header(const struct INDEX_HDR *hdr, size_t bytes)
74579 +       __le16 mask;
74580 +       u32 min_de, de_off, used, total;
74581 +       const struct NTFS_DE *e;
74583 +       if (hdr_has_subnode(hdr)) {
74584 +               min_de = sizeof(struct NTFS_DE) + sizeof(u64);
74585 +               mask = NTFS_IE_HAS_SUBNODES;
74586 +       } else {
74587 +               min_de = sizeof(struct NTFS_DE);
74588 +               mask = 0;
74589 +       }
74591 +       de_off = le32_to_cpu(hdr->de_off);
74592 +       used = le32_to_cpu(hdr->used);
74593 +       total = le32_to_cpu(hdr->total);
74595 +       if (de_off > bytes - min_de || used > bytes || total > bytes ||
74596 +           de_off + min_de > used || used > total) {
74597 +               return false;
74598 +       }
74600 +       e = Add2Ptr(hdr, de_off);
74601 +       for (;;) {
74602 +               u16 esize = le16_to_cpu(e->size);
74603 +               struct NTFS_DE *next = Add2Ptr(e, esize);
74605 +               if (esize < min_de || PtrOffset(hdr, next) > used ||
74606 +                   (e->flags & NTFS_IE_HAS_SUBNODES) != mask) {
74607 +                       return false;
74608 +               }
74610 +               if (de_is_last(e))
74611 +                       break;
74613 +               e = next;
74614 +       }
74616 +       return true;
74619 +static inline bool check_index_buffer(const struct INDEX_BUFFER *ib, u32 bytes)
74621 +       u16 fo;
74622 +       const struct NTFS_RECORD_HEADER *r = &ib->rhdr;
74624 +       if (r->sign != NTFS_INDX_SIGNATURE)
74625 +               return false;
74627 +       fo = (SECTOR_SIZE - ((bytes >> SECTOR_SHIFT) + 1) * sizeof(short));
74629 +       if (le16_to_cpu(r->fix_off) > fo)
74630 +               return false;
74632 +       if ((le16_to_cpu(r->fix_num) - 1) * SECTOR_SIZE != bytes)
74633 +               return false;
74635 +       return check_index_header(&ib->ihdr,
74636 +                                 bytes - offsetof(struct INDEX_BUFFER, ihdr));
74639 +static inline bool check_index_root(const struct ATTRIB *attr,
74640 +                                   struct ntfs_sb_info *sbi)
74642 +       bool ret;
74643 +       const struct INDEX_ROOT *root = resident_data(attr);
74644 +       u8 index_bits = le32_to_cpu(root->index_block_size) >= sbi->cluster_size
74645 +                               ? sbi->cluster_bits
74646 +                               : SECTOR_SHIFT;
74647 +       u8 block_clst = root->index_block_clst;
74649 +       if (le32_to_cpu(attr->res.data_size) < sizeof(struct INDEX_ROOT) ||
74650 +           (root->type != ATTR_NAME && root->type != ATTR_ZERO) ||
74651 +           (root->type == ATTR_NAME &&
74652 +            root->rule != NTFS_COLLATION_TYPE_FILENAME) ||
74653 +           (le32_to_cpu(root->index_block_size) !=
74654 +            (block_clst << index_bits)) ||
74655 +           (block_clst != 1 && block_clst != 2 && block_clst != 4 &&
74656 +            block_clst != 8 && block_clst != 0x10 && block_clst != 0x20 &&
74657 +            block_clst != 0x40 && block_clst != 0x80)) {
74658 +               return false;
74659 +       }
74661 +       ret = check_index_header(&root->ihdr,
74662 +                                le32_to_cpu(attr->res.data_size) -
74663 +                                        offsetof(struct INDEX_ROOT, ihdr));
74664 +       return ret;
74667 +static inline bool check_attr(const struct MFT_REC *rec,
74668 +                             const struct ATTRIB *attr,
74669 +                             struct ntfs_sb_info *sbi)
74671 +       u32 asize = le32_to_cpu(attr->size);
74672 +       u32 rsize = 0;
74673 +       u64 dsize, svcn, evcn;
74674 +       u16 run_off;
74676 +       /* Check the fixed part of the attribute record header */
74677 +       if (asize >= sbi->record_size ||
74678 +           asize + PtrOffset(rec, attr) >= sbi->record_size ||
74679 +           (attr->name_len &&
74680 +            le16_to_cpu(attr->name_off) + attr->name_len * sizeof(short) >
74681 +                    asize)) {
74682 +               return false;
74683 +       }
74685 +       /* Check the attribute fields */
74686 +       switch (attr->non_res) {
74687 +       case 0:
74688 +               rsize = le32_to_cpu(attr->res.data_size);
74689 +               if (rsize >= asize ||
74690 +                   le16_to_cpu(attr->res.data_off) + rsize > asize) {
74691 +                       return false;
74692 +               }
74693 +               break;
74695 +       case 1:
74696 +               dsize = le64_to_cpu(attr->nres.data_size);
74697 +               svcn = le64_to_cpu(attr->nres.svcn);
74698 +               evcn = le64_to_cpu(attr->nres.evcn);
74699 +               run_off = le16_to_cpu(attr->nres.run_off);
74701 +               if (svcn > evcn + 1 || run_off >= asize ||
74702 +                   le64_to_cpu(attr->nres.valid_size) > dsize ||
74703 +                   dsize > le64_to_cpu(attr->nres.alloc_size)) {
74704 +                       return false;
74705 +               }
74707 +               if (run_unpack(NULL, sbi, 0, svcn, evcn, svcn,
74708 +                              Add2Ptr(attr, run_off), asize - run_off) < 0) {
74709 +                       return false;
74710 +               }
74712 +               return true;
74714 +       default:
74715 +               return false;
74716 +       }
74718 +       switch (attr->type) {
74719 +       case ATTR_NAME:
74720 +               if (fname_full_size(Add2Ptr(
74721 +                           attr, le16_to_cpu(attr->res.data_off))) > asize) {
74722 +                       return false;
74723 +               }
74724 +               break;
74726 +       case ATTR_ROOT:
74727 +               return check_index_root(attr, sbi);
74729 +       case ATTR_STD:
74730 +               if (rsize < sizeof(struct ATTR_STD_INFO5) &&
74731 +                   rsize != sizeof(struct ATTR_STD_INFO)) {
74732 +                       return false;
74733 +               }
74734 +               break;
74736 +       case ATTR_LIST:
74737 +       case ATTR_ID:
74738 +       case ATTR_SECURE:
74739 +       case ATTR_LABEL:
74740 +       case ATTR_VOL_INFO:
74741 +       case ATTR_DATA:
74742 +       case ATTR_ALLOC:
74743 +       case ATTR_BITMAP:
74744 +       case ATTR_REPARSE:
74745 +       case ATTR_EA_INFO:
74746 +       case ATTR_EA:
74747 +       case ATTR_PROPERTYSET:
74748 +       case ATTR_LOGGED_UTILITY_STREAM:
74749 +               break;
74751 +       default:
74752 +               return false;
74753 +       }
74755 +       return true;
74758 +static inline bool check_file_record(const struct MFT_REC *rec,
74759 +                                    const struct MFT_REC *rec2,
74760 +                                    struct ntfs_sb_info *sbi)
74762 +       const struct ATTRIB *attr;
74763 +       u16 fo = le16_to_cpu(rec->rhdr.fix_off);
74764 +       u16 fn = le16_to_cpu(rec->rhdr.fix_num);
74765 +       u16 ao = le16_to_cpu(rec->attr_off);
74766 +       u32 rs = sbi->record_size;
74768 +       /* check the file record header for consistency */
74769 +       if (rec->rhdr.sign != NTFS_FILE_SIGNATURE ||
74770 +           fo > (SECTOR_SIZE - ((rs >> SECTOR_SHIFT) + 1) * sizeof(short)) ||
74771 +           (fn - 1) * SECTOR_SIZE != rs || ao < MFTRECORD_FIXUP_OFFSET_1 ||
74772 +           ao > sbi->record_size - SIZEOF_RESIDENT || !is_rec_inuse(rec) ||
74773 +           le32_to_cpu(rec->total) != rs) {
74774 +               return false;
74775 +       }
74777 +       /* Loop to check all of the attributes */
74778 +       for (attr = Add2Ptr(rec, ao); attr->type != ATTR_END;
74779 +            attr = Add2Ptr(attr, le32_to_cpu(attr->size))) {
74780 +               if (check_attr(rec, attr, sbi))
74781 +                       continue;
74782 +               return false;
74783 +       }
74785 +       return true;
74788 +static inline int check_lsn(const struct NTFS_RECORD_HEADER *hdr,
74789 +                           const u64 *rlsn)
74791 +       u64 lsn;
74793 +       if (!rlsn)
74794 +               return true;
74796 +       lsn = le64_to_cpu(hdr->lsn);
74798 +       if (hdr->sign == NTFS_HOLE_SIGNATURE)
74799 +               return false;
74801 +       if (*rlsn > lsn)
74802 +               return true;
74804 +       return false;
74807 +static inline bool check_if_attr(const struct MFT_REC *rec,
74808 +                                const struct LOG_REC_HDR *lrh)
74810 +       u16 ro = le16_to_cpu(lrh->record_off);
74811 +       u16 o = le16_to_cpu(rec->attr_off);
74812 +       const struct ATTRIB *attr = Add2Ptr(rec, o);
74814 +       while (o < ro) {
74815 +               u32 asize;
74817 +               if (attr->type == ATTR_END)
74818 +                       break;
74820 +               asize = le32_to_cpu(attr->size);
74821 +               if (!asize)
74822 +                       break;
74824 +               o += asize;
74825 +               attr = Add2Ptr(attr, asize);
74826 +       }
74828 +       return o == ro;
74831 +static inline bool check_if_index_root(const struct MFT_REC *rec,
74832 +                                      const struct LOG_REC_HDR *lrh)
74834 +       u16 ro = le16_to_cpu(lrh->record_off);
74835 +       u16 o = le16_to_cpu(rec->attr_off);
74836 +       const struct ATTRIB *attr = Add2Ptr(rec, o);
74838 +       while (o < ro) {
74839 +               u32 asize;
74841 +               if (attr->type == ATTR_END)
74842 +                       break;
74844 +               asize = le32_to_cpu(attr->size);
74845 +               if (!asize)
74846 +                       break;
74848 +               o += asize;
74849 +               attr = Add2Ptr(attr, asize);
74850 +       }
74852 +       return o == ro && attr->type == ATTR_ROOT;
74855 +static inline bool check_if_root_index(const struct ATTRIB *attr,
74856 +                                      const struct INDEX_HDR *hdr,
74857 +                                      const struct LOG_REC_HDR *lrh)
74859 +       u16 ao = le16_to_cpu(lrh->attr_off);
74860 +       u32 de_off = le32_to_cpu(hdr->de_off);
74861 +       u32 o = PtrOffset(attr, hdr) + de_off;
74862 +       const struct NTFS_DE *e = Add2Ptr(hdr, de_off);
74863 +       u32 asize = le32_to_cpu(attr->size);
74865 +       while (o < ao) {
74866 +               u16 esize;
74868 +               if (o >= asize)
74869 +                       break;
74871 +               esize = le16_to_cpu(e->size);
74872 +               if (!esize)
74873 +                       break;
74875 +               o += esize;
74876 +               e = Add2Ptr(e, esize);
74877 +       }
74879 +       return o == ao;
74882 +static inline bool check_if_alloc_index(const struct INDEX_HDR *hdr,
74883 +                                       u32 attr_off)
74885 +       u32 de_off = le32_to_cpu(hdr->de_off);
74886 +       u32 o = offsetof(struct INDEX_BUFFER, ihdr) + de_off;
74887 +       const struct NTFS_DE *e = Add2Ptr(hdr, de_off);
74888 +       u32 used = le32_to_cpu(hdr->used);
74890 +       while (o < attr_off) {
74891 +               u16 esize;
74893 +               if (de_off >= used)
74894 +                       break;
74896 +               esize = le16_to_cpu(e->size);
74897 +               if (!esize)
74898 +                       break;
74900 +               o += esize;
74901 +               de_off += esize;
74902 +               e = Add2Ptr(e, esize);
74903 +       }
74905 +       return o == attr_off;
74908 +static inline void change_attr_size(struct MFT_REC *rec, struct ATTRIB *attr,
74909 +                                   u32 nsize)
74911 +       u32 asize = le32_to_cpu(attr->size);
74912 +       int dsize = nsize - asize;
74913 +       u8 *next = Add2Ptr(attr, asize);
74914 +       u32 used = le32_to_cpu(rec->used);
74916 +       memmove(Add2Ptr(attr, nsize), next, used - PtrOffset(rec, next));
74918 +       rec->used = cpu_to_le32(used + dsize);
74919 +       attr->size = cpu_to_le32(nsize);
74922 +struct OpenAttr {
74923 +       struct ATTRIB *attr;
74924 +       struct runs_tree *run1;
74925 +       struct runs_tree run0;
74926 +       struct ntfs_inode *ni;
74927 +       // CLST rno;
74930 +/* Returns 0 if 'attr' has the same type and name */
74931 +static inline int cmp_type_and_name(const struct ATTRIB *a1,
74932 +                                   const struct ATTRIB *a2)
74934 +       return a1->type != a2->type || a1->name_len != a2->name_len ||
74935 +              (a1->name_len && memcmp(attr_name(a1), attr_name(a2),
74936 +                                      a1->name_len * sizeof(short)));
74939 +static struct OpenAttr *find_loaded_attr(struct ntfs_log *log,
74940 +                                        const struct ATTRIB *attr, CLST rno)
74942 +       struct OPEN_ATTR_ENRTY *oe = NULL;
74944 +       while ((oe = enum_rstbl(log->open_attr_tbl, oe))) {
74945 +               struct OpenAttr *op_attr;
74947 +               if (ino_get(&oe->ref) != rno)
74948 +                       continue;
74950 +               op_attr = (struct OpenAttr *)oe->ptr;
74951 +               if (!cmp_type_and_name(op_attr->attr, attr))
74952 +                       return op_attr;
74953 +       }
74954 +       return NULL;
74957 +static struct ATTRIB *attr_create_nonres_log(struct ntfs_sb_info *sbi,
74958 +                                            enum ATTR_TYPE type, u64 size,
74959 +                                            const u16 *name, size_t name_len,
74960 +                                            __le16 flags)
74962 +       struct ATTRIB *attr;
74963 +       u32 name_size = QuadAlign(name_len * sizeof(short));
74964 +       bool is_ext = flags & (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED);
74965 +       u32 asize = name_size +
74966 +                   (is_ext ? SIZEOF_NONRESIDENT_EX : SIZEOF_NONRESIDENT);
74968 +       attr = ntfs_zalloc(asize);
74969 +       if (!attr)
74970 +               return NULL;
74972 +       attr->type = type;
74973 +       attr->size = cpu_to_le32(asize);
74974 +       attr->flags = flags;
74975 +       attr->non_res = 1;
74976 +       attr->name_len = name_len;
74978 +       attr->nres.evcn = cpu_to_le64((u64)bytes_to_cluster(sbi, size) - 1);
74979 +       attr->nres.alloc_size = cpu_to_le64(ntfs_up_cluster(sbi, size));
74980 +       attr->nres.data_size = cpu_to_le64(size);
74981 +       attr->nres.valid_size = attr->nres.data_size;
74982 +       if (is_ext) {
74983 +               attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
74984 +               if (is_attr_compressed(attr))
74985 +                       attr->nres.c_unit = COMPRESSION_UNIT;
74987 +               attr->nres.run_off =
74988 +                       cpu_to_le16(SIZEOF_NONRESIDENT_EX + name_size);
74989 +               memcpy(Add2Ptr(attr, SIZEOF_NONRESIDENT_EX), name,
74990 +                      name_len * sizeof(short));
74991 +       } else {
74992 +               attr->name_off = SIZEOF_NONRESIDENT_LE;
74993 +               attr->nres.run_off =
74994 +                       cpu_to_le16(SIZEOF_NONRESIDENT + name_size);
74995 +               memcpy(Add2Ptr(attr, SIZEOF_NONRESIDENT), name,
74996 +                      name_len * sizeof(short));
74997 +       }
74999 +       return attr;
75003 + * do_action
75004 + *
75005 + * common routine for the Redo and Undo Passes
75006 + * If rlsn is NULL then undo
75007 + */
75008 +static int do_action(struct ntfs_log *log, struct OPEN_ATTR_ENRTY *oe,
75009 +                    const struct LOG_REC_HDR *lrh, u32 op, void *data,
75010 +                    u32 dlen, u32 rec_len, const u64 *rlsn)
75012 +       int err = 0;
75013 +       struct ntfs_sb_info *sbi = log->ni->mi.sbi;
75014 +       struct inode *inode = NULL, *inode_parent;
75015 +       struct mft_inode *mi = NULL, *mi2_child = NULL;
75016 +       CLST rno = 0, rno_base = 0;
75017 +       struct INDEX_BUFFER *ib = NULL;
75018 +       struct MFT_REC *rec = NULL;
75019 +       struct ATTRIB *attr = NULL, *attr2;
75020 +       struct INDEX_HDR *hdr;
75021 +       struct INDEX_ROOT *root;
75022 +       struct NTFS_DE *e, *e1, *e2;
75023 +       struct NEW_ATTRIBUTE_SIZES *new_sz;
75024 +       struct ATTR_FILE_NAME *fname;
75025 +       struct OpenAttr *oa, *oa2;
75026 +       u32 nsize, t32, asize, used, esize, bmp_off, bmp_bits;
75027 +       u16 id, id2;
75028 +       u32 record_size = sbi->record_size;
75029 +       u64 t64;
75030 +       u16 roff = le16_to_cpu(lrh->record_off);
75031 +       u16 aoff = le16_to_cpu(lrh->attr_off);
75032 +       u64 lco = 0;
75033 +       u64 cbo = (u64)le16_to_cpu(lrh->cluster_off) << SECTOR_SHIFT;
75034 +       u64 tvo = le64_to_cpu(lrh->target_vcn) << sbi->cluster_bits;
75035 +       u64 vbo = cbo + tvo;
75036 +       void *buffer_le = NULL;
75037 +       u32 bytes = 0;
75038 +       bool a_dirty = false;
75039 +       u16 data_off;
75041 +       oa = oe->ptr;
75043 +       /* Big switch to prepare */
75044 +       switch (op) {
75045 +       /* ============================================================
75046 +        * Process MFT records, as described by the current log record
75047 +        * ============================================================
75048 +        */
75049 +       case InitializeFileRecordSegment:
75050 +       case DeallocateFileRecordSegment:
75051 +       case WriteEndOfFileRecordSegment:
75052 +       case CreateAttribute:
75053 +       case DeleteAttribute:
75054 +       case UpdateResidentValue:
75055 +       case UpdateMappingPairs:
75056 +       case SetNewAttributeSizes:
75057 +       case AddIndexEntryRoot:
75058 +       case DeleteIndexEntryRoot:
75059 +       case SetIndexEntryVcnRoot:
75060 +       case UpdateFileNameRoot:
75061 +       case UpdateRecordDataRoot:
75062 +       case ZeroEndOfFileRecord:
75063 +               rno = vbo >> sbi->record_bits;
75064 +               inode = ilookup(sbi->sb, rno);
75065 +               if (inode) {
75066 +                       mi = &ntfs_i(inode)->mi;
75067 +               } else if (op == InitializeFileRecordSegment) {
75068 +                       mi = ntfs_zalloc(sizeof(struct mft_inode));
75069 +                       if (!mi)
75070 +                               return -ENOMEM;
75071 +                       err = mi_format_new(mi, sbi, rno, 0, false);
75072 +                       if (err)
75073 +                               goto out;
75074 +               } else {
75075 +                       /* read from disk */
75076 +                       err = mi_get(sbi, rno, &mi);
75077 +                       if (err)
75078 +                               return err;
75079 +               }
75080 +               rec = mi->mrec;
75082 +               if (op == DeallocateFileRecordSegment)
75083 +                       goto skip_load_parent;
75085 +               if (InitializeFileRecordSegment != op) {
75086 +                       if (rec->rhdr.sign == NTFS_BAAD_SIGNATURE)
75087 +                               goto dirty_vol;
75088 +                       if (!check_lsn(&rec->rhdr, rlsn))
75089 +                               goto out;
75090 +                       if (!check_file_record(rec, NULL, sbi))
75091 +                               goto dirty_vol;
75092 +                       attr = Add2Ptr(rec, roff);
75093 +               }
75095 +               if (is_rec_base(rec) || InitializeFileRecordSegment == op) {
75096 +                       rno_base = rno;
75097 +                       goto skip_load_parent;
75098 +               }
75100 +               rno_base = ino_get(&rec->parent_ref);
75101 +               inode_parent = ntfs_iget5(sbi->sb, &rec->parent_ref, NULL);
75102 +               if (IS_ERR(inode_parent))
75103 +                       goto skip_load_parent;
75105 +               if (is_bad_inode(inode_parent)) {
75106 +                       iput(inode_parent);
75107 +                       goto skip_load_parent;
75108 +               }
75110 +               if (ni_load_mi_ex(ntfs_i(inode_parent), rno, &mi2_child)) {
75111 +                       iput(inode_parent);
75112 +               } else {
75113 +                       if (mi2_child->mrec != mi->mrec)
75114 +                               memcpy(mi2_child->mrec, mi->mrec,
75115 +                                      sbi->record_size);
75117 +                       if (inode)
75118 +                               iput(inode);
75119 +                       else if (mi)
75120 +                               mi_put(mi);
75122 +                       inode = inode_parent;
75123 +                       mi = mi2_child;
75124 +                       rec = mi2_child->mrec;
75125 +                       attr = Add2Ptr(rec, roff);
75126 +               }
75128 +skip_load_parent:
75129 +               inode_parent = NULL;
75130 +               break;
75132 +       /* ============================================================
75133 +        * Process attributes, as described by the current log record
75134 +        * ============================================================
75135 +        */
75136 +       case UpdateNonresidentValue:
75137 +       case AddIndexEntryAllocation:
75138 +       case DeleteIndexEntryAllocation:
75139 +       case WriteEndOfIndexBuffer:
75140 +       case SetIndexEntryVcnAllocation:
75141 +       case UpdateFileNameAllocation:
75142 +       case SetBitsInNonresidentBitMap:
75143 +       case ClearBitsInNonresidentBitMap:
75144 +       case UpdateRecordDataAllocation:
75145 +               attr = oa->attr;
75146 +               bytes = UpdateNonresidentValue == op ? dlen : 0;
75147 +               lco = (u64)le16_to_cpu(lrh->lcns_follow) << sbi->cluster_bits;
75149 +               if (attr->type == ATTR_ALLOC) {
75150 +                       t32 = le32_to_cpu(oe->bytes_per_index);
75151 +                       if (bytes < t32)
75152 +                               bytes = t32;
75153 +               }
75155 +               if (!bytes)
75156 +                       bytes = lco - cbo;
75158 +               bytes += roff;
75159 +               if (attr->type == ATTR_ALLOC)
75160 +                       bytes = (bytes + 511) & ~511; // align
75162 +               buffer_le = ntfs_malloc(bytes);
75163 +               if (!buffer_le)
75164 +                       return -ENOMEM;
75166 +               err = ntfs_read_run_nb(sbi, oa->run1, vbo, buffer_le, bytes,
75167 +                                      NULL);
75168 +               if (err)
75169 +                       goto out;
75171 +               if (attr->type == ATTR_ALLOC && *(int *)buffer_le)
75172 +                       ntfs_fix_post_read(buffer_le, bytes, false);
75173 +               break;
75175 +       default:
75176 +               WARN_ON(1);
75177 +       }
75179 +       /* Big switch to do operation */
75180 +       switch (op) {
75181 +       case InitializeFileRecordSegment:
75182 +               if (roff + dlen > record_size)
75183 +                       goto dirty_vol;
75185 +               memcpy(Add2Ptr(rec, roff), data, dlen);
75186 +               mi->dirty = true;
75187 +               break;
75189 +       case DeallocateFileRecordSegment:
75190 +               clear_rec_inuse(rec);
75191 +               le16_add_cpu(&rec->seq, 1);
75192 +               mi->dirty = true;
75193 +               break;
75195 +       case WriteEndOfFileRecordSegment:
75196 +               attr2 = (struct ATTRIB *)data;
75197 +               if (!check_if_attr(rec, lrh) || roff + dlen > record_size)
75198 +                       goto dirty_vol;
75200 +               memmove(attr, attr2, dlen);
75201 +               rec->used = cpu_to_le32(QuadAlign(roff + dlen));
75203 +               mi->dirty = true;
75204 +               break;
75206 +       case CreateAttribute:
75207 +               attr2 = (struct ATTRIB *)data;
75208 +               asize = le32_to_cpu(attr2->size);
75209 +               used = le32_to_cpu(rec->used);
75211 +               if (!check_if_attr(rec, lrh) || dlen < SIZEOF_RESIDENT ||
75212 +                   !IsQuadAligned(asize) ||
75213 +                   Add2Ptr(attr2, asize) > Add2Ptr(lrh, rec_len) ||
75214 +                   dlen > record_size - used) {
75215 +                       goto dirty_vol;
75216 +               }
75218 +               memmove(Add2Ptr(attr, asize), attr, used - roff);
75219 +               memcpy(attr, attr2, asize);
75221 +               rec->used = cpu_to_le32(used + asize);
75222 +               id = le16_to_cpu(rec->next_attr_id);
75223 +               id2 = le16_to_cpu(attr2->id);
75224 +               if (id <= id2)
75225 +                       rec->next_attr_id = cpu_to_le16(id2 + 1);
75226 +               if (is_attr_indexed(attr))
75227 +                       le16_add_cpu(&rec->hard_links, 1);
75229 +               oa2 = find_loaded_attr(log, attr, rno_base);
75230 +               if (oa2) {
75231 +                       void *p2 = ntfs_memdup(attr, le32_to_cpu(attr->size));
75233 +                       if (p2) {
75234 +                               // run_close(oa2->run1);
75235 +                               ntfs_free(oa2->attr);
75236 +                               oa2->attr = p2;
75237 +                       }
75238 +               }
75240 +               mi->dirty = true;
75241 +               break;
75243 +       case DeleteAttribute:
75244 +               asize = le32_to_cpu(attr->size);
75245 +               used = le32_to_cpu(rec->used);
75247 +               if (!check_if_attr(rec, lrh))
75248 +                       goto dirty_vol;
75250 +               rec->used = cpu_to_le32(used - asize);
75251 +               if (is_attr_indexed(attr))
75252 +                       le16_add_cpu(&rec->hard_links, -1);
75254 +               memmove(attr, Add2Ptr(attr, asize), used - asize - roff);
75256 +               mi->dirty = true;
75257 +               break;
75259 +       case UpdateResidentValue:
75260 +               nsize = aoff + dlen;
75262 +               if (!check_if_attr(rec, lrh))
75263 +                       goto dirty_vol;
75265 +               asize = le32_to_cpu(attr->size);
75266 +               used = le32_to_cpu(rec->used);
75268 +               if (lrh->redo_len == lrh->undo_len) {
75269 +                       if (nsize > asize)
75270 +                               goto dirty_vol;
75271 +                       goto move_data;
75272 +               }
75274 +               if (nsize > asize && nsize - asize > record_size - used)
75275 +                       goto dirty_vol;
75277 +               nsize = QuadAlign(nsize);
75278 +               data_off = le16_to_cpu(attr->res.data_off);
75280 +               if (nsize < asize) {
75281 +                       memmove(Add2Ptr(attr, aoff), data, dlen);
75282 +                       data = NULL; // To skip below memmove
75283 +               }
75285 +               memmove(Add2Ptr(attr, nsize), Add2Ptr(attr, asize),
75286 +                       used - le16_to_cpu(lrh->record_off) - asize);
75288 +               rec->used = cpu_to_le32(used + nsize - asize);
75289 +               attr->size = cpu_to_le32(nsize);
75290 +               attr->res.data_size = cpu_to_le32(aoff + dlen - data_off);
75292 +move_data:
75293 +               if (data)
75294 +                       memmove(Add2Ptr(attr, aoff), data, dlen);
75296 +               oa2 = find_loaded_attr(log, attr, rno_base);
75297 +               if (oa2) {
75298 +                       void *p2 = ntfs_memdup(attr, le32_to_cpu(attr->size));
75300 +                       if (p2) {
75301 +                               // run_close(&oa2->run0);
75302 +                               oa2->run1 = &oa2->run0;
75303 +                               ntfs_free(oa2->attr);
75304 +                               oa2->attr = p2;
75305 +                       }
75306 +               }
75308 +               mi->dirty = true;
75309 +               break;
75311 +       case UpdateMappingPairs:
75312 +               nsize = aoff + dlen;
75313 +               asize = le32_to_cpu(attr->size);
75314 +               used = le32_to_cpu(rec->used);
75316 +               if (!check_if_attr(rec, lrh) || !attr->non_res ||
75317 +                   aoff < le16_to_cpu(attr->nres.run_off) || aoff > asize ||
75318 +                   (nsize > asize && nsize - asize > record_size - used)) {
75319 +                       goto dirty_vol;
75320 +               }
75322 +               nsize = QuadAlign(nsize);
75324 +               memmove(Add2Ptr(attr, nsize), Add2Ptr(attr, asize),
75325 +                       used - le16_to_cpu(lrh->record_off) - asize);
75326 +               rec->used = cpu_to_le32(used + nsize - asize);
75327 +               attr->size = cpu_to_le32(nsize);
75328 +               memmove(Add2Ptr(attr, aoff), data, dlen);
75330 +               if (run_get_highest_vcn(le64_to_cpu(attr->nres.svcn),
75331 +                                       attr_run(attr), &t64)) {
75332 +                       goto dirty_vol;
75333 +               }
75335 +               attr->nres.evcn = cpu_to_le64(t64);
75336 +               oa2 = find_loaded_attr(log, attr, rno_base);
75337 +               if (oa2 && oa2->attr->non_res)
75338 +                       oa2->attr->nres.evcn = attr->nres.evcn;
75340 +               mi->dirty = true;
75341 +               break;
75343 +       case SetNewAttributeSizes:
75344 +               new_sz = data;
75345 +               if (!check_if_attr(rec, lrh) || !attr->non_res)
75346 +                       goto dirty_vol;
75348 +               attr->nres.alloc_size = new_sz->alloc_size;
75349 +               attr->nres.data_size = new_sz->data_size;
75350 +               attr->nres.valid_size = new_sz->valid_size;
75352 +               if (dlen >= sizeof(struct NEW_ATTRIBUTE_SIZES))
75353 +                       attr->nres.total_size = new_sz->total_size;
75355 +               oa2 = find_loaded_attr(log, attr, rno_base);
75356 +               if (oa2) {
75357 +                       void *p2 = ntfs_memdup(attr, le32_to_cpu(attr->size));
75359 +                       if (p2) {
75360 +                               ntfs_free(oa2->attr);
75361 +                               oa2->attr = p2;
75362 +                       }
75363 +               }
75364 +               mi->dirty = true;
75365 +               break;
75367 +       case AddIndexEntryRoot:
75368 +               e = (struct NTFS_DE *)data;
75369 +               esize = le16_to_cpu(e->size);
75370 +               root = resident_data(attr);
75371 +               hdr = &root->ihdr;
75372 +               used = le32_to_cpu(hdr->used);
75374 +               if (!check_if_index_root(rec, lrh) ||
75375 +                   !check_if_root_index(attr, hdr, lrh) ||
75376 +                   Add2Ptr(data, esize) > Add2Ptr(lrh, rec_len) ||
75377 +                   esize > le32_to_cpu(rec->total) - le32_to_cpu(rec->used)) {
75378 +                       goto dirty_vol;
75379 +               }
75381 +               e1 = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
75383 +               change_attr_size(rec, attr, le32_to_cpu(attr->size) + esize);
75385 +               memmove(Add2Ptr(e1, esize), e1,
75386 +                       PtrOffset(e1, Add2Ptr(hdr, used)));
75387 +               memmove(e1, e, esize);
75389 +               le32_add_cpu(&attr->res.data_size, esize);
75390 +               hdr->used = cpu_to_le32(used + esize);
75391 +               le32_add_cpu(&hdr->total, esize);
75393 +               mi->dirty = true;
75394 +               break;
75396 +       case DeleteIndexEntryRoot:
75397 +               root = resident_data(attr);
75398 +               hdr = &root->ihdr;
75399 +               used = le32_to_cpu(hdr->used);
75401 +               if (!check_if_index_root(rec, lrh) ||
75402 +                   !check_if_root_index(attr, hdr, lrh)) {
75403 +                       goto dirty_vol;
75404 +               }
75406 +               e1 = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
75407 +               esize = le16_to_cpu(e1->size);
75408 +               e2 = Add2Ptr(e1, esize);
75410 +               memmove(e1, e2, PtrOffset(e2, Add2Ptr(hdr, used)));
75412 +               le32_sub_cpu(&attr->res.data_size, esize);
75413 +               hdr->used = cpu_to_le32(used - esize);
75414 +               le32_sub_cpu(&hdr->total, esize);
75416 +               change_attr_size(rec, attr, le32_to_cpu(attr->size) - esize);
75418 +               mi->dirty = true;
75419 +               break;
75421 +       case SetIndexEntryVcnRoot:
75422 +               root = resident_data(attr);
75423 +               hdr = &root->ihdr;
75425 +               if (!check_if_index_root(rec, lrh) ||
75426 +                   !check_if_root_index(attr, hdr, lrh)) {
75427 +                       goto dirty_vol;
75428 +               }
75430 +               e = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
75432 +               de_set_vbn_le(e, *(__le64 *)data);
75433 +               mi->dirty = true;
75434 +               break;
75436 +       case UpdateFileNameRoot:
75437 +               root = resident_data(attr);
75438 +               hdr = &root->ihdr;
75440 +               if (!check_if_index_root(rec, lrh) ||
75441 +                   !check_if_root_index(attr, hdr, lrh)) {
75442 +                       goto dirty_vol;
75443 +               }
75445 +               e = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
75446 +               fname = (struct ATTR_FILE_NAME *)(e + 1);
75447 +               memmove(&fname->dup, data, sizeof(fname->dup)); //
75448 +               mi->dirty = true;
75449 +               break;
75451 +       case UpdateRecordDataRoot:
75452 +               root = resident_data(attr);
75453 +               hdr = &root->ihdr;
75455 +               if (!check_if_index_root(rec, lrh) ||
75456 +                   !check_if_root_index(attr, hdr, lrh)) {
75457 +                       goto dirty_vol;
75458 +               }
75460 +               e = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
75462 +               memmove(Add2Ptr(e, le16_to_cpu(e->view.data_off)), data, dlen);
75464 +               mi->dirty = true;
75465 +               break;
75467 +       case ZeroEndOfFileRecord:
75468 +               if (roff + dlen > record_size)
75469 +                       goto dirty_vol;
75471 +               memset(attr, 0, dlen);
75472 +               mi->dirty = true;
75473 +               break;
75475 +       case UpdateNonresidentValue:
75476 +               if (lco < cbo + roff + dlen)
75477 +                       goto dirty_vol;
75479 +               memcpy(Add2Ptr(buffer_le, roff), data, dlen);
75481 +               a_dirty = true;
75482 +               if (attr->type == ATTR_ALLOC)
75483 +                       ntfs_fix_pre_write(buffer_le, bytes);
75484 +               break;
75486 +       case AddIndexEntryAllocation:
75487 +               ib = Add2Ptr(buffer_le, roff);
75488 +               hdr = &ib->ihdr;
75489 +               e = data;
75490 +               esize = le16_to_cpu(e->size);
75491 +               e1 = Add2Ptr(ib, aoff);
75493 +               if (is_baad(&ib->rhdr))
75494 +                       goto dirty_vol;
75495 +               if (!check_lsn(&ib->rhdr, rlsn))
75496 +                       goto out;
75498 +               used = le32_to_cpu(hdr->used);
75500 +               if (!check_index_buffer(ib, bytes) ||
75501 +                   !check_if_alloc_index(hdr, aoff) ||
75502 +                   Add2Ptr(e, esize) > Add2Ptr(lrh, rec_len) ||
75503 +                   used + esize > le32_to_cpu(hdr->total)) {
75504 +                       goto dirty_vol;
75505 +               }
75507 +               memmove(Add2Ptr(e1, esize), e1,
75508 +                       PtrOffset(e1, Add2Ptr(hdr, used)));
75509 +               memcpy(e1, e, esize);
75511 +               hdr->used = cpu_to_le32(used + esize);
75513 +               a_dirty = true;
75515 +               ntfs_fix_pre_write(&ib->rhdr, bytes);
75516 +               break;
75518 +       case DeleteIndexEntryAllocation:
75519 +               ib = Add2Ptr(buffer_le, roff);
75520 +               hdr = &ib->ihdr;
75521 +               e = Add2Ptr(ib, aoff);
75522 +               esize = le16_to_cpu(e->size);
75524 +               if (is_baad(&ib->rhdr))
75525 +                       goto dirty_vol;
75526 +               if (!check_lsn(&ib->rhdr, rlsn))
75527 +                       goto out;
75529 +               if (!check_index_buffer(ib, bytes) ||
75530 +                   !check_if_alloc_index(hdr, aoff)) {
75531 +                       goto dirty_vol;
75532 +               }
75534 +               e1 = Add2Ptr(e, esize);
75535 +               nsize = esize;
75536 +               used = le32_to_cpu(hdr->used);
75538 +               memmove(e, e1, PtrOffset(e1, Add2Ptr(hdr, used)));
75540 +               hdr->used = cpu_to_le32(used - nsize);
75542 +               a_dirty = true;
75544 +               ntfs_fix_pre_write(&ib->rhdr, bytes);
75545 +               break;
75547 +       case WriteEndOfIndexBuffer:
75548 +               ib = Add2Ptr(buffer_le, roff);
75549 +               hdr = &ib->ihdr;
75550 +               e = Add2Ptr(ib, aoff);
75552 +               if (is_baad(&ib->rhdr))
75553 +                       goto dirty_vol;
75554 +               if (!check_lsn(&ib->rhdr, rlsn))
75555 +                       goto out;
75556 +               if (!check_index_buffer(ib, bytes) ||
75557 +                   !check_if_alloc_index(hdr, aoff) ||
75558 +                   aoff + dlen > offsetof(struct INDEX_BUFFER, ihdr) +
75559 +                                         le32_to_cpu(hdr->total)) {
75560 +                       goto dirty_vol;
75561 +               }
75563 +               hdr->used = cpu_to_le32(dlen + PtrOffset(hdr, e));
75564 +               memmove(e, data, dlen);
75566 +               a_dirty = true;
75567 +               ntfs_fix_pre_write(&ib->rhdr, bytes);
75568 +               break;
75570 +       case SetIndexEntryVcnAllocation:
75571 +               ib = Add2Ptr(buffer_le, roff);
75572 +               hdr = &ib->ihdr;
75573 +               e = Add2Ptr(ib, aoff);
75575 +               if (is_baad(&ib->rhdr))
75576 +                       goto dirty_vol;
75578 +               if (!check_lsn(&ib->rhdr, rlsn))
75579 +                       goto out;
75580 +               if (!check_index_buffer(ib, bytes) ||
75581 +                   !check_if_alloc_index(hdr, aoff)) {
75582 +                       goto dirty_vol;
75583 +               }
75585 +               de_set_vbn_le(e, *(__le64 *)data);
75587 +               a_dirty = true;
75588 +               ntfs_fix_pre_write(&ib->rhdr, bytes);
75589 +               break;
75591 +       case UpdateFileNameAllocation:
75592 +               ib = Add2Ptr(buffer_le, roff);
75593 +               hdr = &ib->ihdr;
75594 +               e = Add2Ptr(ib, aoff);
75596 +               if (is_baad(&ib->rhdr))
75597 +                       goto dirty_vol;
75599 +               if (!check_lsn(&ib->rhdr, rlsn))
75600 +                       goto out;
75601 +               if (!check_index_buffer(ib, bytes) ||
75602 +                   !check_if_alloc_index(hdr, aoff)) {
75603 +                       goto dirty_vol;
75604 +               }
75606 +               fname = (struct ATTR_FILE_NAME *)(e + 1);
75607 +               memmove(&fname->dup, data, sizeof(fname->dup));
75609 +               a_dirty = true;
75610 +               ntfs_fix_pre_write(&ib->rhdr, bytes);
75611 +               break;
75613 +       case SetBitsInNonresidentBitMap:
75614 +               bmp_off =
75615 +                       le32_to_cpu(((struct BITMAP_RANGE *)data)->bitmap_off);
75616 +               bmp_bits = le32_to_cpu(((struct BITMAP_RANGE *)data)->bits);
75618 +               if (cbo + (bmp_off + 7) / 8 > lco ||
75619 +                   cbo + ((bmp_off + bmp_bits + 7) / 8) > lco) {
75620 +                       goto dirty_vol;
75621 +               }
75623 +               __bitmap_set(Add2Ptr(buffer_le, roff), bmp_off, bmp_bits);
75624 +               a_dirty = true;
75625 +               break;
75627 +       case ClearBitsInNonresidentBitMap:
75628 +               bmp_off =
75629 +                       le32_to_cpu(((struct BITMAP_RANGE *)data)->bitmap_off);
75630 +               bmp_bits = le32_to_cpu(((struct BITMAP_RANGE *)data)->bits);
75632 +               if (cbo + (bmp_off + 7) / 8 > lco ||
75633 +                   cbo + ((bmp_off + bmp_bits + 7) / 8) > lco) {
75634 +                       goto dirty_vol;
75635 +               }
75637 +               __bitmap_clear(Add2Ptr(buffer_le, roff), bmp_off, bmp_bits);
75638 +               a_dirty = true;
75639 +               break;
75641 +       case UpdateRecordDataAllocation:
75642 +               ib = Add2Ptr(buffer_le, roff);
75643 +               hdr = &ib->ihdr;
75644 +               e = Add2Ptr(ib, aoff);
75646 +               if (is_baad(&ib->rhdr))
75647 +                       goto dirty_vol;
75649 +               if (!check_lsn(&ib->rhdr, rlsn))
75650 +                       goto out;
75651 +               if (!check_index_buffer(ib, bytes) ||
75652 +                   !check_if_alloc_index(hdr, aoff)) {
75653 +                       goto dirty_vol;
75654 +               }
75656 +               memmove(Add2Ptr(e, le16_to_cpu(e->view.data_off)), data, dlen);
75658 +               a_dirty = true;
75659 +               ntfs_fix_pre_write(&ib->rhdr, bytes);
75660 +               break;
75662 +       default:
75663 +               WARN_ON(1);
75664 +       }
75666 +       if (rlsn) {
75667 +               __le64 t64 = cpu_to_le64(*rlsn);
75669 +               if (rec)
75670 +                       rec->rhdr.lsn = t64;
75671 +               if (ib)
75672 +                       ib->rhdr.lsn = t64;
75673 +       }
75675 +       if (mi && mi->dirty) {
75676 +               err = mi_write(mi, 0);
75677 +               if (err)
75678 +                       goto out;
75679 +       }
75681 +       if (a_dirty) {
75682 +               attr = oa->attr;
75683 +               err = ntfs_sb_write_run(sbi, oa->run1, vbo, buffer_le, bytes);
75684 +               if (err)
75685 +                       goto out;
75686 +       }
75688 +out:
75690 +       if (inode)
75691 +               iput(inode);
75692 +       else if (mi != mi2_child)
75693 +               mi_put(mi);
75695 +       ntfs_free(buffer_le);
75697 +       return err;
75699 +dirty_vol:
75700 +       log->set_dirty = true;
75701 +       goto out;
75705 + * log_replay
75706 + *
75707 + * This function is called during the mount operation.
75708 + * It replays the log and empties it.
75709 + * 'initialized' is set to false if the log file contains '-1'.
75710 + */
75711 +int log_replay(struct ntfs_inode *ni, bool *initialized)
75713 +       int err;
75714 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
75715 +       struct ntfs_log *log;
75717 +       struct restart_info rst_info, rst_info2;
75718 +       u64 rec_lsn, ra_lsn, checkpt_lsn = 0, rlsn = 0;
75719 +       struct ATTR_NAME_ENTRY *attr_names = NULL;
75720 +       struct ATTR_NAME_ENTRY *ane;
75721 +       struct RESTART_TABLE *dptbl = NULL;
75722 +       struct RESTART_TABLE *trtbl = NULL;
75723 +       const struct RESTART_TABLE *rt;
75724 +       struct RESTART_TABLE *oatbl = NULL;
75725 +       struct inode *inode;
75726 +       struct OpenAttr *oa;
75727 +       struct ntfs_inode *ni_oe;
75728 +       struct ATTRIB *attr = NULL;
75729 +       u64 size, vcn, undo_next_lsn;
75730 +       CLST rno, lcn, lcn0, len0, clen;
75731 +       void *data;
75732 +       struct NTFS_RESTART *rst = NULL;
75733 +       struct lcb *lcb = NULL;
75734 +       struct OPEN_ATTR_ENRTY *oe;
75735 +       struct TRANSACTION_ENTRY *tr;
75736 +       struct DIR_PAGE_ENTRY *dp;
75737 +       u32 i, bytes_per_attr_entry;
75738 +       u32 l_size = ni->vfs_inode.i_size;
75739 +       u32 orig_file_size = l_size;
75740 +       u32 page_size, vbo, tail, off, dlen;
75741 +       u32 saved_len, rec_len, transact_id;
75742 +       bool use_second_page;
75743 +       struct RESTART_AREA *ra2, *ra = NULL;
75744 +       struct CLIENT_REC *ca, *cr;
75745 +       __le16 client;
75746 +       struct RESTART_HDR *rh;
75747 +       const struct LFS_RECORD_HDR *frh;
75748 +       const struct LOG_REC_HDR *lrh;
75749 +       bool is_mapped;
75750 +       bool is_ro = sb_rdonly(sbi->sb);
75751 +       u64 t64;
75752 +       u16 t16;
75753 +       u32 t32;
75755 +       /* Get the size of page. NOTE: To replay we can use default page */
75756 +#if PAGE_SIZE >= DefaultLogPageSize && PAGE_SIZE <= DefaultLogPageSize * 2
75757 +       page_size = norm_file_page(PAGE_SIZE, &l_size, true);
75758 +#else
75759 +       page_size = norm_file_page(PAGE_SIZE, &l_size, false);
75760 +#endif
75761 +       if (!page_size)
75762 +               return -EINVAL;
75764 +       log = ntfs_zalloc(sizeof(struct ntfs_log));
75765 +       if (!log)
75766 +               return -ENOMEM;
75768 +       log->ni = ni;
75769 +       log->l_size = l_size;
75770 +       log->one_page_buf = ntfs_malloc(page_size);
75772 +       if (!log->one_page_buf) {
75773 +               err = -ENOMEM;
75774 +               goto out;
75775 +       }
75777 +       log->page_size = page_size;
75778 +       log->page_mask = page_size - 1;
75779 +       log->page_bits = blksize_bits(page_size);
75781 +       /* Look for a restart area on the disk */
75782 +       err = log_read_rst(log, l_size, true, &rst_info);
75783 +       if (err)
75784 +               goto out;
75786 +       /* remember 'initialized' */
75787 +       *initialized = rst_info.initialized;
75789 +       if (!rst_info.restart) {
75790 +               if (rst_info.initialized) {
75791 +                       /* no restart area but the file is not initialized */
75792 +                       err = -EINVAL;
75793 +                       goto out;
75794 +               }
75796 +               log_init_pg_hdr(log, page_size, page_size, 1, 1);
75797 +               log_create(log, l_size, 0, get_random_int(), false, false);
75799 +               log->ra = ra;
75801 +               ra = log_create_ra(log);
75802 +               if (!ra) {
75803 +                       err = -ENOMEM;
75804 +                       goto out;
75805 +               }
75806 +               log->ra = ra;
75807 +               log->init_ra = true;
75809 +               goto process_log;
75810 +       }
75812 +       /*
75813 +        * If the restart offset above wasn't zero then we won't
75814 +        * look for a second restart
75815 +        */
75816 +       if (rst_info.vbo)
75817 +               goto check_restart_area;
75819 +       err = log_read_rst(log, l_size, false, &rst_info2);
75821 +       /* Determine which restart area to use */
75822 +       if (!rst_info2.restart || rst_info2.last_lsn <= rst_info.last_lsn)
75823 +               goto use_first_page;
75825 +       use_second_page = true;
75827 +       if (rst_info.chkdsk_was_run && page_size != rst_info.vbo) {
75828 +               struct RECORD_PAGE_HDR *sp = NULL;
75829 +               bool usa_error;
75831 +               if (!read_log_page(log, page_size, &sp, &usa_error) &&
75832 +                   sp->rhdr.sign == NTFS_CHKD_SIGNATURE) {
75833 +                       use_second_page = false;
75834 +               }
75835 +               ntfs_free(sp);
75836 +       }
75838 +       if (use_second_page) {
75839 +               ntfs_free(rst_info.r_page);
75840 +               memcpy(&rst_info, &rst_info2, sizeof(struct restart_info));
75841 +               rst_info2.r_page = NULL;
75842 +       }
75844 +use_first_page:
75845 +       ntfs_free(rst_info2.r_page);
75847 +check_restart_area:
75848 +       /* If the restart area is at offset 0, we want to write the second restart area first */
75849 +       log->init_ra = !!rst_info.vbo;
75851 +       /* If we have a valid page then grab a pointer to the restart area */
75852 +       ra2 = rst_info.valid_page
75853 +                     ? Add2Ptr(rst_info.r_page,
75854 +                               le16_to_cpu(rst_info.r_page->ra_off))
75855 +                     : NULL;
75857 +       if (rst_info.chkdsk_was_run ||
75858 +           (ra2 && ra2->client_idx[1] == LFS_NO_CLIENT_LE)) {
75859 +               bool wrapped = false;
75860 +               bool use_multi_page = false;
75861 +               u32 open_log_count;
75863 +               /* Do some checks based on whether we have a valid log page */
75864 +               if (!rst_info.valid_page) {
75865 +                       open_log_count = get_random_int();
75866 +                       goto init_log_instance;
75867 +               }
75868 +               open_log_count = le32_to_cpu(ra2->open_log_count);
75870 +               /*
75871 +                * If the restart page size isn't changing then we want to
75872 +                * check how much work we need to do
75873 +                */
75874 +               if (page_size != le32_to_cpu(rst_info.r_page->sys_page_size))
75875 +                       goto init_log_instance;
75877 +init_log_instance:
75878 +               log_init_pg_hdr(log, page_size, page_size, 1, 1);
75880 +               log_create(log, l_size, rst_info.last_lsn, open_log_count,
75881 +                          wrapped, use_multi_page);
75883 +               ra = log_create_ra(log);
75884 +               if (!ra) {
75885 +                       err = -ENOMEM;
75886 +                       goto out;
75887 +               }
75888 +               log->ra = ra;
75890 +               /* Put the restart areas and initialize the log file as required */
75891 +               goto process_log;
75892 +       }
75894 +       if (!ra2) {
75895 +               err = -EINVAL;
75896 +               goto out;
75897 +       }
75899 +       /*
75900 +        * If the log page or the system page sizes have changed, we can't use the log file
75901 +        * We must use the system page size instead of the default size
75902 +        * if there is not a clean shutdown
75903 +        */
75904 +       t32 = le32_to_cpu(rst_info.r_page->sys_page_size);
75905 +       if (page_size != t32) {
75906 +               l_size = orig_file_size;
75907 +               page_size =
75908 +                       norm_file_page(t32, &l_size, t32 == DefaultLogPageSize);
75909 +       }
75911 +       if (page_size != t32 ||
75912 +           page_size != le32_to_cpu(rst_info.r_page->page_size)) {
75913 +               err = -EINVAL;
75914 +               goto out;
75915 +       }
75917 +       /* If the file size has shrunk then we won't mount it */
75918 +       if (l_size < le64_to_cpu(ra2->l_size)) {
75919 +               err = -EINVAL;
75920 +               goto out;
75921 +       }
75923 +       log_init_pg_hdr(log, page_size, page_size,
75924 +                       le16_to_cpu(rst_info.r_page->major_ver),
75925 +                       le16_to_cpu(rst_info.r_page->minor_ver));
75927 +       log->l_size = le64_to_cpu(ra2->l_size);
75928 +       log->seq_num_bits = le32_to_cpu(ra2->seq_num_bits);
75929 +       log->file_data_bits = sizeof(u64) * 8 - log->seq_num_bits;
75930 +       log->seq_num_mask = (8 << log->file_data_bits) - 1;
75931 +       log->last_lsn = le64_to_cpu(ra2->current_lsn);
75932 +       log->seq_num = log->last_lsn >> log->file_data_bits;
75933 +       log->ra_off = le16_to_cpu(rst_info.r_page->ra_off);
75934 +       log->restart_size = log->sys_page_size - log->ra_off;
75935 +       log->record_header_len = le16_to_cpu(ra2->rec_hdr_len);
75936 +       log->ra_size = le16_to_cpu(ra2->ra_len);
75937 +       log->data_off = le16_to_cpu(ra2->data_off);
75938 +       log->data_size = log->page_size - log->data_off;
75939 +       log->reserved = log->data_size - log->record_header_len;
75941 +       vbo = lsn_to_vbo(log, log->last_lsn);
75943 +       if (vbo < log->first_page) {
75944 +               /* This is a pseudo lsn */
75945 +               log->l_flags |= NTFSLOG_NO_LAST_LSN;
75946 +               log->next_page = log->first_page;
75947 +               goto find_oldest;
75948 +       }
75950 +       /* Find the end of this log record */
75951 +       off = final_log_off(log, log->last_lsn,
75952 +                           le32_to_cpu(ra2->last_lsn_data_len));
75954 +       /* If we wrapped the file then increment the sequence number */
75955 +       if (off <= vbo) {
75956 +               log->seq_num += 1;
75957 +               log->l_flags |= NTFSLOG_WRAPPED;
75958 +       }
75960 +       /* Now compute the next log page to use */
75961 +       vbo &= ~log->sys_page_mask;
75962 +       tail = log->page_size - (off & log->page_mask) - 1;
75964 +       /* If we can fit another log record on the page, move back a page the log file */
75965 +       if (tail >= log->record_header_len) {
75966 +               log->l_flags |= NTFSLOG_REUSE_TAIL;
75967 +               log->next_page = vbo;
75968 +       } else {
75969 +               log->next_page = next_page_off(log, vbo);
75970 +       }
75972 +find_oldest:
75973 +       /* Find the oldest client lsn. Use the last flushed lsn as a starting point */
75974 +       log->oldest_lsn = log->last_lsn;
75975 +       oldest_client_lsn(Add2Ptr(ra2, le16_to_cpu(ra2->client_off)),
75976 +                         ra2->client_idx[1], &log->oldest_lsn);
75977 +       log->oldest_lsn_off = lsn_to_vbo(log, log->oldest_lsn);
75979 +       if (log->oldest_lsn_off < log->first_page)
75980 +               log->l_flags |= NTFSLOG_NO_OLDEST_LSN;
75982 +       if (!(ra2->flags & RESTART_SINGLE_PAGE_IO))
75983 +               log->l_flags |= NTFSLOG_WRAPPED | NTFSLOG_MULTIPLE_PAGE_IO;
75985 +       log->current_openlog_count = le32_to_cpu(ra2->open_log_count);
75986 +       log->total_avail_pages = log->l_size - log->first_page;
75987 +       log->total_avail = log->total_avail_pages >> log->page_bits;
75988 +       log->max_current_avail = log->total_avail * log->reserved;
75989 +       log->total_avail = log->total_avail * log->data_size;
75991 +       log->current_avail = current_log_avail(log);
75993 +       ra = ntfs_zalloc(log->restart_size);
75994 +       if (!ra) {
75995 +               err = -ENOMEM;
75996 +               goto out;
75997 +       }
75998 +       log->ra = ra;
76000 +       t16 = le16_to_cpu(ra2->client_off);
76001 +       if (t16 == offsetof(struct RESTART_AREA, clients)) {
76002 +               memcpy(ra, ra2, log->ra_size);
76003 +       } else {
76004 +               memcpy(ra, ra2, offsetof(struct RESTART_AREA, clients));
76005 +               memcpy(ra->clients, Add2Ptr(ra2, t16),
76006 +                      le16_to_cpu(ra2->ra_len) - t16);
76008 +               log->current_openlog_count = get_random_int();
76009 +               ra->open_log_count = cpu_to_le32(log->current_openlog_count);
76010 +               log->ra_size = offsetof(struct RESTART_AREA, clients) +
76011 +                              sizeof(struct CLIENT_REC);
76012 +               ra->client_off =
76013 +                       cpu_to_le16(offsetof(struct RESTART_AREA, clients));
76014 +               ra->ra_len = cpu_to_le16(log->ra_size);
76015 +       }
76017 +       le32_add_cpu(&ra->open_log_count, 1);
76019 +       /* Now we need to walk through looking for the last lsn */
76020 +       err = last_log_lsn(log);
76021 +       if (err)
76022 +               goto out;
76024 +       log->current_avail = current_log_avail(log);
76026 +       /* Remember which restart area to write first */
76027 +       log->init_ra = rst_info.vbo;
76029 +process_log:
76030 +       /* 1.0, 1.1, 2.0 log->major_ver/minor_ver - short values */
76031 +       switch ((log->major_ver << 16) + log->minor_ver) {
76032 +       case 0x10000:
76033 +       case 0x10001:
76034 +       case 0x20000:
76035 +               break;
76036 +       default:
76037 +               ntfs_warn(sbi->sb, "\x24LogFile version %d.%d is not supported",
76038 +                         log->major_ver, log->minor_ver);
76039 +               err = -EOPNOTSUPP;
76040 +               log->set_dirty = true;
76041 +               goto out;
76042 +       }
76044 +       /* One client "NTFS" per logfile */
76045 +       ca = Add2Ptr(ra, le16_to_cpu(ra->client_off));
76047 +       for (client = ra->client_idx[1];; client = cr->next_client) {
76048 +               if (client == LFS_NO_CLIENT_LE) {
76049 +                       /* Insert "NTFS" client LogFile */
76050 +                       client = ra->client_idx[0];
76051 +                       if (client == LFS_NO_CLIENT_LE)
76052 +                               return -EINVAL;
76054 +                       t16 = le16_to_cpu(client);
76055 +                       cr = ca + t16;
76057 +                       remove_client(ca, cr, &ra->client_idx[0]);
76059 +                       cr->restart_lsn = 0;
76060 +                       cr->oldest_lsn = cpu_to_le64(log->oldest_lsn);
76061 +                       cr->name_bytes = cpu_to_le32(8);
76062 +                       cr->name[0] = cpu_to_le16('N');
76063 +                       cr->name[1] = cpu_to_le16('T');
76064 +                       cr->name[2] = cpu_to_le16('F');
76065 +                       cr->name[3] = cpu_to_le16('S');
76067 +                       add_client(ca, t16, &ra->client_idx[1]);
76068 +                       break;
76069 +               }
76071 +               cr = ca + le16_to_cpu(client);
76073 +               if (cpu_to_le32(8) == cr->name_bytes &&
76074 +                   cpu_to_le16('N') == cr->name[0] &&
76075 +                   cpu_to_le16('T') == cr->name[1] &&
76076 +                   cpu_to_le16('F') == cr->name[2] &&
76077 +                   cpu_to_le16('S') == cr->name[3])
76078 +                       break;
76079 +       }
76081 +       /* Update the client handle with the client block information */
76082 +       log->client_id.seq_num = cr->seq_num;
76083 +       log->client_id.client_idx = client;
76085 +       err = read_rst_area(log, &rst, &ra_lsn);
76086 +       if (err)
76087 +               goto out;
76089 +       if (!rst)
76090 +               goto out;
76092 +       bytes_per_attr_entry = !rst->major_ver ? 0x2C : 0x28;
76094 +       checkpt_lsn = le64_to_cpu(rst->check_point_start);
76095 +       if (!checkpt_lsn)
76096 +               checkpt_lsn = ra_lsn;
76098 +       /* Allocate and Read the Transaction Table */
76099 +       if (!rst->transact_table_len)
76100 +               goto check_dirty_page_table;
76102 +       t64 = le64_to_cpu(rst->transact_table_lsn);
76103 +       err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb);
76104 +       if (err)
76105 +               goto out;
76107 +       lrh = lcb->log_rec;
76108 +       frh = lcb->lrh;
76109 +       rec_len = le32_to_cpu(frh->client_data_len);
76111 +       if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id),
76112 +                          bytes_per_attr_entry)) {
76113 +               err = -EINVAL;
76114 +               goto out;
76115 +       }
76117 +       t16 = le16_to_cpu(lrh->redo_off);
76119 +       rt = Add2Ptr(lrh, t16);
76120 +       t32 = rec_len - t16;
76122 +       /* Now check that this is a valid restart table */
76123 +       if (!check_rstbl(rt, t32)) {
76124 +               err = -EINVAL;
76125 +               goto out;
76126 +       }
76128 +       trtbl = ntfs_memdup(rt, t32);
76129 +       if (!trtbl) {
76130 +               err = -ENOMEM;
76131 +               goto out;
76132 +       }
76134 +       lcb_put(lcb);
76135 +       lcb = NULL;
76137 +check_dirty_page_table:
76138 +       /* The next record back should be the Dirty Pages Table */
76139 +       if (!rst->dirty_pages_len)
76140 +               goto check_attribute_names;
76142 +       t64 = le64_to_cpu(rst->dirty_pages_table_lsn);
76143 +       err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb);
76144 +       if (err)
76145 +               goto out;
76147 +       lrh = lcb->log_rec;
76148 +       frh = lcb->lrh;
76149 +       rec_len = le32_to_cpu(frh->client_data_len);
76151 +       if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id),
76152 +                          bytes_per_attr_entry)) {
76153 +               err = -EINVAL;
76154 +               goto out;
76155 +       }
76157 +       t16 = le16_to_cpu(lrh->redo_off);
76159 +       rt = Add2Ptr(lrh, t16);
76160 +       t32 = rec_len - t16;
76162 +       /* Now check that this is a valid restart table */
76163 +       if (!check_rstbl(rt, t32)) {
76164 +               err = -EINVAL;
76165 +               goto out;
76166 +       }
76168 +       dptbl = ntfs_memdup(rt, t32);
76169 +       if (!dptbl) {
76170 +               err = -ENOMEM;
76171 +               goto out;
76172 +       }
76174 +       /* Convert Ra version '0' into version '1' */
76175 +       if (rst->major_ver)
76176 +               goto end_conv_1;
76178 +       dp = NULL;
76179 +       while ((dp = enum_rstbl(dptbl, dp))) {
76180 +               struct DIR_PAGE_ENTRY_32 *dp0 = (struct DIR_PAGE_ENTRY_32 *)dp;
76181 +               // NOTE: Danger. Check for out-of-bounds access.
76182 +               memmove(&dp->vcn, &dp0->vcn_low,
76183 +                       2 * sizeof(u64) +
76184 +                               le32_to_cpu(dp->lcns_follow) * sizeof(u64));
76185 +       }
76187 +end_conv_1:
76188 +       lcb_put(lcb);
76189 +       lcb = NULL;
76191 +       /* Go through the table and remove the duplicates, remembering the oldest lsn values */
76192 +       if (sbi->cluster_size <= log->page_size)
76193 +               goto trace_dp_table;
76195 +       dp = NULL;
76196 +       while ((dp = enum_rstbl(dptbl, dp))) {
76197 +               struct DIR_PAGE_ENTRY *next = dp;
76199 +               while ((next = enum_rstbl(dptbl, next))) {
76200 +                       if (next->target_attr == dp->target_attr &&
76201 +                           next->vcn == dp->vcn) {
76202 +                               if (le64_to_cpu(next->oldest_lsn) <
76203 +                                   le64_to_cpu(dp->oldest_lsn)) {
76204 +                                       dp->oldest_lsn = next->oldest_lsn;
76205 +                               }
76207 +                               free_rsttbl_idx(dptbl, PtrOffset(dptbl, next));
76208 +                       }
76209 +               }
76210 +       }
76211 +trace_dp_table:
76212 +check_attribute_names:
76213 +       /* The next record should be the Attribute Names */
76214 +       if (!rst->attr_names_len)
76215 +               goto check_attr_table;
76217 +       t64 = le64_to_cpu(rst->attr_names_lsn);
76218 +       err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb);
76219 +       if (err)
76220 +               goto out;
76222 +       lrh = lcb->log_rec;
76223 +       frh = lcb->lrh;
76224 +       rec_len = le32_to_cpu(frh->client_data_len);
76226 +       if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id),
76227 +                          bytes_per_attr_entry)) {
76228 +               err = -EINVAL;
76229 +               goto out;
76230 +       }
76232 +       t32 = lrh_length(lrh);
76233 +       rec_len -= t32;
76235 +       attr_names = ntfs_memdup(Add2Ptr(lrh, t32), rec_len);
76237 +       lcb_put(lcb);
76238 +       lcb = NULL;
76240 +check_attr_table:
76241 +       /* The next record should be the attribute Table */
76242 +       if (!rst->open_attr_len)
76243 +               goto check_attribute_names2;
76245 +       t64 = le64_to_cpu(rst->open_attr_table_lsn);
76246 +       err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb);
76247 +       if (err)
76248 +               goto out;
76250 +       lrh = lcb->log_rec;
76251 +       frh = lcb->lrh;
76252 +       rec_len = le32_to_cpu(frh->client_data_len);
76254 +       if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id),
76255 +                          bytes_per_attr_entry)) {
76256 +               err = -EINVAL;
76257 +               goto out;
76258 +       }
76260 +       t16 = le16_to_cpu(lrh->redo_off);
76262 +       rt = Add2Ptr(lrh, t16);
76263 +       t32 = rec_len - t16;
76265 +       if (!check_rstbl(rt, t32)) {
76266 +               err = -EINVAL;
76267 +               goto out;
76268 +       }
76270 +       oatbl = ntfs_memdup(rt, t32);
76271 +       if (!oatbl) {
76272 +               err = -ENOMEM;
76273 +               goto out;
76274 +       }
76276 +       log->open_attr_tbl = oatbl;
76278 +       /* Clear all of the Attr pointers */
76279 +       oe = NULL;
76280 +       while ((oe = enum_rstbl(oatbl, oe))) {
76281 +               if (!rst->major_ver) {
76282 +                       struct OPEN_ATTR_ENRTY_32 oe0;
76284 +                       /* Really 'oe' points to OPEN_ATTR_ENRTY_32 */
76285 +                       memcpy(&oe0, oe, SIZEOF_OPENATTRIBUTEENTRY0);
76287 +                       oe->bytes_per_index = oe0.bytes_per_index;
76288 +                       oe->type = oe0.type;
76289 +                       oe->is_dirty_pages = oe0.is_dirty_pages;
76290 +                       oe->name_len = 0;
76291 +                       oe->ref = oe0.ref;
76292 +                       oe->open_record_lsn = oe0.open_record_lsn;
76293 +               }
76295 +               oe->is_attr_name = 0;
76296 +               oe->ptr = NULL;
76297 +       }
76299 +       lcb_put(lcb);
76300 +       lcb = NULL;
76302 +check_attribute_names2:
76303 +       if (!rst->attr_names_len)
76304 +               goto trace_attribute_table;
76306 +       ane = attr_names;
76307 +       if (!oatbl)
76308 +               goto trace_attribute_table;
76309 +       while (ane->off) {
76310 +               /* TODO: Clear table on exit! */
76311 +               oe = Add2Ptr(oatbl, le16_to_cpu(ane->off));
76312 +               t16 = le16_to_cpu(ane->name_bytes);
76313 +               oe->name_len = t16 / sizeof(short);
76314 +               oe->ptr = ane->name;
76315 +               oe->is_attr_name = 2;
76316 +               ane = Add2Ptr(ane, sizeof(struct ATTR_NAME_ENTRY) + t16);
76317 +       }
76319 +trace_attribute_table:
76320 +       /*
76321 +        * If the checkpt_lsn is zero, then this is a freshly
76322 +        * formatted disk and we have no work to do
76323 +        */
76324 +       if (!checkpt_lsn) {
76325 +               err = 0;
76326 +               goto out;
76327 +       }
76329 +       if (!oatbl) {
76330 +               oatbl = init_rsttbl(bytes_per_attr_entry, 8);
76331 +               if (!oatbl) {
76332 +                       err = -ENOMEM;
76333 +                       goto out;
76334 +               }
76335 +       }
76337 +       log->open_attr_tbl = oatbl;
76339 +       /* Start the analysis pass from the Checkpoint lsn. */
76340 +       rec_lsn = checkpt_lsn;
76342 +       /* Read the first lsn */
76343 +       err = read_log_rec_lcb(log, checkpt_lsn, lcb_ctx_next, &lcb);
76344 +       if (err)
76345 +               goto out;
76347 +       /* Loop to read all subsequent records to the end of the log file */
76348 +next_log_record_analyze:
76349 +       err = read_next_log_rec(log, lcb, &rec_lsn);
76350 +       if (err)
76351 +               goto out;
76353 +       if (!rec_lsn)
76354 +               goto end_log_records_enumerate;
76356 +       frh = lcb->lrh;
76357 +       transact_id = le32_to_cpu(frh->transact_id);
76358 +       rec_len = le32_to_cpu(frh->client_data_len);
76359 +       lrh = lcb->log_rec;
76361 +       if (!check_log_rec(lrh, rec_len, transact_id, bytes_per_attr_entry)) {
76362 +               err = -EINVAL;
76363 +               goto out;
76364 +       }
76366 +       /*
76367 +        * The first lsn after the previous lsn remembered
76368 +        * the checkpoint is the first candidate for the rlsn
76369 +        */
76370 +       if (!rlsn)
76371 +               rlsn = rec_lsn;
76373 +       if (LfsClientRecord != frh->record_type)
76374 +               goto next_log_record_analyze;
76376 +       /*
76377 +        * Now update the Transaction Table for this transaction
76378 +        * If there is no entry present or it is unallocated we allocate the entry
76379 +        */
76380 +       if (!trtbl) {
76381 +               trtbl = init_rsttbl(sizeof(struct TRANSACTION_ENTRY),
76382 +                                   INITIAL_NUMBER_TRANSACTIONS);
76383 +               if (!trtbl) {
76384 +                       err = -ENOMEM;
76385 +                       goto out;
76386 +               }
76387 +       }
76389 +       tr = Add2Ptr(trtbl, transact_id);
76391 +       if (transact_id >= bytes_per_rt(trtbl) ||
76392 +           tr->next != RESTART_ENTRY_ALLOCATED_LE) {
76393 +               tr = alloc_rsttbl_from_idx(&trtbl, transact_id);
76394 +               if (!tr) {
76395 +                       err = -ENOMEM;
76396 +                       goto out;
76397 +               }
76398 +               tr->transact_state = TransactionActive;
76399 +               tr->first_lsn = cpu_to_le64(rec_lsn);
76400 +       }
76402 +       tr->prev_lsn = tr->undo_next_lsn = cpu_to_le64(rec_lsn);
76404 +       /*
76405 +        * If this is a compensation log record, then change
76406 +        * the undo_next_lsn to be the undo_next_lsn of this record
76407 +        */
76408 +       if (lrh->undo_op == cpu_to_le16(CompensationLogRecord))
76409 +               tr->undo_next_lsn = frh->client_undo_next_lsn;
76411 +       /* Dispatch to handle log record depending on type */
76412 +       switch (le16_to_cpu(lrh->redo_op)) {
76413 +       case InitializeFileRecordSegment:
76414 +       case DeallocateFileRecordSegment:
76415 +       case WriteEndOfFileRecordSegment:
76416 +       case CreateAttribute:
76417 +       case DeleteAttribute:
76418 +       case UpdateResidentValue:
76419 +       case UpdateNonresidentValue:
76420 +       case UpdateMappingPairs:
76421 +       case SetNewAttributeSizes:
76422 +       case AddIndexEntryRoot:
76423 +       case DeleteIndexEntryRoot:
76424 +       case AddIndexEntryAllocation:
76425 +       case DeleteIndexEntryAllocation:
76426 +       case WriteEndOfIndexBuffer:
76427 +       case SetIndexEntryVcnRoot:
76428 +       case SetIndexEntryVcnAllocation:
76429 +       case UpdateFileNameRoot:
76430 +       case UpdateFileNameAllocation:
76431 +       case SetBitsInNonresidentBitMap:
76432 +       case ClearBitsInNonresidentBitMap:
76433 +       case UpdateRecordDataRoot:
76434 +       case UpdateRecordDataAllocation:
76435 +       case ZeroEndOfFileRecord:
76436 +               t16 = le16_to_cpu(lrh->target_attr);
76437 +               t64 = le64_to_cpu(lrh->target_vcn);
76438 +               dp = find_dp(dptbl, t16, t64);
76440 +               if (dp)
76441 +                       goto copy_lcns;
76443 +               /*
76444 +                * Calculate the number of clusters per page the system
76445 +                * which wrote the checkpoint, possibly creating the table
76446 +                */
76447 +               if (dptbl) {
76448 +                       t32 = (le16_to_cpu(dptbl->size) -
76449 +                              sizeof(struct DIR_PAGE_ENTRY)) /
76450 +                             sizeof(u64);
76451 +               } else {
76452 +                       t32 = log->clst_per_page;
76453 +                       ntfs_free(dptbl);
76454 +                       dptbl = init_rsttbl(struct_size(dp, page_lcns, t32),
76455 +                                           32);
76456 +                       if (!dptbl) {
76457 +                               err = -ENOMEM;
76458 +                               goto out;
76459 +                       }
76460 +               }
76462 +               dp = alloc_rsttbl_idx(&dptbl);
76463 +               dp->target_attr = cpu_to_le32(t16);
76464 +               dp->transfer_len = cpu_to_le32(t32 << sbi->cluster_bits);
76465 +               dp->lcns_follow = cpu_to_le32(t32);
76466 +               dp->vcn = cpu_to_le64(t64 & ~((u64)t32 - 1));
76467 +               dp->oldest_lsn = cpu_to_le64(rec_lsn);
76469 +copy_lcns:
76470 +               /*
76471 +                * Copy the Lcns from the log record into the Dirty Page Entry
76472 +                * TODO: for different page size support, must somehow make
76473 +                * whole routine a loop, case Lcns do not fit below
76474 +                */
76475 +               t16 = le16_to_cpu(lrh->lcns_follow);
76476 +               for (i = 0; i < t16; i++) {
76477 +                       size_t j = (size_t)(le64_to_cpu(lrh->target_vcn) -
76478 +                                           le64_to_cpu(dp->vcn));
76479 +                       dp->page_lcns[j + i] = lrh->page_lcns[i];
76480 +               }
76482 +               goto next_log_record_analyze;
76484 +       case DeleteDirtyClusters: {
76485 +               u32 range_count =
76486 +                       le16_to_cpu(lrh->redo_len) / sizeof(struct LCN_RANGE);
76487 +               const struct LCN_RANGE *r =
76488 +                       Add2Ptr(lrh, le16_to_cpu(lrh->redo_off));
76490 +               /* Loop through all of the Lcn ranges this log record */
76491 +               for (i = 0; i < range_count; i++, r++) {
76492 +                       u64 lcn0 = le64_to_cpu(r->lcn);
76493 +                       u64 lcn_e = lcn0 + le64_to_cpu(r->len) - 1;
76495 +                       dp = NULL;
76496 +                       while ((dp = enum_rstbl(dptbl, dp))) {
76497 +                               u32 j;
76499 +                               t32 = le32_to_cpu(dp->lcns_follow);
76500 +                               for (j = 0; j < t32; j++) {
76501 +                                       t64 = le64_to_cpu(dp->page_lcns[j]);
76502 +                                       if (t64 >= lcn0 && t64 <= lcn_e)
76503 +                                               dp->page_lcns[j] = 0;
76504 +                               }
76505 +                       }
76506 +               }
76507 +               goto next_log_record_analyze;
76508 +               ;
76509 +       }
76511 +       case OpenNonresidentAttribute:
76512 +               t16 = le16_to_cpu(lrh->target_attr);
76513 +               if (t16 >= bytes_per_rt(oatbl)) {
76514 +                       /*
76515 +                        * Compute how big the table needs to be.
76516 +                        * Add 10 extra entries for some cushion
76517 +                        */
76518 +                       u32 new_e = t16 / le16_to_cpu(oatbl->size);
76520 +                       new_e += 10 - le16_to_cpu(oatbl->used);
76522 +                       oatbl = extend_rsttbl(oatbl, new_e, ~0u);
76523 +                       log->open_attr_tbl = oatbl;
76524 +                       if (!oatbl) {
76525 +                               err = -ENOMEM;
76526 +                               goto out;
76527 +                       }
76528 +               }
76530 +               /* Point to the entry being opened */
76531 +               oe = alloc_rsttbl_from_idx(&oatbl, t16);
76532 +               log->open_attr_tbl = oatbl;
76533 +               if (!oe) {
76534 +                       err = -ENOMEM;
76535 +                       goto out;
76536 +               }
76538 +               /* Initialize this entry from the log record */
76539 +               t16 = le16_to_cpu(lrh->redo_off);
76540 +               if (!rst->major_ver) {
76541 +                       /* Convert version '0' into version '1' */
76542 +                       struct OPEN_ATTR_ENRTY_32 *oe0 = Add2Ptr(lrh, t16);
76544 +                       oe->bytes_per_index = oe0->bytes_per_index;
76545 +                       oe->type = oe0->type;
76546 +                       oe->is_dirty_pages = oe0->is_dirty_pages;
76547 +                       oe->name_len = 0; //oe0.name_len;
76548 +                       oe->ref = oe0->ref;
76549 +                       oe->open_record_lsn = oe0->open_record_lsn;
76550 +               } else {
76551 +                       memcpy(oe, Add2Ptr(lrh, t16), bytes_per_attr_entry);
76552 +               }
76554 +               t16 = le16_to_cpu(lrh->undo_len);
76555 +               if (t16) {
76556 +                       oe->ptr = ntfs_malloc(t16);
76557 +                       if (!oe->ptr) {
76558 +                               err = -ENOMEM;
76559 +                               goto out;
76560 +                       }
76561 +                       oe->name_len = t16 / sizeof(short);
76562 +                       memcpy(oe->ptr,
76563 +                              Add2Ptr(lrh, le16_to_cpu(lrh->undo_off)), t16);
76564 +                       oe->is_attr_name = 1;
76565 +               } else {
76566 +                       oe->ptr = NULL;
76567 +                       oe->is_attr_name = 0;
76568 +               }
76570 +               goto next_log_record_analyze;
76572 +       case HotFix:
76573 +               t16 = le16_to_cpu(lrh->target_attr);
76574 +               t64 = le64_to_cpu(lrh->target_vcn);
76575 +               dp = find_dp(dptbl, t16, t64);
76576 +               if (dp) {
76577 +                       size_t j = le64_to_cpu(lrh->target_vcn) -
76578 +                                  le64_to_cpu(dp->vcn);
76579 +                       if (dp->page_lcns[j])
76580 +                               dp->page_lcns[j] = lrh->page_lcns[0];
76581 +               }
76582 +               goto next_log_record_analyze;
76584 +       case EndTopLevelAction:
76585 +               tr = Add2Ptr(trtbl, transact_id);
76586 +               tr->prev_lsn = cpu_to_le64(rec_lsn);
76587 +               tr->undo_next_lsn = frh->client_undo_next_lsn;
76588 +               goto next_log_record_analyze;
76590 +       case PrepareTransaction:
76591 +               tr = Add2Ptr(trtbl, transact_id);
76592 +               tr->transact_state = TransactionPrepared;
76593 +               goto next_log_record_analyze;
76595 +       case CommitTransaction:
76596 +               tr = Add2Ptr(trtbl, transact_id);
76597 +               tr->transact_state = TransactionCommitted;
76598 +               goto next_log_record_analyze;
76600 +       case ForgetTransaction:
76601 +               free_rsttbl_idx(trtbl, transact_id);
76602 +               goto next_log_record_analyze;
76604 +       case Noop:
76605 +       case OpenAttributeTableDump:
76606 +       case AttributeNamesDump:
76607 +       case DirtyPageTableDump:
76608 +       case TransactionTableDump:
76609 +               /* The following cases require no action the Analysis Pass */
76610 +               goto next_log_record_analyze;
76612 +       default:
76613 +               /*
76614 +                * All codes will be explicitly handled.
76615 +                * If we see a code we do not expect, then we are trouble
76616 +                */
76617 +               goto next_log_record_analyze;
76618 +       }
76620 +end_log_records_enumerate:
76621 +       lcb_put(lcb);
76622 +       lcb = NULL;
76624 +       /*
76625 +        * Scan the Dirty Page Table and Transaction Table for
76626 +        * the lowest lsn, and return it as the Redo lsn
76627 +        */
76628 +       dp = NULL;
76629 +       while ((dp = enum_rstbl(dptbl, dp))) {
76630 +               t64 = le64_to_cpu(dp->oldest_lsn);
76631 +               if (t64 && t64 < rlsn)
76632 +                       rlsn = t64;
76633 +       }
76635 +       tr = NULL;
76636 +       while ((tr = enum_rstbl(trtbl, tr))) {
76637 +               t64 = le64_to_cpu(tr->first_lsn);
76638 +               if (t64 && t64 < rlsn)
76639 +                       rlsn = t64;
76640 +       }
76642 +       /* Only proceed if the Dirty Page Table or Transaction table are not empty */
76643 +       if ((!dptbl || !dptbl->total) && (!trtbl || !trtbl->total))
76644 +               goto end_reply;
76646 +       sbi->flags |= NTFS_FLAGS_NEED_REPLAY;
76647 +       if (is_ro)
76648 +               goto out;
76650 +       /* Reopen all of the attributes with dirty pages */
76651 +       oe = NULL;
76652 +next_open_attribute:
76654 +       oe = enum_rstbl(oatbl, oe);
76655 +       if (!oe) {
76656 +               err = 0;
76657 +               dp = NULL;
76658 +               goto next_dirty_page;
76659 +       }
76661 +       oa = ntfs_zalloc(sizeof(struct OpenAttr));
76662 +       if (!oa) {
76663 +               err = -ENOMEM;
76664 +               goto out;
76665 +       }
76667 +       inode = ntfs_iget5(sbi->sb, &oe->ref, NULL);
76668 +       if (IS_ERR(inode))
76669 +               goto fake_attr;
76671 +       if (is_bad_inode(inode)) {
76672 +               iput(inode);
76673 +fake_attr:
76674 +               if (oa->ni) {
76675 +                       iput(&oa->ni->vfs_inode);
76676 +                       oa->ni = NULL;
76677 +               }
76679 +               attr = attr_create_nonres_log(sbi, oe->type, 0, oe->ptr,
76680 +                                             oe->name_len, 0);
76681 +               if (!attr) {
76682 +                       ntfs_free(oa);
76683 +                       err = -ENOMEM;
76684 +                       goto out;
76685 +               }
76686 +               oa->attr = attr;
76687 +               oa->run1 = &oa->run0;
76688 +               goto final_oe;
76689 +       }
76691 +       ni_oe = ntfs_i(inode);
76692 +       oa->ni = ni_oe;
76694 +       attr = ni_find_attr(ni_oe, NULL, NULL, oe->type, oe->ptr, oe->name_len,
76695 +                           NULL, NULL);
76697 +       if (!attr)
76698 +               goto fake_attr;
76700 +       t32 = le32_to_cpu(attr->size);
76701 +       oa->attr = ntfs_memdup(attr, t32);
76702 +       if (!oa->attr)
76703 +               goto fake_attr;
76705 +       if (!S_ISDIR(inode->i_mode)) {
76706 +               if (attr->type == ATTR_DATA && !attr->name_len) {
76707 +                       oa->run1 = &ni_oe->file.run;
76708 +                       goto final_oe;
76709 +               }
76710 +       } else {
76711 +               if (attr->type == ATTR_ALLOC &&
76712 +                   attr->name_len == ARRAY_SIZE(I30_NAME) &&
76713 +                   !memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME))) {
76714 +                       oa->run1 = &ni_oe->dir.alloc_run;
76715 +                       goto final_oe;
76716 +               }
76717 +       }
76719 +       if (attr->non_res) {
76720 +               u16 roff = le16_to_cpu(attr->nres.run_off);
76721 +               CLST svcn = le64_to_cpu(attr->nres.svcn);
76723 +               err = run_unpack(&oa->run0, sbi, inode->i_ino, svcn,
76724 +                                le64_to_cpu(attr->nres.evcn), svcn,
76725 +                                Add2Ptr(attr, roff), t32 - roff);
76726 +               if (err < 0) {
76727 +                       ntfs_free(oa->attr);
76728 +                       oa->attr = NULL;
76729 +                       goto fake_attr;
76730 +               }
76731 +               err = 0;
76732 +       }
76733 +       oa->run1 = &oa->run0;
76734 +       attr = oa->attr;
76736 +final_oe:
76737 +       if (oe->is_attr_name == 1)
76738 +               ntfs_free(oe->ptr);
76739 +       oe->is_attr_name = 0;
76740 +       oe->ptr = oa;
76741 +       oe->name_len = attr->name_len;
76743 +       goto next_open_attribute;
76745 +       /*
76746 +        * Now loop through the dirty page table to extract all of the Vcn/Lcn
76747 +        * Mapping that we have, and insert it into the appropriate run
76748 +        */
76749 +next_dirty_page:
76750 +       dp = enum_rstbl(dptbl, dp);
76751 +       if (!dp)
76752 +               goto do_redo_1;
76754 +       oe = Add2Ptr(oatbl, le32_to_cpu(dp->target_attr));
76756 +       if (oe->next != RESTART_ENTRY_ALLOCATED_LE)
76757 +               goto next_dirty_page;
76759 +       oa = oe->ptr;
76760 +       if (!oa)
76761 +               goto next_dirty_page;
76763 +       i = -1;
76764 +next_dirty_page_vcn:
76765 +       i += 1;
76766 +       if (i >= le32_to_cpu(dp->lcns_follow))
76767 +               goto next_dirty_page;
76769 +       vcn = le64_to_cpu(dp->vcn) + i;
76770 +       size = (vcn + 1) << sbi->cluster_bits;
76772 +       if (!dp->page_lcns[i])
76773 +               goto next_dirty_page_vcn;
76775 +       rno = ino_get(&oe->ref);
76776 +       if (rno <= MFT_REC_MIRR &&
76777 +           size < (MFT_REC_VOL + 1) * sbi->record_size &&
76778 +           oe->type == ATTR_DATA) {
76779 +               goto next_dirty_page_vcn;
76780 +       }
76782 +       lcn = le64_to_cpu(dp->page_lcns[i]);
76784 +       if ((!run_lookup_entry(oa->run1, vcn, &lcn0, &len0, NULL) ||
76785 +            lcn0 != lcn) &&
76786 +           !run_add_entry(oa->run1, vcn, lcn, 1, false)) {
76787 +               err = -ENOMEM;
76788 +               goto out;
76789 +       }
76790 +       attr = oa->attr;
76791 +       t64 = le64_to_cpu(attr->nres.alloc_size);
76792 +       if (size > t64) {
76793 +               attr->nres.valid_size = attr->nres.data_size =
76794 +                       attr->nres.alloc_size = cpu_to_le64(size);
76795 +       }
76796 +       goto next_dirty_page_vcn;
76798 +do_redo_1:
76799 +       /*
76800 +        * Perform the Redo Pass, to restore all of the dirty pages to the same
76801 +        * contents that they had immediately before the crash
76802 +        * If the dirty page table is empty, then we can skip the entire Redo Pass
76803 +        */
76804 +       if (!dptbl || !dptbl->total)
76805 +               goto do_undo_action;
76807 +       rec_lsn = rlsn;
76809 +       /*
76810 +        * Read the record at the Redo lsn, before falling
76811 +        * into common code to handle each record
76812 +        */
76813 +       err = read_log_rec_lcb(log, rlsn, lcb_ctx_next, &lcb);
76814 +       if (err)
76815 +               goto out;
76817 +       /*
76818 +        * Now loop to read all of our log records forwards,
76819 +        * until we hit the end of the file, cleaning up at the end
76820 +        */
76821 +do_action_next:
76822 +       frh = lcb->lrh;
76824 +       if (LfsClientRecord != frh->record_type)
76825 +               goto read_next_log_do_action;
76827 +       transact_id = le32_to_cpu(frh->transact_id);
76828 +       rec_len = le32_to_cpu(frh->client_data_len);
76829 +       lrh = lcb->log_rec;
76831 +       if (!check_log_rec(lrh, rec_len, transact_id, bytes_per_attr_entry)) {
76832 +               err = -EINVAL;
76833 +               goto out;
76834 +       }
76836 +       /* Ignore log records that do not update pages */
76837 +       if (lrh->lcns_follow)
76838 +               goto find_dirty_page;
76840 +       goto read_next_log_do_action;
76842 +find_dirty_page:
76843 +       t16 = le16_to_cpu(lrh->target_attr);
76844 +       t64 = le64_to_cpu(lrh->target_vcn);
76845 +       dp = find_dp(dptbl, t16, t64);
76847 +       if (!dp)
76848 +               goto read_next_log_do_action;
76850 +       if (rec_lsn < le64_to_cpu(dp->oldest_lsn))
76851 +               goto read_next_log_do_action;
76853 +       t16 = le16_to_cpu(lrh->target_attr);
76854 +       if (t16 >= bytes_per_rt(oatbl)) {
76855 +               err = -EINVAL;
76856 +               goto out;
76857 +       }
76859 +       oe = Add2Ptr(oatbl, t16);
76861 +       if (oe->next != RESTART_ENTRY_ALLOCATED_LE) {
76862 +               err = -EINVAL;
76863 +               goto out;
76864 +       }
76866 +       oa = oe->ptr;
76868 +       if (!oa) {
76869 +               err = -EINVAL;
76870 +               goto out;
76871 +       }
76872 +       attr = oa->attr;
76874 +       vcn = le64_to_cpu(lrh->target_vcn);
76876 +       if (!run_lookup_entry(oa->run1, vcn, &lcn, NULL, NULL) ||
76877 +           lcn == SPARSE_LCN) {
76878 +               goto read_next_log_do_action;
76879 +       }
76881 +       /* Point to the Redo data and get its length */
76882 +       data = Add2Ptr(lrh, le16_to_cpu(lrh->redo_off));
76883 +       dlen = le16_to_cpu(lrh->redo_len);
76885 +       /* Shorten length by any Lcns which were deleted */
76886 +       saved_len = dlen;
76888 +       for (i = le16_to_cpu(lrh->lcns_follow); i; i--) {
76889 +               size_t j;
76890 +               u32 alen, voff;
76892 +               voff = le16_to_cpu(lrh->record_off) +
76893 +                      le16_to_cpu(lrh->attr_off);
76894 +               voff += le16_to_cpu(lrh->cluster_off) << SECTOR_SHIFT;
76896 +               /* If the Vcn question is allocated, we can just get out.*/
76897 +               j = le64_to_cpu(lrh->target_vcn) - le64_to_cpu(dp->vcn);
76898 +               if (dp->page_lcns[j + i - 1])
76899 +                       break;
76901 +               if (!saved_len)
76902 +                       saved_len = 1;
76904 +               /*
76905 +                * Calculate the allocated space left relative to the
76906 +                * log record Vcn, after removing this unallocated Vcn
76907 +                */
76908 +               alen = (i - 1) << sbi->cluster_bits;
76910 +               /*
76911 +                * If the update described this log record goes beyond
76912 +                * the allocated space, then we will have to reduce the length
76913 +                */
76914 +               if (voff >= alen)
76915 +                       dlen = 0;
76916 +               else if (voff + dlen > alen)
76917 +                       dlen = alen - voff;
76918 +       }
76920 +       /* If the resulting dlen from above is now zero, we can skip this log record */
76921 +       if (!dlen && saved_len)
76922 +               goto read_next_log_do_action;
76924 +       t16 = le16_to_cpu(lrh->redo_op);
76925 +       if (can_skip_action(t16))
76926 +               goto read_next_log_do_action;
76928 +       /* Apply the Redo operation a common routine */
76929 +       err = do_action(log, oe, lrh, t16, data, dlen, rec_len, &rec_lsn);
76930 +       if (err)
76931 +               goto out;
76933 +       /* Keep reading and looping back until end of file */
76934 +read_next_log_do_action:
76935 +       err = read_next_log_rec(log, lcb, &rec_lsn);
76936 +       if (!err && rec_lsn)
76937 +               goto do_action_next;
76939 +       lcb_put(lcb);
76940 +       lcb = NULL;
76942 +do_undo_action:
76943 +       /* Scan Transaction Table */
76944 +       tr = NULL;
76945 +transaction_table_next:
76946 +       tr = enum_rstbl(trtbl, tr);
76947 +       if (!tr)
76948 +               goto undo_action_done;
76950 +       if (TransactionActive != tr->transact_state || !tr->undo_next_lsn) {
76951 +               free_rsttbl_idx(trtbl, PtrOffset(trtbl, tr));
76952 +               goto transaction_table_next;
76953 +       }
76955 +       log->transaction_id = PtrOffset(trtbl, tr);
76956 +       undo_next_lsn = le64_to_cpu(tr->undo_next_lsn);
76958 +       /*
76959 +        * We only have to do anything if the transaction has
76960 +        * something its undo_next_lsn field
76961 +        */
76962 +       if (!undo_next_lsn)
76963 +               goto commit_undo;
76965 +       /* Read the first record to be undone by this transaction */
76966 +       err = read_log_rec_lcb(log, undo_next_lsn, lcb_ctx_undo_next, &lcb);
76967 +       if (err)
76968 +               goto out;
76970 +       /*
76971 +        * Now loop to read all of our log records forwards,
76972 +        * until we hit the end of the file, cleaning up at the end
76973 +        */
76974 +undo_action_next:
76976 +       lrh = lcb->log_rec;
76977 +       frh = lcb->lrh;
76978 +       transact_id = le32_to_cpu(frh->transact_id);
76979 +       rec_len = le32_to_cpu(frh->client_data_len);
76981 +       if (!check_log_rec(lrh, rec_len, transact_id, bytes_per_attr_entry)) {
76982 +               err = -EINVAL;
76983 +               goto out;
76984 +       }
76986 +       if (lrh->undo_op == cpu_to_le16(Noop))
76987 +               goto read_next_log_undo_action;
76989 +       oe = Add2Ptr(oatbl, le16_to_cpu(lrh->target_attr));
76990 +       oa = oe->ptr;
76992 +       t16 = le16_to_cpu(lrh->lcns_follow);
76993 +       if (!t16)
76994 +               goto add_allocated_vcns;
76996 +       is_mapped = run_lookup_entry(oa->run1, le64_to_cpu(lrh->target_vcn),
76997 +                                    &lcn, &clen, NULL);
76999 +       /*
77000 +        * If the mapping isn't already the table or the  mapping
77001 +        * corresponds to a hole the mapping, we need to make sure
77002 +        * there is no partial page already memory
77003 +        */
77004 +       if (is_mapped && lcn != SPARSE_LCN && clen >= t16)
77005 +               goto add_allocated_vcns;
77007 +       vcn = le64_to_cpu(lrh->target_vcn);
77008 +       vcn &= ~(log->clst_per_page - 1);
77010 +add_allocated_vcns:
77011 +       for (i = 0, vcn = le64_to_cpu(lrh->target_vcn),
77012 +           size = (vcn + 1) << sbi->cluster_bits;
77013 +            i < t16; i++, vcn += 1, size += sbi->cluster_size) {
77014 +               attr = oa->attr;
77015 +               if (!attr->non_res) {
77016 +                       if (size > le32_to_cpu(attr->res.data_size))
77017 +                               attr->res.data_size = cpu_to_le32(size);
77018 +               } else {
77019 +                       if (size > le64_to_cpu(attr->nres.data_size))
77020 +                               attr->nres.valid_size = attr->nres.data_size =
77021 +                                       attr->nres.alloc_size =
77022 +                                               cpu_to_le64(size);
77023 +               }
77024 +       }
77026 +       t16 = le16_to_cpu(lrh->undo_op);
77027 +       if (can_skip_action(t16))
77028 +               goto read_next_log_undo_action;
77030 +       /* Point to the Redo data and get its length */
77031 +       data = Add2Ptr(lrh, le16_to_cpu(lrh->undo_off));
77032 +       dlen = le16_to_cpu(lrh->undo_len);
77034 +       /* it is time to apply the undo action */
77035 +       err = do_action(log, oe, lrh, t16, data, dlen, rec_len, NULL);
77037 +read_next_log_undo_action:
77038 +       /*
77039 +        * Keep reading and looping back until we have read the
77040 +        * last record for this transaction
77041 +        */
77042 +       err = read_next_log_rec(log, lcb, &rec_lsn);
77043 +       if (err)
77044 +               goto out;
77046 +       if (rec_lsn)
77047 +               goto undo_action_next;
77049 +       lcb_put(lcb);
77050 +       lcb = NULL;
77052 +commit_undo:
77053 +       free_rsttbl_idx(trtbl, log->transaction_id);
77055 +       log->transaction_id = 0;
77057 +       goto transaction_table_next;
77059 +undo_action_done:
77061 +       ntfs_update_mftmirr(sbi, 0);
77063 +       sbi->flags &= ~NTFS_FLAGS_NEED_REPLAY;
77065 +end_reply:
77067 +       err = 0;
77068 +       if (is_ro)
77069 +               goto out;
77071 +       rh = ntfs_zalloc(log->page_size);
77072 +       if (!rh) {
77073 +               err = -ENOMEM;
77074 +               goto out;
77075 +       }
77077 +       rh->rhdr.sign = NTFS_RSTR_SIGNATURE;
77078 +       rh->rhdr.fix_off = cpu_to_le16(offsetof(struct RESTART_HDR, fixups));
77079 +       t16 = (log->page_size >> SECTOR_SHIFT) + 1;
77080 +       rh->rhdr.fix_num = cpu_to_le16(t16);
77081 +       rh->sys_page_size = cpu_to_le32(log->page_size);
77082 +       rh->page_size = cpu_to_le32(log->page_size);
77084 +       t16 = QuadAlign(offsetof(struct RESTART_HDR, fixups) +
77085 +                       sizeof(short) * t16);
77086 +       rh->ra_off = cpu_to_le16(t16);
77087 +       rh->minor_ver = cpu_to_le16(1); // 0x1A:
77088 +       rh->major_ver = cpu_to_le16(1); // 0x1C:
77090 +       ra2 = Add2Ptr(rh, t16);
77091 +       memcpy(ra2, ra, sizeof(struct RESTART_AREA));
77093 +       ra2->client_idx[0] = 0;
77094 +       ra2->client_idx[1] = LFS_NO_CLIENT_LE;
77095 +       ra2->flags = cpu_to_le16(2);
77097 +       le32_add_cpu(&ra2->open_log_count, 1);
77099 +       ntfs_fix_pre_write(&rh->rhdr, log->page_size);
77101 +       err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rh, log->page_size);
77102 +       if (!err)
77103 +               err = ntfs_sb_write_run(sbi, &log->ni->file.run, log->page_size,
77104 +                                       rh, log->page_size);
77106 +       ntfs_free(rh);
77107 +       if (err)
77108 +               goto out;
77110 +out:
77111 +       ntfs_free(rst);
77112 +       if (lcb)
77113 +               lcb_put(lcb);
77115 +       /* Scan the Open Attribute Table to close all of the open attributes */
77116 +       oe = NULL;
77117 +       while ((oe = enum_rstbl(oatbl, oe))) {
77118 +               rno = ino_get(&oe->ref);
77120 +               if (oe->is_attr_name == 1) {
77121 +                       ntfs_free(oe->ptr);
77122 +                       oe->ptr = NULL;
77123 +                       continue;
77124 +               }
77126 +               if (oe->is_attr_name)
77127 +                       continue;
77129 +               oa = oe->ptr;
77130 +               if (!oa)
77131 +                       continue;
77133 +               run_close(&oa->run0);
77134 +               ntfs_free(oa->attr);
77135 +               if (oa->ni)
77136 +                       iput(&oa->ni->vfs_inode);
77137 +               ntfs_free(oa);
77138 +       }
77140 +       ntfs_free(trtbl);
77141 +       ntfs_free(oatbl);
77142 +       ntfs_free(dptbl);
77143 +       ntfs_free(attr_names);
77144 +       ntfs_free(rst_info.r_page);
77146 +       ntfs_free(ra);
77147 +       ntfs_free(log->one_page_buf);
77149 +       if (err)
77150 +               sbi->flags |= NTFS_FLAGS_NEED_REPLAY;
77152 +       if (err == -EROFS)
77153 +               err = 0;
77154 +       else if (log->set_dirty)
77155 +               ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
77157 +       ntfs_free(log);
77159 +       return err;
77161 diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
77162 new file mode 100644
77163 index 000000000000..327356b08187
77164 --- /dev/null
77165 +++ b/fs/ntfs3/fsntfs.c
77166 @@ -0,0 +1,2542 @@
77167 +// SPDX-License-Identifier: GPL-2.0
77169 + *
77170 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
77171 + *
77172 + */
77174 +#include <linux/blkdev.h>
77175 +#include <linux/buffer_head.h>
77176 +#include <linux/fs.h>
77177 +#include <linux/nls.h>
77179 +#include "debug.h"
77180 +#include "ntfs.h"
77181 +#include "ntfs_fs.h"
77183 +// clang-format off
77184 +const struct cpu_str NAME_MFT = {
77185 +       4, 0, { '$', 'M', 'F', 'T' },
77187 +const struct cpu_str NAME_MIRROR = {
77188 +       8, 0, { '$', 'M', 'F', 'T', 'M', 'i', 'r', 'r' },
77190 +const struct cpu_str NAME_LOGFILE = {
77191 +       8, 0, { '$', 'L', 'o', 'g', 'F', 'i', 'l', 'e' },
77193 +const struct cpu_str NAME_VOLUME = {
77194 +       7, 0, { '$', 'V', 'o', 'l', 'u', 'm', 'e' },
77196 +const struct cpu_str NAME_ATTRDEF = {
77197 +       8, 0, { '$', 'A', 't', 't', 'r', 'D', 'e', 'f' },
77199 +const struct cpu_str NAME_ROOT = {
77200 +       1, 0, { '.' },
77202 +const struct cpu_str NAME_BITMAP = {
77203 +       7, 0, { '$', 'B', 'i', 't', 'm', 'a', 'p' },
77205 +const struct cpu_str NAME_BOOT = {
77206 +       5, 0, { '$', 'B', 'o', 'o', 't' },
77208 +const struct cpu_str NAME_BADCLUS = {
77209 +       8, 0, { '$', 'B', 'a', 'd', 'C', 'l', 'u', 's' },
77211 +const struct cpu_str NAME_QUOTA = {
77212 +       6, 0, { '$', 'Q', 'u', 'o', 't', 'a' },
77214 +const struct cpu_str NAME_SECURE = {
77215 +       7, 0, { '$', 'S', 'e', 'c', 'u', 'r', 'e' },
77217 +const struct cpu_str NAME_UPCASE = {
77218 +       7, 0, { '$', 'U', 'p', 'C', 'a', 's', 'e' },
77220 +const struct cpu_str NAME_EXTEND = {
77221 +       7, 0, { '$', 'E', 'x', 't', 'e', 'n', 'd' },
77223 +const struct cpu_str NAME_OBJID = {
77224 +       6, 0, { '$', 'O', 'b', 'j', 'I', 'd' },
77226 +const struct cpu_str NAME_REPARSE = {
77227 +       8, 0, { '$', 'R', 'e', 'p', 'a', 'r', 's', 'e' },
77229 +const struct cpu_str NAME_USNJRNL = {
77230 +       8, 0, { '$', 'U', 's', 'n', 'J', 'r', 'n', 'l' },
77232 +const __le16 BAD_NAME[4] = {
77233 +       cpu_to_le16('$'), cpu_to_le16('B'), cpu_to_le16('a'), cpu_to_le16('d'),
77235 +const __le16 I30_NAME[4] = {
77236 +       cpu_to_le16('$'), cpu_to_le16('I'), cpu_to_le16('3'), cpu_to_le16('0'),
77238 +const __le16 SII_NAME[4] = {
77239 +       cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('I'), cpu_to_le16('I'),
77241 +const __le16 SDH_NAME[4] = {
77242 +       cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('H'),
77244 +const __le16 SDS_NAME[4] = {
77245 +       cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('S'),
77247 +const __le16 SO_NAME[2] = {
77248 +       cpu_to_le16('$'), cpu_to_le16('O'),
77250 +const __le16 SQ_NAME[2] = {
77251 +       cpu_to_le16('$'), cpu_to_le16('Q'),
77253 +const __le16 SR_NAME[2] = {
77254 +       cpu_to_le16('$'), cpu_to_le16('R'),
77257 +#ifdef CONFIG_NTFS3_LZX_XPRESS
77258 +const __le16 WOF_NAME[17] = {
77259 +       cpu_to_le16('W'), cpu_to_le16('o'), cpu_to_le16('f'), cpu_to_le16('C'),
77260 +       cpu_to_le16('o'), cpu_to_le16('m'), cpu_to_le16('p'), cpu_to_le16('r'),
77261 +       cpu_to_le16('e'), cpu_to_le16('s'), cpu_to_le16('s'), cpu_to_le16('e'),
77262 +       cpu_to_le16('d'), cpu_to_le16('D'), cpu_to_le16('a'), cpu_to_le16('t'),
77263 +       cpu_to_le16('a'),
77265 +#endif
77267 +// clang-format on
77270 + * ntfs_fix_pre_write
77271 + *
77272 + * inserts fixups into 'rhdr' before writing to disk
77273 + */
77274 +bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
77276 +       u16 *fixup, *ptr;
77277 +       u16 sample;
77278 +       u16 fo = le16_to_cpu(rhdr->fix_off);
77279 +       u16 fn = le16_to_cpu(rhdr->fix_num);
77281 +       if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
77282 +           fn * SECTOR_SIZE > bytes) {
77283 +               return false;
77284 +       }
77286 +       /* Get fixup pointer */
77287 +       fixup = Add2Ptr(rhdr, fo);
77289 +       if (*fixup >= 0x7FFF)
77290 +               *fixup = 1;
77291 +       else
77292 +               *fixup += 1;
77294 +       sample = *fixup;
77296 +       ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
77298 +       while (fn--) {
77299 +               *++fixup = *ptr;
77300 +               *ptr = sample;
77301 +               ptr += SECTOR_SIZE / sizeof(short);
77302 +       }
77303 +       return true;
77307 + * ntfs_fix_post_read
77308 + *
77309 + * remove fixups after reading from disk
77310 + * Returns < 0 if error, 0 if ok, 1 if need to update fixups
77311 + */
77312 +int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
77313 +                      bool simple)
77315 +       int ret;
77316 +       u16 *fixup, *ptr;
77317 +       u16 sample, fo, fn;
77319 +       fo = le16_to_cpu(rhdr->fix_off);
77320 +       fn = simple ? ((bytes >> SECTOR_SHIFT) + 1)
77321 +                   : le16_to_cpu(rhdr->fix_num);
77323 +       /* Check errors */
77324 +       if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
77325 +           fn * SECTOR_SIZE > bytes) {
77326 +               return -EINVAL; /* native chkntfs returns ok! */
77327 +       }
77329 +       /* Get fixup pointer */
77330 +       fixup = Add2Ptr(rhdr, fo);
77331 +       sample = *fixup;
77332 +       ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
77333 +       ret = 0;
77335 +       while (fn--) {
77336 +               /* Test current word */
77337 +               if (*ptr != sample) {
77338 +                       /* Fixup does not match! Is it serious error? */
77339 +                       ret = -E_NTFS_FIXUP;
77340 +               }
77342 +               /* Replace fixup */
77343 +               *ptr = *++fixup;
77344 +               ptr += SECTOR_SIZE / sizeof(short);
77345 +       }
77347 +       return ret;
77351 + * ntfs_extend_init
77352 + *
77353 + * loads $Extend file
77354 + */
77355 +int ntfs_extend_init(struct ntfs_sb_info *sbi)
77357 +       int err;
77358 +       struct super_block *sb = sbi->sb;
77359 +       struct inode *inode, *inode2;
77360 +       struct MFT_REF ref;
77362 +       if (sbi->volume.major_ver < 3) {
77363 +               ntfs_notice(sb, "Skip $Extend 'cause NTFS version");
77364 +               return 0;
77365 +       }
77367 +       ref.low = cpu_to_le32(MFT_REC_EXTEND);
77368 +       ref.high = 0;
77369 +       ref.seq = cpu_to_le16(MFT_REC_EXTEND);
77370 +       inode = ntfs_iget5(sb, &ref, &NAME_EXTEND);
77371 +       if (IS_ERR(inode)) {
77372 +               err = PTR_ERR(inode);
77373 +               ntfs_err(sb, "Failed to load $Extend.");
77374 +               inode = NULL;
77375 +               goto out;
77376 +       }
77378 +       /* if ntfs_iget5 reads from disk it never returns bad inode */
77379 +       if (!S_ISDIR(inode->i_mode)) {
77380 +               err = -EINVAL;
77381 +               goto out;
77382 +       }
77384 +       /* Try to find $ObjId */
77385 +       inode2 = dir_search_u(inode, &NAME_OBJID, NULL);
77386 +       if (inode2 && !IS_ERR(inode2)) {
77387 +               if (is_bad_inode(inode2)) {
77388 +                       iput(inode2);
77389 +               } else {
77390 +                       sbi->objid.ni = ntfs_i(inode2);
77391 +                       sbi->objid_no = inode2->i_ino;
77392 +               }
77393 +       }
77395 +       /* Try to find $Quota */
77396 +       inode2 = dir_search_u(inode, &NAME_QUOTA, NULL);
77397 +       if (inode2 && !IS_ERR(inode2)) {
77398 +               sbi->quota_no = inode2->i_ino;
77399 +               iput(inode2);
77400 +       }
77402 +       /* Try to find $Reparse */
77403 +       inode2 = dir_search_u(inode, &NAME_REPARSE, NULL);
77404 +       if (inode2 && !IS_ERR(inode2)) {
77405 +               sbi->reparse.ni = ntfs_i(inode2);
77406 +               sbi->reparse_no = inode2->i_ino;
77407 +       }
77409 +       /* Try to find $UsnJrnl */
77410 +       inode2 = dir_search_u(inode, &NAME_USNJRNL, NULL);
77411 +       if (inode2 && !IS_ERR(inode2)) {
77412 +               sbi->usn_jrnl_no = inode2->i_ino;
77413 +               iput(inode2);
77414 +       }
77416 +       err = 0;
77417 +out:
77418 +       iput(inode);
77419 +       return err;
77422 +int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
77424 +       int err = 0;
77425 +       struct super_block *sb = sbi->sb;
77426 +       bool initialized = false;
77427 +       struct MFT_REF ref;
77428 +       struct inode *inode;
77430 +       /* Check for 4GB */
77431 +       if (ni->vfs_inode.i_size >= 0x100000000ull) {
77432 +               ntfs_err(sb, "\x24LogFile is too big");
77433 +               err = -EINVAL;
77434 +               goto out;
77435 +       }
77437 +       sbi->flags |= NTFS_FLAGS_LOG_REPLAYING;
77439 +       ref.low = cpu_to_le32(MFT_REC_MFT);
77440 +       ref.high = 0;
77441 +       ref.seq = cpu_to_le16(1);
77443 +       inode = ntfs_iget5(sb, &ref, NULL);
77445 +       if (IS_ERR(inode))
77446 +               inode = NULL;
77448 +       if (!inode) {
77449 +               /* Try to use mft copy */
77450 +               u64 t64 = sbi->mft.lbo;
77452 +               sbi->mft.lbo = sbi->mft.lbo2;
77453 +               inode = ntfs_iget5(sb, &ref, NULL);
77454 +               sbi->mft.lbo = t64;
77455 +               if (IS_ERR(inode))
77456 +                       inode = NULL;
77457 +       }
77459 +       if (!inode) {
77460 +               err = -EINVAL;
77461 +               ntfs_err(sb, "Failed to load $MFT.");
77462 +               goto out;
77463 +       }
77465 +       sbi->mft.ni = ntfs_i(inode);
77467 +       /* LogFile should not contains attribute list */
77468 +       err = ni_load_all_mi(sbi->mft.ni);
77469 +       if (!err)
77470 +               err = log_replay(ni, &initialized);
77472 +       iput(inode);
77473 +       sbi->mft.ni = NULL;
77475 +       sync_blockdev(sb->s_bdev);
77476 +       invalidate_bdev(sb->s_bdev);
77478 +       if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
77479 +               err = 0;
77480 +               goto out;
77481 +       }
77483 +       if (sb_rdonly(sb) || !initialized)
77484 +               goto out;
77486 +       /* fill LogFile by '-1' if it is initialized */
77487 +       err = ntfs_bio_fill_1(sbi, &ni->file.run);
77489 +out:
77490 +       sbi->flags &= ~NTFS_FLAGS_LOG_REPLAYING;
77492 +       return err;
77496 + * ntfs_query_def
77497 + *
77498 + * returns current ATTR_DEF_ENTRY for given attribute type
77499 + */
77500 +const struct ATTR_DEF_ENTRY *ntfs_query_def(struct ntfs_sb_info *sbi,
77501 +                                           enum ATTR_TYPE type)
77503 +       int type_in = le32_to_cpu(type);
77504 +       size_t min_idx = 0;
77505 +       size_t max_idx = sbi->def_entries - 1;
77507 +       while (min_idx <= max_idx) {
77508 +               size_t i = min_idx + ((max_idx - min_idx) >> 1);
77509 +               const struct ATTR_DEF_ENTRY *entry = sbi->def_table + i;
77510 +               int diff = le32_to_cpu(entry->type) - type_in;
77512 +               if (!diff)
77513 +                       return entry;
77514 +               if (diff < 0)
77515 +                       min_idx = i + 1;
77516 +               else if (i)
77517 +                       max_idx = i - 1;
77518 +               else
77519 +                       return NULL;
77520 +       }
77521 +       return NULL;
77525 + * ntfs_look_for_free_space
77526 + *
77527 + * looks for a free space in bitmap
77528 + */
77529 +int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
77530 +                            CLST *new_lcn, CLST *new_len,
77531 +                            enum ALLOCATE_OPT opt)
77533 +       int err;
77534 +       struct super_block *sb = sbi->sb;
77535 +       size_t a_lcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
77536 +       struct wnd_bitmap *wnd = &sbi->used.bitmap;
77538 +       down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
77539 +       if (opt & ALLOCATE_MFT) {
77540 +               CLST alen;
77542 +               zlen = wnd_zone_len(wnd);
77544 +               if (!zlen) {
77545 +                       err = ntfs_refresh_zone(sbi);
77546 +                       if (err)
77547 +                               goto out;
77549 +                       zlen = wnd_zone_len(wnd);
77551 +                       if (!zlen) {
77552 +                               ntfs_err(sbi->sb,
77553 +                                        "no free space to extend mft");
77554 +                               err = -ENOSPC;
77555 +                               goto out;
77556 +                       }
77557 +               }
77559 +               lcn = wnd_zone_bit(wnd);
77560 +               alen = zlen > len ? len : zlen;
77562 +               wnd_zone_set(wnd, lcn + alen, zlen - alen);
77564 +               err = wnd_set_used(wnd, lcn, alen);
77565 +               if (err)
77566 +                       goto out;
77568 +               *new_lcn = lcn;
77569 +               *new_len = alen;
77570 +               goto ok;
77571 +       }
77573 +       /*
77574 +        * 'Cause cluster 0 is always used this value means that we should use
77575 +        * cached value of 'next_free_lcn' to improve performance
77576 +        */
77577 +       if (!lcn)
77578 +               lcn = sbi->used.next_free_lcn;
77580 +       if (lcn >= wnd->nbits)
77581 +               lcn = 0;
77583 +       *new_len = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &a_lcn);
77584 +       if (*new_len) {
77585 +               *new_lcn = a_lcn;
77586 +               goto ok;
77587 +       }
77589 +       /* Try to use clusters from MftZone */
77590 +       zlen = wnd_zone_len(wnd);
77591 +       zeroes = wnd_zeroes(wnd);
77593 +       /* Check too big request */
77594 +       if (len > zeroes + zlen)
77595 +               goto no_space;
77597 +       if (zlen <= NTFS_MIN_MFT_ZONE)
77598 +               goto no_space;
77600 +       /* How many clusters to cat from zone */
77601 +       zlcn = wnd_zone_bit(wnd);
77602 +       zlen2 = zlen >> 1;
77603 +       ztrim = len > zlen ? zlen : (len > zlen2 ? len : zlen2);
77604 +       new_zlen = zlen - ztrim;
77606 +       if (new_zlen < NTFS_MIN_MFT_ZONE) {
77607 +               new_zlen = NTFS_MIN_MFT_ZONE;
77608 +               if (new_zlen > zlen)
77609 +                       new_zlen = zlen;
77610 +       }
77612 +       wnd_zone_set(wnd, zlcn, new_zlen);
77614 +       /* allocate continues clusters */
77615 +       *new_len =
77616 +               wnd_find(wnd, len, 0,
77617 +                        BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &a_lcn);
77618 +       if (*new_len) {
77619 +               *new_lcn = a_lcn;
77620 +               goto ok;
77621 +       }
77623 +no_space:
77624 +       up_write(&wnd->rw_lock);
77626 +       return -ENOSPC;
77628 +ok:
77629 +       err = 0;
77631 +       ntfs_unmap_meta(sb, *new_lcn, *new_len);
77633 +       if (opt & ALLOCATE_MFT)
77634 +               goto out;
77636 +       /* Set hint for next requests */
77637 +       sbi->used.next_free_lcn = *new_lcn + *new_len;
77639 +out:
77640 +       up_write(&wnd->rw_lock);
77641 +       return err;
77645 + * ntfs_extend_mft
77646 + *
77647 + * allocates additional MFT records
77648 + * sbi->mft.bitmap is locked for write
77649 + *
77650 + * NOTE: recursive:
77651 + *     ntfs_look_free_mft ->
77652 + *     ntfs_extend_mft ->
77653 + *     attr_set_size ->
77654 + *     ni_insert_nonresident ->
77655 + *     ni_insert_attr ->
77656 + *     ni_ins_attr_ext ->
77657 + *     ntfs_look_free_mft ->
77658 + *     ntfs_extend_mft
77659 + * To avoid recursive always allocate space for two new mft records
77660 + * see attrib.c: "at least two mft to avoid recursive loop"
77661 + */
77662 +static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
77664 +       int err;
77665 +       struct ntfs_inode *ni = sbi->mft.ni;
77666 +       size_t new_mft_total;
77667 +       u64 new_mft_bytes, new_bitmap_bytes;
77668 +       struct ATTRIB *attr;
77669 +       struct wnd_bitmap *wnd = &sbi->mft.bitmap;
77671 +       new_mft_total = (wnd->nbits + MFT_INCREASE_CHUNK + 127) & (CLST)~127;
77672 +       new_mft_bytes = (u64)new_mft_total << sbi->record_bits;
77674 +       /* Step 1: Resize $MFT::DATA */
77675 +       down_write(&ni->file.run_lock);
77676 +       err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
77677 +                           new_mft_bytes, NULL, false, &attr);
77679 +       if (err) {
77680 +               up_write(&ni->file.run_lock);
77681 +               goto out;
77682 +       }
77684 +       attr->nres.valid_size = attr->nres.data_size;
77685 +       new_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;
77686 +       ni->mi.dirty = true;
77688 +       /* Step 2: Resize $MFT::BITMAP */
77689 +       new_bitmap_bytes = bitmap_size(new_mft_total);
77691 +       err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
77692 +                           new_bitmap_bytes, &new_bitmap_bytes, true, NULL);
77694 +       /* Refresh Mft Zone if necessary */
77695 +       down_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);
77697 +       ntfs_refresh_zone(sbi);
77699 +       up_write(&sbi->used.bitmap.rw_lock);
77700 +       up_write(&ni->file.run_lock);
77702 +       if (err)
77703 +               goto out;
77705 +       err = wnd_extend(wnd, new_mft_total);
77707 +       if (err)
77708 +               goto out;
77710 +       ntfs_clear_mft_tail(sbi, sbi->mft.used, new_mft_total);
77712 +       err = _ni_write_inode(&ni->vfs_inode, 0);
77713 +out:
77714 +       return err;
77718 + * ntfs_look_free_mft
77719 + *
77720 + * looks for a free MFT record
77721 + */
77722 +int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
77723 +                      struct ntfs_inode *ni, struct mft_inode **mi)
77725 +       int err = 0;
77726 +       size_t zbit, zlen, from, to, fr;
77727 +       size_t mft_total;
77728 +       struct MFT_REF ref;
77729 +       struct super_block *sb = sbi->sb;
77730 +       struct wnd_bitmap *wnd = &sbi->mft.bitmap;
77731 +       u32 ir;
77733 +       static_assert(sizeof(sbi->mft.reserved_bitmap) * 8 >=
77734 +                     MFT_REC_FREE - MFT_REC_RESERVED);
77736 +       if (!mft)
77737 +               down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
77739 +       zlen = wnd_zone_len(wnd);
77741 +       /* Always reserve space for MFT */
77742 +       if (zlen) {
77743 +               if (mft) {
77744 +                       zbit = wnd_zone_bit(wnd);
77745 +                       *rno = zbit;
77746 +                       wnd_zone_set(wnd, zbit + 1, zlen - 1);
77747 +               }
77748 +               goto found;
77749 +       }
77751 +       /* No MFT zone. find the nearest to '0' free MFT */
77752 +       if (!wnd_find(wnd, 1, MFT_REC_FREE, 0, &zbit)) {
77753 +               /* Resize MFT */
77754 +               mft_total = wnd->nbits;
77756 +               err = ntfs_extend_mft(sbi);
77757 +               if (!err) {
77758 +                       zbit = mft_total;
77759 +                       goto reserve_mft;
77760 +               }
77762 +               if (!mft || MFT_REC_FREE == sbi->mft.next_reserved)
77763 +                       goto out;
77765 +               err = 0;
77767 +               /*
77768 +                * Look for free record reserved area [11-16) ==
77769 +                * [MFT_REC_RESERVED, MFT_REC_FREE ) MFT bitmap always
77770 +                * marks it as used
77771 +                */
77772 +               if (!sbi->mft.reserved_bitmap) {
77773 +                       /* Once per session create internal bitmap for 5 bits */
77774 +                       sbi->mft.reserved_bitmap = 0xFF;
77776 +                       ref.high = 0;
77777 +                       for (ir = MFT_REC_RESERVED; ir < MFT_REC_FREE; ir++) {
77778 +                               struct inode *i;
77779 +                               struct ntfs_inode *ni;
77780 +                               struct MFT_REC *mrec;
77782 +                               ref.low = cpu_to_le32(ir);
77783 +                               ref.seq = cpu_to_le16(ir);
77785 +                               i = ntfs_iget5(sb, &ref, NULL);
77786 +                               if (IS_ERR(i)) {
77787 +next:
77788 +                                       ntfs_notice(
77789 +                                               sb,
77790 +                                               "Invalid reserved record %x",
77791 +                                               ref.low);
77792 +                                       continue;
77793 +                               }
77794 +                               if (is_bad_inode(i)) {
77795 +                                       iput(i);
77796 +                                       goto next;
77797 +                               }
77799 +                               ni = ntfs_i(i);
77801 +                               mrec = ni->mi.mrec;
77803 +                               if (!is_rec_base(mrec))
77804 +                                       goto next;
77806 +                               if (mrec->hard_links)
77807 +                                       goto next;
77809 +                               if (!ni_std(ni))
77810 +                                       goto next;
77812 +                               if (ni_find_attr(ni, NULL, NULL, ATTR_NAME,
77813 +                                                NULL, 0, NULL, NULL))
77814 +                                       goto next;
77816 +                               __clear_bit(ir - MFT_REC_RESERVED,
77817 +                                           &sbi->mft.reserved_bitmap);
77818 +                       }
77819 +               }
77821 +               /* Scan 5 bits for zero. Bit 0 == MFT_REC_RESERVED */
77822 +               zbit = find_next_zero_bit(&sbi->mft.reserved_bitmap,
77823 +                                         MFT_REC_FREE, MFT_REC_RESERVED);
77824 +               if (zbit >= MFT_REC_FREE) {
77825 +                       sbi->mft.next_reserved = MFT_REC_FREE;
77826 +                       goto out;
77827 +               }
77829 +               zlen = 1;
77830 +               sbi->mft.next_reserved = zbit;
77831 +       } else {
77832 +reserve_mft:
77833 +               zlen = zbit == MFT_REC_FREE ? (MFT_REC_USER - MFT_REC_FREE) : 4;
77834 +               if (zbit + zlen > wnd->nbits)
77835 +                       zlen = wnd->nbits - zbit;
77837 +               while (zlen > 1 && !wnd_is_free(wnd, zbit, zlen))
77838 +                       zlen -= 1;
77840 +               /* [zbit, zbit + zlen) will be used for Mft itself */
77841 +               from = sbi->mft.used;
77842 +               if (from < zbit)
77843 +                       from = zbit;
77844 +               to = zbit + zlen;
77845 +               if (from < to) {
77846 +                       ntfs_clear_mft_tail(sbi, from, to);
77847 +                       sbi->mft.used = to;
77848 +               }
77849 +       }
77851 +       if (mft) {
77852 +               *rno = zbit;
77853 +               zbit += 1;
77854 +               zlen -= 1;
77855 +       }
77857 +       wnd_zone_set(wnd, zbit, zlen);
77859 +found:
77860 +       if (!mft) {
77861 +               /* The request to get record for general purpose */
77862 +               if (sbi->mft.next_free < MFT_REC_USER)
77863 +                       sbi->mft.next_free = MFT_REC_USER;
77865 +               for (;;) {
77866 +                       if (sbi->mft.next_free >= sbi->mft.bitmap.nbits) {
77867 +                       } else if (!wnd_find(wnd, 1, MFT_REC_USER, 0, &fr)) {
77868 +                               sbi->mft.next_free = sbi->mft.bitmap.nbits;
77869 +                       } else {
77870 +                               *rno = fr;
77871 +                               sbi->mft.next_free = *rno + 1;
77872 +                               break;
77873 +                       }
77875 +                       err = ntfs_extend_mft(sbi);
77876 +                       if (err)
77877 +                               goto out;
77878 +               }
77879 +       }
77881 +       if (ni && !ni_add_subrecord(ni, *rno, mi)) {
77882 +               err = -ENOMEM;
77883 +               goto out;
77884 +       }
77886 +       /* We have found a record that are not reserved for next MFT */
77887 +       if (*rno >= MFT_REC_FREE)
77888 +               wnd_set_used(wnd, *rno, 1);
77889 +       else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
77890 +               __set_bit(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
77892 +out:
77893 +       if (!mft)
77894 +               up_write(&wnd->rw_lock);
77896 +       return err;
77900 + * ntfs_mark_rec_free
77901 + *
77902 + * marks record as free
77903 + */
77904 +void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno)
77906 +       struct wnd_bitmap *wnd = &sbi->mft.bitmap;
77908 +       down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
77909 +       if (rno >= wnd->nbits)
77910 +               goto out;
77912 +       if (rno >= MFT_REC_FREE) {
77913 +               if (!wnd_is_used(wnd, rno, 1))
77914 +                       ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
77915 +               else
77916 +                       wnd_set_free(wnd, rno, 1);
77917 +       } else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) {
77918 +               __clear_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
77919 +       }
77921 +       if (rno < wnd_zone_bit(wnd))
77922 +               wnd_zone_set(wnd, rno, 1);
77923 +       else if (rno < sbi->mft.next_free && rno >= MFT_REC_USER)
77924 +               sbi->mft.next_free = rno;
77926 +out:
77927 +       up_write(&wnd->rw_lock);
77931 + * ntfs_clear_mft_tail
77932 + *
77933 + * formats empty records [from, to)
77934 + * sbi->mft.bitmap is locked for write
77935 + */
77936 +int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to)
77938 +       int err;
77939 +       u32 rs;
77940 +       u64 vbo;
77941 +       struct runs_tree *run;
77942 +       struct ntfs_inode *ni;
77944 +       if (from >= to)
77945 +               return 0;
77947 +       rs = sbi->record_size;
77948 +       ni = sbi->mft.ni;
77949 +       run = &ni->file.run;
77951 +       down_read(&ni->file.run_lock);
77952 +       vbo = (u64)from * rs;
77953 +       for (; from < to; from++, vbo += rs) {
77954 +               struct ntfs_buffers nb;
77956 +               err = ntfs_get_bh(sbi, run, vbo, rs, &nb);
77957 +               if (err)
77958 +                       goto out;
77960 +               err = ntfs_write_bh(sbi, &sbi->new_rec->rhdr, &nb, 0);
77961 +               nb_put(&nb);
77962 +               if (err)
77963 +                       goto out;
77964 +       }
77966 +out:
77967 +       sbi->mft.used = from;
77968 +       up_read(&ni->file.run_lock);
77969 +       return err;
77973 + * ntfs_refresh_zone
77974 + *
77975 + * refreshes Mft zone
77976 + * sbi->used.bitmap is locked for rw
77977 + * sbi->mft.bitmap is locked for write
77978 + * sbi->mft.ni->file.run_lock for write
77979 + */
77980 +int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
77982 +       CLST zone_limit, zone_max, lcn, vcn, len;
77983 +       size_t lcn_s, zlen;
77984 +       struct wnd_bitmap *wnd = &sbi->used.bitmap;
77985 +       struct ntfs_inode *ni = sbi->mft.ni;
77987 +       /* Do not change anything unless we have non empty Mft zone */
77988 +       if (wnd_zone_len(wnd))
77989 +               return 0;
77991 +       /*
77992 +        * Compute the mft zone at two steps
77993 +        * It would be nice if we are able to allocate
77994 +        * 1/8 of total clusters for MFT but not more then 512 MB
77995 +        */
77996 +       zone_limit = (512 * 1024 * 1024) >> sbi->cluster_bits;
77997 +       zone_max = wnd->nbits >> 3;
77998 +       if (zone_max > zone_limit)
77999 +               zone_max = zone_limit;
78001 +       vcn = bytes_to_cluster(sbi,
78002 +                              (u64)sbi->mft.bitmap.nbits << sbi->record_bits);
78004 +       if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))
78005 +               lcn = SPARSE_LCN;
78007 +       /* We should always find Last Lcn for MFT */
78008 +       if (lcn == SPARSE_LCN)
78009 +               return -EINVAL;
78011 +       lcn_s = lcn + 1;
78013 +       /* Try to allocate clusters after last MFT run */
78014 +       zlen = wnd_find(wnd, zone_max, lcn_s, 0, &lcn_s);
78015 +       if (!zlen) {
78016 +               ntfs_notice(sbi->sb, "MftZone: unavailable");
78017 +               return 0;
78018 +       }
78020 +       /* Truncate too large zone */
78021 +       wnd_zone_set(wnd, lcn_s, zlen);
78023 +       return 0;
78027 + * ntfs_update_mftmirr
78028 + *
78029 + * updates $MFTMirr data
78030 + */
78031 +int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
78033 +       int err;
78034 +       struct super_block *sb = sbi->sb;
78035 +       u32 blocksize = sb->s_blocksize;
78036 +       sector_t block1, block2;
78037 +       u32 bytes;
78039 +       if (!(sbi->flags & NTFS_FLAGS_MFTMIRR))
78040 +               return 0;
78042 +       err = 0;
78043 +       bytes = sbi->mft.recs_mirr << sbi->record_bits;
78044 +       block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
78045 +       block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;
78047 +       for (; bytes >= blocksize; bytes -= blocksize) {
78048 +               struct buffer_head *bh1, *bh2;
78050 +               bh1 = sb_bread(sb, block1++);
78051 +               if (!bh1) {
78052 +                       err = -EIO;
78053 +                       goto out;
78054 +               }
78056 +               bh2 = sb_getblk(sb, block2++);
78057 +               if (!bh2) {
78058 +                       put_bh(bh1);
78059 +                       err = -EIO;
78060 +                       goto out;
78061 +               }
78063 +               if (buffer_locked(bh2))
78064 +                       __wait_on_buffer(bh2);
78066 +               lock_buffer(bh2);
78067 +               memcpy(bh2->b_data, bh1->b_data, blocksize);
78068 +               set_buffer_uptodate(bh2);
78069 +               mark_buffer_dirty(bh2);
78070 +               unlock_buffer(bh2);
78072 +               put_bh(bh1);
78073 +               bh1 = NULL;
78075 +               if (wait)
78076 +                       err = sync_dirty_buffer(bh2);
78078 +               put_bh(bh2);
78079 +               if (err)
78080 +                       goto out;
78081 +       }
78083 +       sbi->flags &= ~NTFS_FLAGS_MFTMIRR;
78085 +out:
78086 +       return err;
78090 + * ntfs_set_state
78091 + *
78092 + * mount: ntfs_set_state(NTFS_DIRTY_DIRTY)
78093 + * umount: ntfs_set_state(NTFS_DIRTY_CLEAR)
78094 + * ntfs error: ntfs_set_state(NTFS_DIRTY_ERROR)
78095 + */
78096 +int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
78098 +       int err;
78099 +       struct ATTRIB *attr;
78100 +       struct VOLUME_INFO *info;
78101 +       struct mft_inode *mi;
78102 +       struct ntfs_inode *ni;
78104 +       /*
78105 +        * do not change state if fs was real_dirty
78106 +        * do not change state if fs already dirty(clear)
78107 +        * do not change any thing if mounted read only
78108 +        */
78109 +       if (sbi->volume.real_dirty || sb_rdonly(sbi->sb))
78110 +               return 0;
78112 +       /* Check cached value */
78113 +       if ((dirty == NTFS_DIRTY_CLEAR ? 0 : VOLUME_FLAG_DIRTY) ==
78114 +           (sbi->volume.flags & VOLUME_FLAG_DIRTY))
78115 +               return 0;
78117 +       ni = sbi->volume.ni;
78118 +       if (!ni)
78119 +               return -EINVAL;
78121 +       mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_DIRTY);
78123 +       attr = ni_find_attr(ni, NULL, NULL, ATTR_VOL_INFO, NULL, 0, NULL, &mi);
78124 +       if (!attr) {
78125 +               err = -EINVAL;
78126 +               goto out;
78127 +       }
78129 +       info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
78130 +       if (!info) {
78131 +               err = -EINVAL;
78132 +               goto out;
78133 +       }
78135 +       switch (dirty) {
78136 +       case NTFS_DIRTY_ERROR:
78137 +               ntfs_notice(sbi->sb, "Mark volume as dirty due to NTFS errors");
78138 +               sbi->volume.real_dirty = true;
78139 +               fallthrough;
78140 +       case NTFS_DIRTY_DIRTY:
78141 +               info->flags |= VOLUME_FLAG_DIRTY;
78142 +               break;
78143 +       case NTFS_DIRTY_CLEAR:
78144 +               info->flags &= ~VOLUME_FLAG_DIRTY;
78145 +               break;
78146 +       }
78147 +       /* cache current volume flags*/
78148 +       sbi->volume.flags = info->flags;
78149 +       mi->dirty = true;
78150 +       err = 0;
78152 +out:
78153 +       ni_unlock(ni);
78154 +       if (err)
78155 +               return err;
78157 +       mark_inode_dirty(&ni->vfs_inode);
78158 +       /*verify(!ntfs_update_mftmirr()); */
78159 +       err = sync_inode_metadata(&ni->vfs_inode, 1);
78161 +       return err;
78165 + * security_hash
78166 + *
78167 + * calculates a hash of security descriptor
78168 + */
78169 +static inline __le32 security_hash(const void *sd, size_t bytes)
78171 +       u32 hash = 0;
78172 +       const __le32 *ptr = sd;
78174 +       bytes >>= 2;
78175 +       while (bytes--)
78176 +               hash = ((hash >> 0x1D) | (hash << 3)) + le32_to_cpu(*ptr++);
78177 +       return cpu_to_le32(hash);
78180 +int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
78182 +       struct block_device *bdev = sb->s_bdev;
78183 +       u32 blocksize = sb->s_blocksize;
78184 +       u64 block = lbo >> sb->s_blocksize_bits;
78185 +       u32 off = lbo & (blocksize - 1);
78186 +       u32 op = blocksize - off;
78188 +       for (; bytes; block += 1, off = 0, op = blocksize) {
78189 +               struct buffer_head *bh = __bread(bdev, block, blocksize);
78191 +               if (!bh)
78192 +                       return -EIO;
78194 +               if (op > bytes)
78195 +                       op = bytes;
78197 +               memcpy(buffer, bh->b_data + off, op);
78199 +               put_bh(bh);
78201 +               bytes -= op;
78202 +               buffer = Add2Ptr(buffer, op);
78203 +       }
78205 +       return 0;
78208 +int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
78209 +                 const void *buf, int wait)
78211 +       u32 blocksize = sb->s_blocksize;
78212 +       struct block_device *bdev = sb->s_bdev;
78213 +       sector_t block = lbo >> sb->s_blocksize_bits;
78214 +       u32 off = lbo & (blocksize - 1);
78215 +       u32 op = blocksize - off;
78216 +       struct buffer_head *bh;
78218 +       if (!wait && (sb->s_flags & SB_SYNCHRONOUS))
78219 +               wait = 1;
78221 +       for (; bytes; block += 1, off = 0, op = blocksize) {
78222 +               if (op > bytes)
78223 +                       op = bytes;
78225 +               if (op < blocksize) {
78226 +                       bh = __bread(bdev, block, blocksize);
78227 +                       if (!bh) {
78228 +                               ntfs_err(sb, "failed to read block %llx",
78229 +                                        (u64)block);
78230 +                               return -EIO;
78231 +                       }
78232 +               } else {
78233 +                       bh = __getblk(bdev, block, blocksize);
78234 +                       if (!bh)
78235 +                               return -ENOMEM;
78236 +               }
78238 +               if (buffer_locked(bh))
78239 +                       __wait_on_buffer(bh);
78241 +               lock_buffer(bh);
78242 +               if (buf) {
78243 +                       memcpy(bh->b_data + off, buf, op);
78244 +                       buf = Add2Ptr(buf, op);
78245 +               } else {
78246 +                       memset(bh->b_data + off, -1, op);
78247 +               }
78249 +               set_buffer_uptodate(bh);
78250 +               mark_buffer_dirty(bh);
78251 +               unlock_buffer(bh);
78253 +               if (wait) {
78254 +                       int err = sync_dirty_buffer(bh);
78256 +                       if (err) {
78257 +                               ntfs_err(
78258 +                                       sb,
78259 +                                       "failed to sync buffer at block %llx, error %d",
78260 +                                       (u64)block, err);
78261 +                               put_bh(bh);
78262 +                               return err;
78263 +                       }
78264 +               }
78266 +               put_bh(bh);
78268 +               bytes -= op;
78269 +       }
78270 +       return 0;
78273 +int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
78274 +                     u64 vbo, const void *buf, size_t bytes)
78276 +       struct super_block *sb = sbi->sb;
78277 +       u8 cluster_bits = sbi->cluster_bits;
78278 +       u32 off = vbo & sbi->cluster_mask;
78279 +       CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;
78280 +       u64 lbo, len;
78281 +       size_t idx;
78283 +       if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
78284 +               return -ENOENT;
78286 +       if (lcn == SPARSE_LCN)
78287 +               return -EINVAL;
78289 +       lbo = ((u64)lcn << cluster_bits) + off;
78290 +       len = ((u64)clen << cluster_bits) - off;
78292 +       for (;;) {
78293 +               u32 op = len < bytes ? len : bytes;
78294 +               int err = ntfs_sb_write(sb, lbo, op, buf, 0);
78296 +               if (err)
78297 +                       return err;
78299 +               bytes -= op;
78300 +               if (!bytes)
78301 +                       break;
78303 +               vcn_next = vcn + clen;
78304 +               if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
78305 +                   vcn != vcn_next)
78306 +                       return -ENOENT;
78308 +               if (lcn == SPARSE_LCN)
78309 +                       return -EINVAL;
78311 +               if (buf)
78312 +                       buf = Add2Ptr(buf, op);
78314 +               lbo = ((u64)lcn << cluster_bits);
78315 +               len = ((u64)clen << cluster_bits);
78316 +       }
78318 +       return 0;
78321 +struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
78322 +                                  const struct runs_tree *run, u64 vbo)
78324 +       struct super_block *sb = sbi->sb;
78325 +       u8 cluster_bits = sbi->cluster_bits;
78326 +       CLST lcn;
78327 +       u64 lbo;
78329 +       if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
78330 +               return ERR_PTR(-ENOENT);
78332 +       lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);
78334 +       return ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
78337 +int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
78338 +                    u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb)
78340 +       int err;
78341 +       struct super_block *sb = sbi->sb;
78342 +       u32 blocksize = sb->s_blocksize;
78343 +       u8 cluster_bits = sbi->cluster_bits;
78344 +       u32 off = vbo & sbi->cluster_mask;
78345 +       u32 nbh = 0;
78346 +       CLST vcn_next, vcn = vbo >> cluster_bits;
78347 +       CLST lcn, clen;
78348 +       u64 lbo, len;
78349 +       size_t idx;
78350 +       struct buffer_head *bh;
78352 +       if (!run) {
78353 +               /* first reading of $Volume + $MFTMirr + LogFile goes here*/
78354 +               if (vbo > MFT_REC_VOL * sbi->record_size) {
78355 +                       err = -ENOENT;
78356 +                       goto out;
78357 +               }
78359 +               /* use absolute boot's 'MFTCluster' to read record */
78360 +               lbo = vbo + sbi->mft.lbo;
78361 +               len = sbi->record_size;
78362 +       } else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
78363 +               err = -ENOENT;
78364 +               goto out;
78365 +       } else {
78366 +               if (lcn == SPARSE_LCN) {
78367 +                       err = -EINVAL;
78368 +                       goto out;
78369 +               }
78371 +               lbo = ((u64)lcn << cluster_bits) + off;
78372 +               len = ((u64)clen << cluster_bits) - off;
78373 +       }
78375 +       off = lbo & (blocksize - 1);
78376 +       if (nb) {
78377 +               nb->off = off;
78378 +               nb->bytes = bytes;
78379 +       }
78381 +       for (;;) {
78382 +               u32 len32 = len >= bytes ? bytes : len;
78383 +               sector_t block = lbo >> sb->s_blocksize_bits;
78385 +               do {
78386 +                       u32 op = blocksize - off;
78388 +                       if (op > len32)
78389 +                               op = len32;
78391 +                       bh = ntfs_bread(sb, block);
78392 +                       if (!bh) {
78393 +                               err = -EIO;
78394 +                               goto out;
78395 +                       }
78397 +                       if (buf) {
78398 +                               memcpy(buf, bh->b_data + off, op);
78399 +                               buf = Add2Ptr(buf, op);
78400 +                       }
78402 +                       if (!nb) {
78403 +                               put_bh(bh);
78404 +                       } else if (nbh >= ARRAY_SIZE(nb->bh)) {
78405 +                               err = -EINVAL;
78406 +                               goto out;
78407 +                       } else {
78408 +                               nb->bh[nbh++] = bh;
78409 +                               nb->nbufs = nbh;
78410 +                       }
78412 +                       bytes -= op;
78413 +                       if (!bytes)
78414 +                               return 0;
78415 +                       len32 -= op;
78416 +                       block += 1;
78417 +                       off = 0;
78419 +               } while (len32);
78421 +               vcn_next = vcn + clen;
78422 +               if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
78423 +                   vcn != vcn_next) {
78424 +                       err = -ENOENT;
78425 +                       goto out;
78426 +               }
78428 +               if (lcn == SPARSE_LCN) {
78429 +                       err = -EINVAL;
78430 +                       goto out;
78431 +               }
78433 +               lbo = ((u64)lcn << cluster_bits);
78434 +               len = ((u64)clen << cluster_bits);
78435 +       }
78437 +out:
78438 +       if (!nbh)
78439 +               return err;
78441 +       while (nbh) {
78442 +               put_bh(nb->bh[--nbh]);
78443 +               nb->bh[nbh] = NULL;
78444 +       }
78446 +       nb->nbufs = 0;
78447 +       return err;
78450 +/* Returns < 0 if error, 0 if ok, '-E_NTFS_FIXUP' if need to update fixups */
78451 +int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
78452 +                struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
78453 +                struct ntfs_buffers *nb)
78455 +       int err = ntfs_read_run_nb(sbi, run, vbo, rhdr, bytes, nb);
78457 +       if (err)
78458 +               return err;
78459 +       return ntfs_fix_post_read(rhdr, nb->bytes, true);
78462 +int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
78463 +               u32 bytes, struct ntfs_buffers *nb)
78465 +       int err = 0;
78466 +       struct super_block *sb = sbi->sb;
78467 +       u32 blocksize = sb->s_blocksize;
78468 +       u8 cluster_bits = sbi->cluster_bits;
78469 +       CLST vcn_next, vcn = vbo >> cluster_bits;
78470 +       u32 off;
78471 +       u32 nbh = 0;
78472 +       CLST lcn, clen;
78473 +       u64 lbo, len;
78474 +       size_t idx;
78476 +       nb->bytes = bytes;
78478 +       if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
78479 +               err = -ENOENT;
78480 +               goto out;
78481 +       }
78483 +       off = vbo & sbi->cluster_mask;
78484 +       lbo = ((u64)lcn << cluster_bits) + off;
78485 +       len = ((u64)clen << cluster_bits) - off;
78487 +       nb->off = off = lbo & (blocksize - 1);
78489 +       for (;;) {
78490 +               u32 len32 = len < bytes ? len : bytes;
78491 +               sector_t block = lbo >> sb->s_blocksize_bits;
78493 +               do {
78494 +                       u32 op;
78495 +                       struct buffer_head *bh;
78497 +                       if (nbh >= ARRAY_SIZE(nb->bh)) {
78498 +                               err = -EINVAL;
78499 +                               goto out;
78500 +                       }
78502 +                       op = blocksize - off;
78503 +                       if (op > len32)
78504 +                               op = len32;
78506 +                       if (op == blocksize) {
78507 +                               bh = sb_getblk(sb, block);
78508 +                               if (!bh) {
78509 +                                       err = -ENOMEM;
78510 +                                       goto out;
78511 +                               }
78512 +                               if (buffer_locked(bh))
78513 +                                       __wait_on_buffer(bh);
78514 +                               set_buffer_uptodate(bh);
78515 +                       } else {
78516 +                               bh = ntfs_bread(sb, block);
78517 +                               if (!bh) {
78518 +                                       err = -EIO;
78519 +                                       goto out;
78520 +                               }
78521 +                       }
78523 +                       nb->bh[nbh++] = bh;
78524 +                       bytes -= op;
78525 +                       if (!bytes) {
78526 +                               nb->nbufs = nbh;
78527 +                               return 0;
78528 +                       }
78530 +                       block += 1;
78531 +                       len32 -= op;
78532 +                       off = 0;
78533 +               } while (len32);
78535 +               vcn_next = vcn + clen;
78536 +               if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
78537 +                   vcn != vcn_next) {
78538 +                       err = -ENOENT;
78539 +                       goto out;
78540 +               }
78542 +               lbo = ((u64)lcn << cluster_bits);
78543 +               len = ((u64)clen << cluster_bits);
78544 +       }
78546 +out:
78547 +       while (nbh) {
78548 +               put_bh(nb->bh[--nbh]);
78549 +               nb->bh[nbh] = NULL;
78550 +       }
78552 +       nb->nbufs = 0;
78554 +       return err;
78557 +int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
78558 +                 struct ntfs_buffers *nb, int sync)
78560 +       int err = 0;
78561 +       struct super_block *sb = sbi->sb;
78562 +       u32 block_size = sb->s_blocksize;
78563 +       u32 bytes = nb->bytes;
78564 +       u32 off = nb->off;
78565 +       u16 fo = le16_to_cpu(rhdr->fix_off);
78566 +       u16 fn = le16_to_cpu(rhdr->fix_num);
78567 +       u32 idx;
78568 +       __le16 *fixup;
78569 +       __le16 sample;
78571 +       if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
78572 +           fn * SECTOR_SIZE > bytes) {
78573 +               return -EINVAL;
78574 +       }
78576 +       for (idx = 0; bytes && idx < nb->nbufs; idx += 1, off = 0) {
78577 +               u32 op = block_size - off;
78578 +               char *bh_data;
78579 +               struct buffer_head *bh = nb->bh[idx];
78580 +               __le16 *ptr, *end_data;
78582 +               if (op > bytes)
78583 +                       op = bytes;
78585 +               if (buffer_locked(bh))
78586 +                       __wait_on_buffer(bh);
78588 +               lock_buffer(nb->bh[idx]);
78590 +               bh_data = bh->b_data + off;
78591 +               end_data = Add2Ptr(bh_data, op);
78592 +               memcpy(bh_data, rhdr, op);
78594 +               if (!idx) {
78595 +                       u16 t16;
78597 +                       fixup = Add2Ptr(bh_data, fo);
78598 +                       sample = *fixup;
78599 +                       t16 = le16_to_cpu(sample);
78600 +                       if (t16 >= 0x7FFF) {
78601 +                               sample = *fixup = cpu_to_le16(1);
78602 +                       } else {
78603 +                               sample = cpu_to_le16(t16 + 1);
78604 +                               *fixup = sample;
78605 +                       }
78607 +                       *(__le16 *)Add2Ptr(rhdr, fo) = sample;
78608 +               }
78610 +               ptr = Add2Ptr(bh_data, SECTOR_SIZE - sizeof(short));
78612 +               do {
78613 +                       *++fixup = *ptr;
78614 +                       *ptr = sample;
78615 +                       ptr += SECTOR_SIZE / sizeof(short);
78616 +               } while (ptr < end_data);
78618 +               set_buffer_uptodate(bh);
78619 +               mark_buffer_dirty(bh);
78620 +               unlock_buffer(bh);
78622 +               if (sync) {
78623 +                       int err2 = sync_dirty_buffer(bh);
78625 +                       if (!err && err2)
78626 +                               err = err2;
78627 +               }
78629 +               bytes -= op;
78630 +               rhdr = Add2Ptr(rhdr, op);
78631 +       }
78633 +       return err;
78636 +static inline struct bio *ntfs_alloc_bio(u32 nr_vecs)
78638 +       struct bio *bio = bio_alloc(GFP_NOFS | __GFP_HIGH, nr_vecs);
78640 +       if (!bio && (current->flags & PF_MEMALLOC)) {
78641 +               while (!bio && (nr_vecs /= 2))
78642 +                       bio = bio_alloc(GFP_NOFS | __GFP_HIGH, nr_vecs);
78643 +       }
78644 +       return bio;
78647 +/* read/write pages from/to disk*/
78648 +int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
78649 +                  struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
78650 +                  u32 op)
78652 +       int err = 0;
78653 +       struct bio *new, *bio = NULL;
78654 +       struct super_block *sb = sbi->sb;
78655 +       struct block_device *bdev = sb->s_bdev;
78656 +       struct page *page;
78657 +       u8 cluster_bits = sbi->cluster_bits;
78658 +       CLST lcn, clen, vcn, vcn_next;
78659 +       u32 add, off, page_idx;
78660 +       u64 lbo, len;
78661 +       size_t run_idx;
78662 +       struct blk_plug plug;
78664 +       if (!bytes)
78665 +               return 0;
78667 +       blk_start_plug(&plug);
78669 +       /* align vbo and bytes to be 512 bytes aligned */
78670 +       lbo = (vbo + bytes + 511) & ~511ull;
78671 +       vbo = vbo & ~511ull;
78672 +       bytes = lbo - vbo;
78674 +       vcn = vbo >> cluster_bits;
78675 +       if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
78676 +               err = -ENOENT;
78677 +               goto out;
78678 +       }
78679 +       off = vbo & sbi->cluster_mask;
78680 +       page_idx = 0;
78681 +       page = pages[0];
78683 +       for (;;) {
78684 +               lbo = ((u64)lcn << cluster_bits) + off;
78685 +               len = ((u64)clen << cluster_bits) - off;
78686 +new_bio:
78687 +               new = ntfs_alloc_bio(nr_pages - page_idx);
78688 +               if (!new) {
78689 +                       err = -ENOMEM;
78690 +                       goto out;
78691 +               }
78692 +               if (bio) {
78693 +                       bio_chain(bio, new);
78694 +                       submit_bio(bio);
78695 +               }
78696 +               bio = new;
78697 +               bio_set_dev(bio, bdev);
78698 +               bio->bi_iter.bi_sector = lbo >> 9;
78699 +               bio->bi_opf = op;
78701 +               while (len) {
78702 +                       off = vbo & (PAGE_SIZE - 1);
78703 +                       add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;
78705 +                       if (bio_add_page(bio, page, add, off) < add)
78706 +                               goto new_bio;
78708 +                       if (bytes <= add)
78709 +                               goto out;
78710 +                       bytes -= add;
78711 +                       vbo += add;
78713 +                       if (add + off == PAGE_SIZE) {
78714 +                               page_idx += 1;
78715 +                               if (WARN_ON(page_idx >= nr_pages)) {
78716 +                                       err = -EINVAL;
78717 +                                       goto out;
78718 +                               }
78719 +                               page = pages[page_idx];
78720 +                       }
78722 +                       if (len <= add)
78723 +                               break;
78724 +                       len -= add;
78725 +                       lbo += add;
78726 +               }
78728 +               vcn_next = vcn + clen;
78729 +               if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
78730 +                   vcn != vcn_next) {
78731 +                       err = -ENOENT;
78732 +                       goto out;
78733 +               }
78734 +               off = 0;
78735 +       }
78736 +out:
78737 +       if (bio) {
78738 +               if (!err)
78739 +                       err = submit_bio_wait(bio);
78740 +               bio_put(bio);
78741 +       }
78742 +       blk_finish_plug(&plug);
78744 +       return err;
78748 + * Helper for ntfs_loadlog_and_replay
78749 + * fill on-disk logfile range by (-1)
78750 + * this means empty logfile
78751 + */
78752 +int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
78754 +       int err = 0;
78755 +       struct super_block *sb = sbi->sb;
78756 +       struct block_device *bdev = sb->s_bdev;
78757 +       u8 cluster_bits = sbi->cluster_bits;
78758 +       struct bio *new, *bio = NULL;
78759 +       CLST lcn, clen;
78760 +       u64 lbo, len;
78761 +       size_t run_idx;
78762 +       struct page *fill;
78763 +       void *kaddr;
78764 +       struct blk_plug plug;
78766 +       fill = alloc_page(GFP_KERNEL);
78767 +       if (!fill)
78768 +               return -ENOMEM;
78770 +       kaddr = kmap_atomic(fill);
78771 +       memset(kaddr, -1, PAGE_SIZE);
78772 +       kunmap_atomic(kaddr);
78773 +       flush_dcache_page(fill);
78774 +       lock_page(fill);
78776 +       if (!run_lookup_entry(run, 0, &lcn, &clen, &run_idx)) {
78777 +               err = -ENOENT;
78778 +               goto out;
78779 +       }
78781 +       /*
78782 +        * TODO: try blkdev_issue_write_same
78783 +        */
78784 +       blk_start_plug(&plug);
78785 +       do {
78786 +               lbo = (u64)lcn << cluster_bits;
78787 +               len = (u64)clen << cluster_bits;
78788 +new_bio:
78789 +               new = ntfs_alloc_bio(BIO_MAX_VECS);
78790 +               if (!new) {
78791 +                       err = -ENOMEM;
78792 +                       break;
78793 +               }
78794 +               if (bio) {
78795 +                       bio_chain(bio, new);
78796 +                       submit_bio(bio);
78797 +               }
78798 +               bio = new;
78799 +               bio_set_dev(bio, bdev);
78800 +               bio->bi_opf = REQ_OP_WRITE;
78801 +               bio->bi_iter.bi_sector = lbo >> 9;
78803 +               for (;;) {
78804 +                       u32 add = len > PAGE_SIZE ? PAGE_SIZE : len;
78806 +                       if (bio_add_page(bio, fill, add, 0) < add)
78807 +                               goto new_bio;
78809 +                       lbo += add;
78810 +                       if (len <= add)
78811 +                               break;
78812 +                       len -= add;
78813 +               }
78814 +       } while (run_get_entry(run, ++run_idx, NULL, &lcn, &clen));
78816 +       if (bio) {
78817 +               if (!err)
78818 +                       err = submit_bio_wait(bio);
78819 +               bio_put(bio);
78820 +       }
78821 +       blk_finish_plug(&plug);
78822 +out:
78823 +       unlock_page(fill);
78824 +       put_page(fill);
78826 +       return err;
78829 +int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
78830 +                   u64 vbo, u64 *lbo, u64 *bytes)
78832 +       u32 off;
78833 +       CLST lcn, len;
78834 +       u8 cluster_bits = sbi->cluster_bits;
78836 +       if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, &len, NULL))
78837 +               return -ENOENT;
78839 +       off = vbo & sbi->cluster_mask;
78840 +       *lbo = lcn == SPARSE_LCN ? -1 : (((u64)lcn << cluster_bits) + off);
78841 +       *bytes = ((u64)len << cluster_bits) - off;
78843 +       return 0;
78846 +struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno, bool dir)
78848 +       int err = 0;
78849 +       struct super_block *sb = sbi->sb;
78850 +       struct inode *inode = new_inode(sb);
78851 +       struct ntfs_inode *ni;
78853 +       if (!inode)
78854 +               return ERR_PTR(-ENOMEM);
78856 +       ni = ntfs_i(inode);
78858 +       err = mi_format_new(&ni->mi, sbi, rno, dir ? RECORD_FLAG_DIR : 0,
78859 +                           false);
78860 +       if (err)
78861 +               goto out;
78863 +       inode->i_ino = rno;
78864 +       if (insert_inode_locked(inode) < 0) {
78865 +               err = -EIO;
78866 +               goto out;
78867 +       }
78869 +out:
78870 +       if (err) {
78871 +               iput(inode);
78872 +               ni = ERR_PTR(err);
78873 +       }
78874 +       return ni;
78878 + * O:BAG:BAD:(A;OICI;FA;;;WD)
78879 + * owner S-1-5-32-544 (Administrators)
78880 + * group S-1-5-32-544 (Administrators)
78881 + * ACE: allow S-1-1-0 (Everyone) with FILE_ALL_ACCESS
78882 + */
78883 +const u8 s_default_security[] __aligned(8) = {
78884 +       0x01, 0x00, 0x04, 0x80, 0x30, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
78885 +       0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1C, 0x00,
78886 +       0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x14, 0x00, 0xFF, 0x01, 0x1F, 0x00,
78887 +       0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
78888 +       0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x20, 0x00, 0x00, 0x00,
78889 +       0x20, 0x02, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
78890 +       0x20, 0x00, 0x00, 0x00, 0x20, 0x02, 0x00, 0x00,
78893 +static_assert(sizeof(s_default_security) == 0x50);
78895 +static inline u32 sid_length(const struct SID *sid)
78897 +       return struct_size(sid, SubAuthority, sid->SubAuthorityCount);
78901 + * Thanks Mark Harmstone for idea
78902 + */
78903 +static bool is_acl_valid(const struct ACL *acl, u32 len)
78905 +       const struct ACE_HEADER *ace;
78906 +       u32 i;
78907 +       u16 ace_count, ace_size;
78909 +       if (acl->AclRevision != ACL_REVISION &&
78910 +           acl->AclRevision != ACL_REVISION_DS) {
78911 +               /*
78912 +                * This value should be ACL_REVISION, unless the ACL contains an
78913 +                * object-specific ACE, in which case this value must be ACL_REVISION_DS.
78914 +                * All ACEs in an ACL must be at the same revision level.
78915 +                */
78916 +               return false;
78917 +       }
78919 +       if (acl->Sbz1)
78920 +               return false;
78922 +       if (le16_to_cpu(acl->AclSize) > len)
78923 +               return false;
78925 +       if (acl->Sbz2)
78926 +               return false;
78928 +       len -= sizeof(struct ACL);
78929 +       ace = (struct ACE_HEADER *)&acl[1];
78930 +       ace_count = le16_to_cpu(acl->AceCount);
78932 +       for (i = 0; i < ace_count; i++) {
78933 +               if (len < sizeof(struct ACE_HEADER))
78934 +                       return false;
78936 +               ace_size = le16_to_cpu(ace->AceSize);
78937 +               if (len < ace_size)
78938 +                       return false;
78940 +               len -= ace_size;
78941 +               ace = Add2Ptr(ace, ace_size);
78942 +       }
78944 +       return true;
78947 +bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len)
78949 +       u32 sd_owner, sd_group, sd_sacl, sd_dacl;
78951 +       if (len < sizeof(struct SECURITY_DESCRIPTOR_RELATIVE))
78952 +               return false;
78954 +       if (sd->Revision != 1)
78955 +               return false;
78957 +       if (sd->Sbz1)
78958 +               return false;
78960 +       if (!(sd->Control & SE_SELF_RELATIVE))
78961 +               return false;
78963 +       sd_owner = le32_to_cpu(sd->Owner);
78964 +       if (sd_owner) {
78965 +               const struct SID *owner = Add2Ptr(sd, sd_owner);
78967 +               if (sd_owner + offsetof(struct SID, SubAuthority) > len)
78968 +                       return false;
78970 +               if (owner->Revision != 1)
78971 +                       return false;
78973 +               if (sd_owner + sid_length(owner) > len)
78974 +                       return false;
78975 +       }
78977 +       sd_group = le32_to_cpu(sd->Group);
78978 +       if (sd_group) {
78979 +               const struct SID *group = Add2Ptr(sd, sd_group);
78981 +               if (sd_group + offsetof(struct SID, SubAuthority) > len)
78982 +                       return false;
78984 +               if (group->Revision != 1)
78985 +                       return false;
78987 +               if (sd_group + sid_length(group) > len)
78988 +                       return false;
78989 +       }
78991 +       sd_sacl = le32_to_cpu(sd->Sacl);
78992 +       if (sd_sacl) {
78993 +               const struct ACL *sacl = Add2Ptr(sd, sd_sacl);
78995 +               if (sd_sacl + sizeof(struct ACL) > len)
78996 +                       return false;
78998 +               if (!is_acl_valid(sacl, len - sd_sacl))
78999 +                       return false;
79000 +       }
79002 +       sd_dacl = le32_to_cpu(sd->Dacl);
79003 +       if (sd_dacl) {
79004 +               const struct ACL *dacl = Add2Ptr(sd, sd_dacl);
79006 +               if (sd_dacl + sizeof(struct ACL) > len)
79007 +                       return false;
79009 +               if (!is_acl_valid(dacl, len - sd_dacl))
79010 +                       return false;
79011 +       }
79013 +       return true;
79017 + * ntfs_security_init
79018 + *
79019 + * loads and parse $Secure
79020 + */
79021 +int ntfs_security_init(struct ntfs_sb_info *sbi)
79023 +       int err;
79024 +       struct super_block *sb = sbi->sb;
79025 +       struct inode *inode;
79026 +       struct ntfs_inode *ni;
79027 +       struct MFT_REF ref;
79028 +       struct ATTRIB *attr;
79029 +       struct ATTR_LIST_ENTRY *le;
79030 +       u64 sds_size;
79031 +       size_t cnt, off;
79032 +       struct NTFS_DE *ne;
79033 +       struct NTFS_DE_SII *sii_e;
79034 +       struct ntfs_fnd *fnd_sii = NULL;
79035 +       const struct INDEX_ROOT *root_sii;
79036 +       const struct INDEX_ROOT *root_sdh;
79037 +       struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
79038 +       struct ntfs_index *indx_sii = &sbi->security.index_sii;
79040 +       ref.low = cpu_to_le32(MFT_REC_SECURE);
79041 +       ref.high = 0;
79042 +       ref.seq = cpu_to_le16(MFT_REC_SECURE);
79044 +       inode = ntfs_iget5(sb, &ref, &NAME_SECURE);
79045 +       if (IS_ERR(inode)) {
79046 +               err = PTR_ERR(inode);
79047 +               ntfs_err(sb, "Failed to load $Secure.");
79048 +               inode = NULL;
79049 +               goto out;
79050 +       }
79052 +       ni = ntfs_i(inode);
79054 +       le = NULL;
79056 +       attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SDH_NAME,
79057 +                           ARRAY_SIZE(SDH_NAME), NULL, NULL);
79058 +       if (!attr) {
79059 +               err = -EINVAL;
79060 +               goto out;
79061 +       }
79063 +       root_sdh = resident_data(attr);
79064 +       if (root_sdh->type != ATTR_ZERO ||
79065 +           root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH) {
79066 +               err = -EINVAL;
79067 +               goto out;
79068 +       }
79070 +       err = indx_init(indx_sdh, sbi, attr, INDEX_MUTEX_SDH);
79071 +       if (err)
79072 +               goto out;
79074 +       attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME,
79075 +                           ARRAY_SIZE(SII_NAME), NULL, NULL);
79076 +       if (!attr) {
79077 +               err = -EINVAL;
79078 +               goto out;
79079 +       }
79081 +       root_sii = resident_data(attr);
79082 +       if (root_sii->type != ATTR_ZERO ||
79083 +           root_sii->rule != NTFS_COLLATION_TYPE_UINT) {
79084 +               err = -EINVAL;
79085 +               goto out;
79086 +       }
79088 +       err = indx_init(indx_sii, sbi, attr, INDEX_MUTEX_SII);
79089 +       if (err)
79090 +               goto out;
79092 +       fnd_sii = fnd_get();
79093 +       if (!fnd_sii) {
79094 +               err = -ENOMEM;
79095 +               goto out;
79096 +       }
79098 +       sds_size = inode->i_size;
79100 +       /* Find the last valid Id */
79101 +       sbi->security.next_id = SECURITY_ID_FIRST;
79102 +       /* Always write new security at the end of bucket */
79103 +       sbi->security.next_off =
79104 +               Quad2Align(sds_size - SecurityDescriptorsBlockSize);
79106 +       cnt = 0;
79107 +       off = 0;
79108 +       ne = NULL;
79110 +       for (;;) {
79111 +               u32 next_id;
79113 +               err = indx_find_raw(indx_sii, ni, root_sii, &ne, &off, fnd_sii);
79114 +               if (err || !ne)
79115 +                       break;
79117 +               sii_e = (struct NTFS_DE_SII *)ne;
79118 +               if (le16_to_cpu(ne->view.data_size) < SIZEOF_SECURITY_HDR)
79119 +                       continue;
79121 +               next_id = le32_to_cpu(sii_e->sec_id) + 1;
79122 +               if (next_id >= sbi->security.next_id)
79123 +                       sbi->security.next_id = next_id;
79125 +               cnt += 1;
79126 +       }
79128 +       sbi->security.ni = ni;
79129 +       inode = NULL;
79130 +out:
79131 +       iput(inode);
79132 +       fnd_put(fnd_sii);
79134 +       return err;
79138 + * ntfs_get_security_by_id
79139 + *
79140 + * reads security descriptor by id
79141 + */
79142 +int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
79143 +                           struct SECURITY_DESCRIPTOR_RELATIVE **sd,
79144 +                           size_t *size)
79146 +       int err;
79147 +       int diff;
79148 +       struct ntfs_inode *ni = sbi->security.ni;
79149 +       struct ntfs_index *indx = &sbi->security.index_sii;
79150 +       void *p = NULL;
79151 +       struct NTFS_DE_SII *sii_e;
79152 +       struct ntfs_fnd *fnd_sii;
79153 +       struct SECURITY_HDR d_security;
79154 +       const struct INDEX_ROOT *root_sii;
79155 +       u32 t32;
79157 +       *sd = NULL;
79159 +       mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
79161 +       fnd_sii = fnd_get();
79162 +       if (!fnd_sii) {
79163 +               err = -ENOMEM;
79164 +               goto out;
79165 +       }
79167 +       root_sii = indx_get_root(indx, ni, NULL, NULL);
79168 +       if (!root_sii) {
79169 +               err = -EINVAL;
79170 +               goto out;
79171 +       }
79173 +       /* Try to find this SECURITY descriptor in SII indexes */
79174 +       err = indx_find(indx, ni, root_sii, &security_id, sizeof(security_id),
79175 +                       NULL, &diff, (struct NTFS_DE **)&sii_e, fnd_sii);
79176 +       if (err)
79177 +               goto out;
79179 +       if (diff)
79180 +               goto out;
79182 +       t32 = le32_to_cpu(sii_e->sec_hdr.size);
79183 +       if (t32 < SIZEOF_SECURITY_HDR) {
79184 +               err = -EINVAL;
79185 +               goto out;
79186 +       }
79188 +       if (t32 > SIZEOF_SECURITY_HDR + 0x10000) {
79189 +               /*
79190 +                * looks like too big security. 0x10000 - is arbitrary big number
79191 +                */
79192 +               err = -EFBIG;
79193 +               goto out;
79194 +       }
79196 +       *size = t32 - SIZEOF_SECURITY_HDR;
79198 +       p = ntfs_malloc(*size);
79199 +       if (!p) {
79200 +               err = -ENOMEM;
79201 +               goto out;
79202 +       }
79204 +       err = ntfs_read_run_nb(sbi, &ni->file.run,
79205 +                              le64_to_cpu(sii_e->sec_hdr.off), &d_security,
79206 +                              sizeof(d_security), NULL);
79207 +       if (err)
79208 +               goto out;
79210 +       if (memcmp(&d_security, &sii_e->sec_hdr, SIZEOF_SECURITY_HDR)) {
79211 +               err = -EINVAL;
79212 +               goto out;
79213 +       }
79215 +       err = ntfs_read_run_nb(sbi, &ni->file.run,
79216 +                              le64_to_cpu(sii_e->sec_hdr.off) +
79217 +                                      SIZEOF_SECURITY_HDR,
79218 +                              p, *size, NULL);
79219 +       if (err)
79220 +               goto out;
79222 +       *sd = p;
79223 +       p = NULL;
79225 +out:
79226 +       ntfs_free(p);
79227 +       fnd_put(fnd_sii);
79228 +       ni_unlock(ni);
79230 +       return err;
79234 + * ntfs_insert_security
79235 + *
79236 + * inserts security descriptor into $Secure::SDS
79237 + *
79238 + * SECURITY Descriptor Stream data is organized into chunks of 256K bytes
79239 + * and it contains a mirror copy of each security descriptor.  When writing
79240 + * to a security descriptor at location X, another copy will be written at
79241 + * location (X+256K).
79242 + * When writing a security descriptor that will cross the 256K boundary,
79243 + * the pointer will be advanced by 256K to skip
79244 + * over the mirror portion.
79245 + */
79246 +int ntfs_insert_security(struct ntfs_sb_info *sbi,
79247 +                        const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
79248 +                        u32 size_sd, __le32 *security_id, bool *inserted)
79250 +       int err, diff;
79251 +       struct ntfs_inode *ni = sbi->security.ni;
79252 +       struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
79253 +       struct ntfs_index *indx_sii = &sbi->security.index_sii;
79254 +       struct NTFS_DE_SDH *e;
79255 +       struct NTFS_DE_SDH sdh_e;
79256 +       struct NTFS_DE_SII sii_e;
79257 +       struct SECURITY_HDR *d_security;
79258 +       u32 new_sec_size = size_sd + SIZEOF_SECURITY_HDR;
79259 +       u32 aligned_sec_size = Quad2Align(new_sec_size);
79260 +       struct SECURITY_KEY hash_key;
79261 +       struct ntfs_fnd *fnd_sdh = NULL;
79262 +       const struct INDEX_ROOT *root_sdh;
79263 +       const struct INDEX_ROOT *root_sii;
79264 +       u64 mirr_off, new_sds_size;
79265 +       u32 next, left;
79267 +       static_assert((1 << Log2OfSecurityDescriptorsBlockSize) ==
79268 +                     SecurityDescriptorsBlockSize);
79270 +       hash_key.hash = security_hash(sd, size_sd);
79271 +       hash_key.sec_id = SECURITY_ID_INVALID;
79273 +       if (inserted)
79274 +               *inserted = false;
79275 +       *security_id = SECURITY_ID_INVALID;
79277 +       /* Allocate a temporal buffer*/
79278 +       d_security = ntfs_zalloc(aligned_sec_size);
79279 +       if (!d_security)
79280 +               return -ENOMEM;
79282 +       mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
79284 +       fnd_sdh = fnd_get();
79285 +       if (!fnd_sdh) {
79286 +               err = -ENOMEM;
79287 +               goto out;
79288 +       }
79290 +       root_sdh = indx_get_root(indx_sdh, ni, NULL, NULL);
79291 +       if (!root_sdh) {
79292 +               err = -EINVAL;
79293 +               goto out;
79294 +       }
79296 +       root_sii = indx_get_root(indx_sii, ni, NULL, NULL);
79297 +       if (!root_sii) {
79298 +               err = -EINVAL;
79299 +               goto out;
79300 +       }
79302 +       /*
79303 +        * Check if such security already exists
79304 +        * use "SDH" and hash -> to get the offset in "SDS"
79305 +        */
79306 +       err = indx_find(indx_sdh, ni, root_sdh, &hash_key, sizeof(hash_key),
79307 +                       &d_security->key.sec_id, &diff, (struct NTFS_DE **)&e,
79308 +                       fnd_sdh);
79309 +       if (err)
79310 +               goto out;
79312 +       while (e) {
79313 +               if (le32_to_cpu(e->sec_hdr.size) == new_sec_size) {
79314 +                       err = ntfs_read_run_nb(sbi, &ni->file.run,
79315 +                                              le64_to_cpu(e->sec_hdr.off),
79316 +                                              d_security, new_sec_size, NULL);
79317 +                       if (err)
79318 +                               goto out;
79320 +                       if (le32_to_cpu(d_security->size) == new_sec_size &&
79321 +                           d_security->key.hash == hash_key.hash &&
79322 +                           !memcmp(d_security + 1, sd, size_sd)) {
79323 +                               *security_id = d_security->key.sec_id;
79324 +                               /*such security already exists*/
79325 +                               err = 0;
79326 +                               goto out;
79327 +                       }
79328 +               }
79330 +               err = indx_find_sort(indx_sdh, ni, root_sdh,
79331 +                                    (struct NTFS_DE **)&e, fnd_sdh);
79332 +               if (err)
79333 +                       goto out;
79335 +               if (!e || e->key.hash != hash_key.hash)
79336 +                       break;
79337 +       }
79339 +       /* Zero unused space */
79340 +       next = sbi->security.next_off & (SecurityDescriptorsBlockSize - 1);
79341 +       left = SecurityDescriptorsBlockSize - next;
79343 +       /* Zero gap until SecurityDescriptorsBlockSize */
79344 +       if (left < new_sec_size) {
79345 +               /* zero "left" bytes from sbi->security.next_off */
79346 +               sbi->security.next_off += SecurityDescriptorsBlockSize + left;
79347 +       }
79349 +       /* Zero tail of previous security */
79350 +       //used = ni->vfs_inode.i_size & (SecurityDescriptorsBlockSize - 1);
79352 +       /*
79353 +        * Example:
79354 +        * 0x40438 == ni->vfs_inode.i_size
79355 +        * 0x00440 == sbi->security.next_off
79356 +        * need to zero [0x438-0x440)
79357 +        * if (next > used) {
79358 +        *  u32 tozero = next - used;
79359 +        *  zero "tozero" bytes from sbi->security.next_off - tozero
79360 +        */
79362 +       /* format new security descriptor */
79363 +       d_security->key.hash = hash_key.hash;
79364 +       d_security->key.sec_id = cpu_to_le32(sbi->security.next_id);
79365 +       d_security->off = cpu_to_le64(sbi->security.next_off);
79366 +       d_security->size = cpu_to_le32(new_sec_size);
79367 +       memcpy(d_security + 1, sd, size_sd);
79369 +       /* Write main SDS bucket */
79370 +       err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
79371 +                               d_security, aligned_sec_size);
79373 +       if (err)
79374 +               goto out;
79376 +       mirr_off = sbi->security.next_off + SecurityDescriptorsBlockSize;
79377 +       new_sds_size = mirr_off + aligned_sec_size;
79379 +       if (new_sds_size > ni->vfs_inode.i_size) {
79380 +               err = attr_set_size(ni, ATTR_DATA, SDS_NAME,
79381 +                                   ARRAY_SIZE(SDS_NAME), &ni->file.run,
79382 +                                   new_sds_size, &new_sds_size, false, NULL);
79383 +               if (err)
79384 +                       goto out;
79385 +       }
79387 +       /* Write copy SDS bucket */
79388 +       err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
79389 +                               aligned_sec_size);
79390 +       if (err)
79391 +               goto out;
79393 +       /* Fill SII entry */
79394 +       sii_e.de.view.data_off =
79395 +               cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
79396 +       sii_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
79397 +       sii_e.de.view.res = 0;
79398 +       sii_e.de.size = cpu_to_le16(SIZEOF_SII_DIRENTRY);
79399 +       sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
79400 +       sii_e.de.flags = 0;
79401 +       sii_e.de.res = 0;
79402 +       sii_e.sec_id = d_security->key.sec_id;
79403 +       memcpy(&sii_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
79405 +       err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL);
79406 +       if (err)
79407 +               goto out;
79409 +       /* Fill SDH entry */
79410 +       sdh_e.de.view.data_off =
79411 +               cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
79412 +       sdh_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
79413 +       sdh_e.de.view.res = 0;
79414 +       sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
79415 +       sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
79416 +       sdh_e.de.flags = 0;
79417 +       sdh_e.de.res = 0;
79418 +       sdh_e.key.hash = d_security->key.hash;
79419 +       sdh_e.key.sec_id = d_security->key.sec_id;
79420 +       memcpy(&sdh_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
79421 +       sdh_e.magic[0] = cpu_to_le16('I');
79422 +       sdh_e.magic[1] = cpu_to_le16('I');
79424 +       fnd_clear(fnd_sdh);
79425 +       err = indx_insert_entry(indx_sdh, ni, &sdh_e.de, (void *)(size_t)1,
79426 +                               fnd_sdh);
79427 +       if (err)
79428 +               goto out;
79430 +       *security_id = d_security->key.sec_id;
79431 +       if (inserted)
79432 +               *inserted = true;
79434 +       /* Update Id and offset for next descriptor */
79435 +       sbi->security.next_id += 1;
79436 +       sbi->security.next_off += aligned_sec_size;
79438 +out:
79439 +       fnd_put(fnd_sdh);
79440 +       mark_inode_dirty(&ni->vfs_inode);
79441 +       ni_unlock(ni);
79442 +       ntfs_free(d_security);
79444 +       return err;
79448 + * ntfs_reparse_init
79449 + *
79450 + * loads and parse $Extend/$Reparse
79451 + */
79452 +int ntfs_reparse_init(struct ntfs_sb_info *sbi)
79454 +       int err;
79455 +       struct ntfs_inode *ni = sbi->reparse.ni;
79456 +       struct ntfs_index *indx = &sbi->reparse.index_r;
79457 +       struct ATTRIB *attr;
79458 +       struct ATTR_LIST_ENTRY *le;
79459 +       const struct INDEX_ROOT *root_r;
79461 +       if (!ni)
79462 +               return 0;
79464 +       le = NULL;
79465 +       attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SR_NAME,
79466 +                           ARRAY_SIZE(SR_NAME), NULL, NULL);
79467 +       if (!attr) {
79468 +               err = -EINVAL;
79469 +               goto out;
79470 +       }
79472 +       root_r = resident_data(attr);
79473 +       if (root_r->type != ATTR_ZERO ||
79474 +           root_r->rule != NTFS_COLLATION_TYPE_UINTS) {
79475 +               err = -EINVAL;
79476 +               goto out;
79477 +       }
79479 +       err = indx_init(indx, sbi, attr, INDEX_MUTEX_SR);
79480 +       if (err)
79481 +               goto out;
79483 +out:
79484 +       return err;
79488 + * ntfs_objid_init
79489 + *
79490 + * loads and parse $Extend/$ObjId
79491 + */
79492 +int ntfs_objid_init(struct ntfs_sb_info *sbi)
79494 +       int err;
79495 +       struct ntfs_inode *ni = sbi->objid.ni;
79496 +       struct ntfs_index *indx = &sbi->objid.index_o;
79497 +       struct ATTRIB *attr;
79498 +       struct ATTR_LIST_ENTRY *le;
79499 +       const struct INDEX_ROOT *root;
79501 +       if (!ni)
79502 +               return 0;
79504 +       le = NULL;
79505 +       attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SO_NAME,
79506 +                           ARRAY_SIZE(SO_NAME), NULL, NULL);
79507 +       if (!attr) {
79508 +               err = -EINVAL;
79509 +               goto out;
79510 +       }
79512 +       root = resident_data(attr);
79513 +       if (root->type != ATTR_ZERO ||
79514 +           root->rule != NTFS_COLLATION_TYPE_UINTS) {
79515 +               err = -EINVAL;
79516 +               goto out;
79517 +       }
79519 +       err = indx_init(indx, sbi, attr, INDEX_MUTEX_SO);
79520 +       if (err)
79521 +               goto out;
79523 +out:
79524 +       return err;
79527 +int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid)
79529 +       int err;
79530 +       struct ntfs_inode *ni = sbi->objid.ni;
79531 +       struct ntfs_index *indx = &sbi->objid.index_o;
79533 +       if (!ni)
79534 +               return -EINVAL;
79536 +       mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_OBJID);
79538 +       err = indx_delete_entry(indx, ni, guid, sizeof(*guid), NULL);
79540 +       mark_inode_dirty(&ni->vfs_inode);
79541 +       ni_unlock(ni);
79543 +       return err;
79546 +int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
79547 +                       const struct MFT_REF *ref)
79549 +       int err;
79550 +       struct ntfs_inode *ni = sbi->reparse.ni;
79551 +       struct ntfs_index *indx = &sbi->reparse.index_r;
79552 +       struct NTFS_DE_R re;
79554 +       if (!ni)
79555 +               return -EINVAL;
79557 +       memset(&re, 0, sizeof(re));
79559 +       re.de.view.data_off = cpu_to_le16(offsetof(struct NTFS_DE_R, zero));
79560 +       re.de.size = cpu_to_le16(sizeof(struct NTFS_DE_R));
79561 +       re.de.key_size = cpu_to_le16(sizeof(re.key));
79563 +       re.key.ReparseTag = rtag;
79564 +       memcpy(&re.key.ref, ref, sizeof(*ref));
79566 +       mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
79568 +       err = indx_insert_entry(indx, ni, &re.de, NULL, NULL);
79570 +       mark_inode_dirty(&ni->vfs_inode);
79571 +       ni_unlock(ni);
79573 +       return err;
79576 +int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
79577 +                       const struct MFT_REF *ref)
79579 +       int err, diff;
79580 +       struct ntfs_inode *ni = sbi->reparse.ni;
79581 +       struct ntfs_index *indx = &sbi->reparse.index_r;
79582 +       struct ntfs_fnd *fnd = NULL;
79583 +       struct REPARSE_KEY rkey;
79584 +       struct NTFS_DE_R *re;
79585 +       struct INDEX_ROOT *root_r;
79587 +       if (!ni)
79588 +               return -EINVAL;
79590 +       rkey.ReparseTag = rtag;
79591 +       rkey.ref = *ref;
79593 +       mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
79595 +       if (rtag) {
79596 +               err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
79597 +               goto out1;
79598 +       }
79600 +       fnd = fnd_get();
79601 +       if (!fnd) {
79602 +               err = -ENOMEM;
79603 +               goto out1;
79604 +       }
79606 +       root_r = indx_get_root(indx, ni, NULL, NULL);
79607 +       if (!root_r) {
79608 +               err = -EINVAL;
79609 +               goto out;
79610 +       }
79612 +       /* 1 - forces to ignore rkey.ReparseTag when comparing keys */
79613 +       err = indx_find(indx, ni, root_r, &rkey, sizeof(rkey), (void *)1, &diff,
79614 +                       (struct NTFS_DE **)&re, fnd);
79615 +       if (err)
79616 +               goto out;
79618 +       if (memcmp(&re->key.ref, ref, sizeof(*ref))) {
79619 +               /* Impossible. Looks like volume corrupt?*/
79620 +               goto out;
79621 +       }
79623 +       memcpy(&rkey, &re->key, sizeof(rkey));
79625 +       fnd_put(fnd);
79626 +       fnd = NULL;
79628 +       err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
79629 +       if (err)
79630 +               goto out;
79632 +out:
79633 +       fnd_put(fnd);
79635 +out1:
79636 +       mark_inode_dirty(&ni->vfs_inode);
79637 +       ni_unlock(ni);
79639 +       return err;
79642 +static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
79643 +                                         CLST len)
79645 +       ntfs_unmap_meta(sbi->sb, lcn, len);
79646 +       ntfs_discard(sbi, lcn, len);
79649 +void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
79651 +       CLST end, i;
79652 +       struct wnd_bitmap *wnd = &sbi->used.bitmap;
79654 +       down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
79655 +       if (!wnd_is_used(wnd, lcn, len)) {
79656 +               ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
79658 +               end = lcn + len;
79659 +               len = 0;
79660 +               for (i = lcn; i < end; i++) {
79661 +                       if (wnd_is_used(wnd, i, 1)) {
79662 +                               if (!len)
79663 +                                       lcn = i;
79664 +                               len += 1;
79665 +                               continue;
79666 +                       }
79668 +                       if (!len)
79669 +                               continue;
79671 +                       if (trim)
79672 +                               ntfs_unmap_and_discard(sbi, lcn, len);
79674 +                       wnd_set_free(wnd, lcn, len);
79675 +                       len = 0;
79676 +               }
79678 +               if (!len)
79679 +                       goto out;
79680 +       }
79682 +       if (trim)
79683 +               ntfs_unmap_and_discard(sbi, lcn, len);
79684 +       wnd_set_free(wnd, lcn, len);
79686 +out:
79687 +       up_write(&wnd->rw_lock);
79691 + * run_deallocate
79692 + *
79693 + * deallocate clusters
79694 + */
79695 +int run_deallocate(struct ntfs_sb_info *sbi, struct runs_tree *run, bool trim)
79697 +       CLST lcn, len;
79698 +       size_t idx = 0;
79700 +       while (run_get_entry(run, idx++, NULL, &lcn, &len)) {
79701 +               if (lcn == SPARSE_LCN)
79702 +                       continue;
79704 +               mark_as_free_ex(sbi, lcn, len, trim);
79705 +       }
79707 +       return 0;
79709 diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
79710 new file mode 100644
79711 index 000000000000..931a7241ef00
79712 --- /dev/null
79713 +++ b/fs/ntfs3/index.c
79714 @@ -0,0 +1,2641 @@
79715 +// SPDX-License-Identifier: GPL-2.0
79717 + *
79718 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
79719 + *
79720 + */
79722 +#include <linux/blkdev.h>
79723 +#include <linux/buffer_head.h>
79724 +#include <linux/fs.h>
79725 +#include <linux/nls.h>
79727 +#include "debug.h"
79728 +#include "ntfs.h"
79729 +#include "ntfs_fs.h"
79731 +static const struct INDEX_NAMES {
79732 +       const __le16 *name;
79733 +       u8 name_len;
79734 +} s_index_names[INDEX_MUTEX_TOTAL] = {
79735 +       { I30_NAME, ARRAY_SIZE(I30_NAME) }, { SII_NAME, ARRAY_SIZE(SII_NAME) },
79736 +       { SDH_NAME, ARRAY_SIZE(SDH_NAME) }, { SO_NAME, ARRAY_SIZE(SO_NAME) },
79737 +       { SQ_NAME, ARRAY_SIZE(SQ_NAME) },   { SR_NAME, ARRAY_SIZE(SR_NAME) },
79741 + * compare two names in index
79742 + * if l1 != 0
79743 + *   both names are little endian on-disk ATTR_FILE_NAME structs
79744 + * else
79745 + *   key1 - cpu_str, key2 - ATTR_FILE_NAME
79746 + */
79747 +static int cmp_fnames(const void *key1, size_t l1, const void *key2, size_t l2,
79748 +                     const void *data)
79750 +       const struct ATTR_FILE_NAME *f2 = key2;
79751 +       const struct ntfs_sb_info *sbi = data;
79752 +       const struct ATTR_FILE_NAME *f1;
79753 +       u16 fsize2;
79754 +       bool both_case;
79756 +       if (l2 <= offsetof(struct ATTR_FILE_NAME, name))
79757 +               return -1;
79759 +       fsize2 = fname_full_size(f2);
79760 +       if (l2 < fsize2)
79761 +               return -1;
79763 +       both_case = f2->type != FILE_NAME_DOS /*&& !sbi->options.nocase*/;
79764 +       if (!l1) {
79765 +               const struct le_str *s2 = (struct le_str *)&f2->name_len;
79767 +               /*
79768 +                * If names are equal (case insensitive)
79769 +                * try to compare it case sensitive
79770 +                */
79771 +               return ntfs_cmp_names_cpu(key1, s2, sbi->upcase, both_case);
79772 +       }
79774 +       f1 = key1;
79775 +       return ntfs_cmp_names(f1->name, f1->name_len, f2->name, f2->name_len,
79776 +                             sbi->upcase, both_case);
79779 +/* $SII of $Secure and $Q of Quota */
79780 +static int cmp_uint(const void *key1, size_t l1, const void *key2, size_t l2,
79781 +                   const void *data)
79783 +       const u32 *k1 = key1;
79784 +       const u32 *k2 = key2;
79786 +       if (l2 < sizeof(u32))
79787 +               return -1;
79789 +       if (*k1 < *k2)
79790 +               return -1;
79791 +       if (*k1 > *k2)
79792 +               return 1;
79793 +       return 0;
79796 +/* $SDH of $Secure */
79797 +static int cmp_sdh(const void *key1, size_t l1, const void *key2, size_t l2,
79798 +                  const void *data)
79800 +       const struct SECURITY_KEY *k1 = key1;
79801 +       const struct SECURITY_KEY *k2 = key2;
79802 +       u32 t1, t2;
79804 +       if (l2 < sizeof(struct SECURITY_KEY))
79805 +               return -1;
79807 +       t1 = le32_to_cpu(k1->hash);
79808 +       t2 = le32_to_cpu(k2->hash);
79810 +       /* First value is a hash value itself */
79811 +       if (t1 < t2)
79812 +               return -1;
79813 +       if (t1 > t2)
79814 +               return 1;
79816 +       /* Second value is security Id */
79817 +       if (data) {
79818 +               t1 = le32_to_cpu(k1->sec_id);
79819 +               t2 = le32_to_cpu(k2->sec_id);
79820 +               if (t1 < t2)
79821 +                       return -1;
79822 +               if (t1 > t2)
79823 +                       return 1;
79824 +       }
79826 +       return 0;
79829 +/* $O of ObjId and "$R" for Reparse */
79830 +static int cmp_uints(const void *key1, size_t l1, const void *key2, size_t l2,
79831 +                    const void *data)
79833 +       const __le32 *k1 = key1;
79834 +       const __le32 *k2 = key2;
79835 +       size_t count;
79837 +       if ((size_t)data == 1) {
79838 +               /*
79839 +                * ni_delete_all -> ntfs_remove_reparse -> delete all with this reference
79840 +                * k1, k2 - pointers to REPARSE_KEY
79841 +                */
79843 +               k1 += 1; // skip REPARSE_KEY.ReparseTag
79844 +               k2 += 1; // skip REPARSE_KEY.ReparseTag
79845 +               if (l2 <= sizeof(int))
79846 +                       return -1;
79847 +               l2 -= sizeof(int);
79848 +               if (l1 <= sizeof(int))
79849 +                       return 1;
79850 +               l1 -= sizeof(int);
79851 +       }
79853 +       if (l2 < sizeof(int))
79854 +               return -1;
79856 +       for (count = min(l1, l2) >> 2; count > 0; --count, ++k1, ++k2) {
79857 +               u32 t1 = le32_to_cpu(*k1);
79858 +               u32 t2 = le32_to_cpu(*k2);
79860 +               if (t1 > t2)
79861 +                       return 1;
79862 +               if (t1 < t2)
79863 +                       return -1;
79864 +       }
79866 +       if (l1 > l2)
79867 +               return 1;
79868 +       if (l1 < l2)
79869 +               return -1;
79871 +       return 0;
79874 +static inline NTFS_CMP_FUNC get_cmp_func(const struct INDEX_ROOT *root)
79876 +       switch (root->type) {
79877 +       case ATTR_NAME:
79878 +               if (root->rule == NTFS_COLLATION_TYPE_FILENAME)
79879 +                       return &cmp_fnames;
79880 +               break;
79881 +       case ATTR_ZERO:
79882 +               switch (root->rule) {
79883 +               case NTFS_COLLATION_TYPE_UINT:
79884 +                       return &cmp_uint;
79885 +               case NTFS_COLLATION_TYPE_SECURITY_HASH:
79886 +                       return &cmp_sdh;
79887 +               case NTFS_COLLATION_TYPE_UINTS:
79888 +                       return &cmp_uints;
79889 +               default:
79890 +                       break;
79891 +               }
79892 +       default:
79893 +               break;
79894 +       }
79896 +       return NULL;
79899 +struct bmp_buf {
79900 +       struct ATTRIB *b;
79901 +       struct mft_inode *mi;
79902 +       struct buffer_head *bh;
79903 +       ulong *buf;
79904 +       size_t bit;
79905 +       u32 nbits;
79906 +       u64 new_valid;
79909 +static int bmp_buf_get(struct ntfs_index *indx, struct ntfs_inode *ni,
79910 +                      size_t bit, struct bmp_buf *bbuf)
79912 +       struct ATTRIB *b;
79913 +       size_t data_size, valid_size, vbo, off = bit >> 3;
79914 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
79915 +       CLST vcn = off >> sbi->cluster_bits;
79916 +       struct ATTR_LIST_ENTRY *le = NULL;
79917 +       struct buffer_head *bh;
79918 +       struct super_block *sb;
79919 +       u32 blocksize;
79920 +       const struct INDEX_NAMES *in = &s_index_names[indx->type];
79922 +       bbuf->bh = NULL;
79924 +       b = ni_find_attr(ni, NULL, &le, ATTR_BITMAP, in->name, in->name_len,
79925 +                        &vcn, &bbuf->mi);
79926 +       bbuf->b = b;
79927 +       if (!b)
79928 +               return -EINVAL;
79930 +       if (!b->non_res) {
79931 +               data_size = le32_to_cpu(b->res.data_size);
79933 +               if (off >= data_size)
79934 +                       return -EINVAL;
79936 +               bbuf->buf = (ulong *)resident_data(b);
79937 +               bbuf->bit = 0;
79938 +               bbuf->nbits = data_size * 8;
79940 +               return 0;
79941 +       }
79943 +       data_size = le64_to_cpu(b->nres.data_size);
79944 +       if (WARN_ON(off >= data_size)) {
79945 +               /* looks like filesystem error */
79946 +               return -EINVAL;
79947 +       }
79949 +       valid_size = le64_to_cpu(b->nres.valid_size);
79951 +       bh = ntfs_bread_run(sbi, &indx->bitmap_run, off);
79952 +       if (!bh)
79953 +               return -EIO;
79955 +       if (IS_ERR(bh))
79956 +               return PTR_ERR(bh);
79958 +       bbuf->bh = bh;
79960 +       if (buffer_locked(bh))
79961 +               __wait_on_buffer(bh);
79963 +       lock_buffer(bh);
79965 +       sb = sbi->sb;
79966 +       blocksize = sb->s_blocksize;
79968 +       vbo = off & ~(size_t)sbi->block_mask;
79970 +       bbuf->new_valid = vbo + blocksize;
79971 +       if (bbuf->new_valid <= valid_size)
79972 +               bbuf->new_valid = 0;
79973 +       else if (bbuf->new_valid > data_size)
79974 +               bbuf->new_valid = data_size;
79976 +       if (vbo >= valid_size) {
79977 +               memset(bh->b_data, 0, blocksize);
79978 +       } else if (vbo + blocksize > valid_size) {
79979 +               u32 voff = valid_size & sbi->block_mask;
79981 +               memset(bh->b_data + voff, 0, blocksize - voff);
79982 +       }
79984 +       bbuf->buf = (ulong *)bh->b_data;
79985 +       bbuf->bit = 8 * (off & ~(size_t)sbi->block_mask);
79986 +       bbuf->nbits = 8 * blocksize;
79988 +       return 0;
79991 +static void bmp_buf_put(struct bmp_buf *bbuf, bool dirty)
79993 +       struct buffer_head *bh = bbuf->bh;
79994 +       struct ATTRIB *b = bbuf->b;
79996 +       if (!bh) {
79997 +               if (b && !b->non_res && dirty)
79998 +                       bbuf->mi->dirty = true;
79999 +               return;
80000 +       }
80002 +       if (!dirty)
80003 +               goto out;
80005 +       if (bbuf->new_valid) {
80006 +               b->nres.valid_size = cpu_to_le64(bbuf->new_valid);
80007 +               bbuf->mi->dirty = true;
80008 +       }
80010 +       set_buffer_uptodate(bh);
80011 +       mark_buffer_dirty(bh);
80013 +out:
80014 +       unlock_buffer(bh);
80015 +       put_bh(bh);
80019 + * indx_mark_used
80020 + *
80021 + * marks the bit 'bit' as used
80022 + */
80023 +static int indx_mark_used(struct ntfs_index *indx, struct ntfs_inode *ni,
80024 +                         size_t bit)
80026 +       int err;
80027 +       struct bmp_buf bbuf;
80029 +       err = bmp_buf_get(indx, ni, bit, &bbuf);
80030 +       if (err)
80031 +               return err;
80033 +       __set_bit(bit - bbuf.bit, bbuf.buf);
80035 +       bmp_buf_put(&bbuf, true);
80037 +       return 0;
80041 + * indx_mark_free
80042 + *
80043 + * marks the bit 'bit' as free
80044 + */
80045 +static int indx_mark_free(struct ntfs_index *indx, struct ntfs_inode *ni,
80046 +                         size_t bit)
80048 +       int err;
80049 +       struct bmp_buf bbuf;
80051 +       err = bmp_buf_get(indx, ni, bit, &bbuf);
80052 +       if (err)
80053 +               return err;
80055 +       __clear_bit(bit - bbuf.bit, bbuf.buf);
80057 +       bmp_buf_put(&bbuf, true);
80059 +       return 0;
80063 + * if ntfs_readdir calls this function (indx_used_bit -> scan_nres_bitmap),
80064 + * inode is shared locked and no ni_lock
80065 + * use rw_semaphore for read/write access to bitmap_run
80066 + */
80067 +static int scan_nres_bitmap(struct ntfs_inode *ni, struct ATTRIB *bitmap,
80068 +                           struct ntfs_index *indx, size_t from,
80069 +                           bool (*fn)(const ulong *buf, u32 bit, u32 bits,
80070 +                                      size_t *ret),
80071 +                           size_t *ret)
80073 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
80074 +       struct super_block *sb = sbi->sb;
80075 +       struct runs_tree *run = &indx->bitmap_run;
80076 +       struct rw_semaphore *lock = &indx->run_lock;
80077 +       u32 nbits = sb->s_blocksize * 8;
80078 +       u32 blocksize = sb->s_blocksize;
80079 +       u64 valid_size = le64_to_cpu(bitmap->nres.valid_size);
80080 +       u64 data_size = le64_to_cpu(bitmap->nres.data_size);
80081 +       sector_t eblock = bytes_to_block(sb, data_size);
80082 +       size_t vbo = from >> 3;
80083 +       sector_t blk = (vbo & sbi->cluster_mask) >> sb->s_blocksize_bits;
80084 +       sector_t vblock = vbo >> sb->s_blocksize_bits;
80085 +       sector_t blen, block;
80086 +       CLST lcn, clen, vcn, vcn_next;
80087 +       size_t idx;
80088 +       struct buffer_head *bh;
80089 +       bool ok;
80091 +       *ret = MINUS_ONE_T;
80093 +       if (vblock >= eblock)
80094 +               return 0;
80096 +       from &= nbits - 1;
80097 +       vcn = vbo >> sbi->cluster_bits;
80099 +       down_read(lock);
80100 +       ok = run_lookup_entry(run, vcn, &lcn, &clen, &idx);
80101 +       up_read(lock);
80103 +next_run:
80104 +       if (!ok) {
80105 +               int err;
80106 +               const struct INDEX_NAMES *name = &s_index_names[indx->type];
80108 +               down_write(lock);
80109 +               err = attr_load_runs_vcn(ni, ATTR_BITMAP, name->name,
80110 +                                        name->name_len, run, vcn);
80111 +               up_write(lock);
80112 +               if (err)
80113 +                       return err;
80114 +               down_read(lock);
80115 +               ok = run_lookup_entry(run, vcn, &lcn, &clen, &idx);
80116 +               up_read(lock);
80117 +               if (!ok)
80118 +                       return -EINVAL;
80119 +       }
80121 +       blen = (sector_t)clen * sbi->blocks_per_cluster;
80122 +       block = (sector_t)lcn * sbi->blocks_per_cluster;
80124 +       for (; blk < blen; blk++, from = 0) {
80125 +               bh = ntfs_bread(sb, block + blk);
80126 +               if (!bh)
80127 +                       return -EIO;
80129 +               vbo = (u64)vblock << sb->s_blocksize_bits;
80130 +               if (vbo >= valid_size) {
80131 +                       memset(bh->b_data, 0, blocksize);
80132 +               } else if (vbo + blocksize > valid_size) {
80133 +                       u32 voff = valid_size & sbi->block_mask;
80135 +                       memset(bh->b_data + voff, 0, blocksize - voff);
80136 +               }
80138 +               if (vbo + blocksize > data_size)
80139 +                       nbits = 8 * (data_size - vbo);
80141 +               ok = nbits > from ? (*fn)((ulong *)bh->b_data, from, nbits, ret)
80142 +                                 : false;
80143 +               put_bh(bh);
80145 +               if (ok) {
80146 +                       *ret += 8 * vbo;
80147 +                       return 0;
80148 +               }
80150 +               if (++vblock >= eblock) {
80151 +                       *ret = MINUS_ONE_T;
80152 +                       return 0;
80153 +               }
80154 +       }
80155 +       blk = 0;
80156 +       vcn_next = vcn + clen;
80157 +       down_read(lock);
80158 +       ok = run_get_entry(run, ++idx, &vcn, &lcn, &clen) && vcn == vcn_next;
80159 +       if (!ok)
80160 +               vcn = vcn_next;
80161 +       up_read(lock);
80162 +       goto next_run;
80165 +static bool scan_for_free(const ulong *buf, u32 bit, u32 bits, size_t *ret)
80167 +       size_t pos = find_next_zero_bit(buf, bits, bit);
80169 +       if (pos >= bits)
80170 +               return false;
80171 +       *ret = pos;
80172 +       return true;
80176 + * indx_find_free
80177 + *
80178 + * looks for free bit
80179 + * returns MINUS_ONE_T if no free bits
80180 + */
80181 +static int indx_find_free(struct ntfs_index *indx, struct ntfs_inode *ni,
80182 +                         size_t *bit, struct ATTRIB **bitmap)
80184 +       struct ATTRIB *b;
80185 +       struct ATTR_LIST_ENTRY *le = NULL;
80186 +       const struct INDEX_NAMES *in = &s_index_names[indx->type];
80187 +       int err;
80189 +       b = ni_find_attr(ni, NULL, &le, ATTR_BITMAP, in->name, in->name_len,
80190 +                        NULL, NULL);
80192 +       if (!b)
80193 +               return -ENOENT;
80195 +       *bitmap = b;
80196 +       *bit = MINUS_ONE_T;
80198 +       if (!b->non_res) {
80199 +               u32 nbits = 8 * le32_to_cpu(b->res.data_size);
80200 +               size_t pos = find_next_zero_bit(resident_data(b), nbits, 0);
80202 +               if (pos < nbits)
80203 +                       *bit = pos;
80204 +       } else {
80205 +               err = scan_nres_bitmap(ni, b, indx, 0, &scan_for_free, bit);
80207 +               if (err)
80208 +                       return err;
80209 +       }
80211 +       return 0;
80214 +static bool scan_for_used(const ulong *buf, u32 bit, u32 bits, size_t *ret)
80216 +       size_t pos = find_next_bit(buf, bits, bit);
80218 +       if (pos >= bits)
80219 +               return false;
80220 +       *ret = pos;
80221 +       return true;
80225 + * indx_used_bit
80226 + *
80227 + * looks for used bit
80228 + * returns MINUS_ONE_T if no used bits
80229 + */
80230 +int indx_used_bit(struct ntfs_index *indx, struct ntfs_inode *ni, size_t *bit)
80232 +       struct ATTRIB *b;
80233 +       struct ATTR_LIST_ENTRY *le = NULL;
80234 +       size_t from = *bit;
80235 +       const struct INDEX_NAMES *in = &s_index_names[indx->type];
80236 +       int err;
80238 +       b = ni_find_attr(ni, NULL, &le, ATTR_BITMAP, in->name, in->name_len,
80239 +                        NULL, NULL);
80241 +       if (!b)
80242 +               return -ENOENT;
80244 +       *bit = MINUS_ONE_T;
80246 +       if (!b->non_res) {
80247 +               u32 nbits = le32_to_cpu(b->res.data_size) * 8;
80248 +               size_t pos = find_next_bit(resident_data(b), nbits, from);
80250 +               if (pos < nbits)
80251 +                       *bit = pos;
80252 +       } else {
80253 +               err = scan_nres_bitmap(ni, b, indx, from, &scan_for_used, bit);
80254 +               if (err)
80255 +                       return err;
80256 +       }
80258 +       return 0;
80262 + * hdr_find_split
80263 + *
80264 + * finds a point at which the index allocation buffer would like to
80265 + * be split.
80266 + * NOTE: This function should never return the 'END' entry; returns NULL on error
80267 + */
80268 +static const struct NTFS_DE *hdr_find_split(const struct INDEX_HDR *hdr)
80270 +       size_t o;
80271 +       const struct NTFS_DE *e = hdr_first_de(hdr);
80272 +       u32 used_2 = le32_to_cpu(hdr->used) >> 1;
80273 +       u16 esize = le16_to_cpu(e->size);
80275 +       if (!e || de_is_last(e))
80276 +               return NULL;
80278 +       for (o = le32_to_cpu(hdr->de_off) + esize; o < used_2; o += esize) {
80279 +               const struct NTFS_DE *p = e;
80281 +               e = Add2Ptr(hdr, o);
80283 +               /* We must not return END entry */
80284 +               if (de_is_last(e))
80285 +                       return p;
80287 +               esize = le16_to_cpu(e->size);
80288 +       }
80290 +       return e;
80294 + * hdr_insert_head
80295 + *
80296 + * inserts some entries at the beginning of the buffer.
80297 + * It is used to insert entries into a newly-created buffer.
80298 + */
80299 +static const struct NTFS_DE *hdr_insert_head(struct INDEX_HDR *hdr,
80300 +                                            const void *ins, u32 ins_bytes)
80302 +       u32 to_move;
80303 +       struct NTFS_DE *e = hdr_first_de(hdr);
80304 +       u32 used = le32_to_cpu(hdr->used);
80306 +       if (!e)
80307 +               return NULL;
80309 +       /* Now we just make room for the inserted entries and jam it in. */
80310 +       to_move = used - le32_to_cpu(hdr->de_off);
80311 +       memmove(Add2Ptr(e, ins_bytes), e, to_move);
80312 +       memcpy(e, ins, ins_bytes);
80313 +       hdr->used = cpu_to_le32(used + ins_bytes);
80315 +       return e;
80318 +void fnd_clear(struct ntfs_fnd *fnd)
80320 +       int i;
80322 +       for (i = 0; i < fnd->level; i++) {
80323 +               struct indx_node *n = fnd->nodes[i];
80325 +               if (!n)
80326 +                       continue;
80328 +               put_indx_node(n);
80329 +               fnd->nodes[i] = NULL;
80330 +       }
80331 +       fnd->level = 0;
80332 +       fnd->root_de = NULL;
80335 +static int fnd_push(struct ntfs_fnd *fnd, struct indx_node *n,
80336 +                   struct NTFS_DE *e)
80338 +       int i;
80340 +       i = fnd->level;
80341 +       if (i < 0 || i >= ARRAY_SIZE(fnd->nodes))
80342 +               return -EINVAL;
80343 +       fnd->nodes[i] = n;
80344 +       fnd->de[i] = e;
80345 +       fnd->level += 1;
80346 +       return 0;
80349 +static struct indx_node *fnd_pop(struct ntfs_fnd *fnd)
80351 +       struct indx_node *n;
80352 +       int i = fnd->level;
80354 +       i -= 1;
80355 +       n = fnd->nodes[i];
80356 +       fnd->nodes[i] = NULL;
80357 +       fnd->level = i;
80359 +       return n;
80362 +static bool fnd_is_empty(struct ntfs_fnd *fnd)
80364 +       if (!fnd->level)
80365 +               return !fnd->root_de;
80367 +       return !fnd->de[fnd->level - 1];
80371 + * hdr_find_e
80372 + *
80373 + * locates an entry in the index buffer.
80374 + * If no matching entry is found, it returns the first entry which is greater
80375 + * than the desired entry. If the search key is greater than all entries in the
80376 + * buffer, it returns the 'end' entry. This function does a binary search of the
80377 + * current index buffer, for the first entry that is <= to the search value.
80378 + * Returns NULL if error
80379 + */
80380 +static struct NTFS_DE *hdr_find_e(const struct ntfs_index *indx,
80381 +                                 const struct INDEX_HDR *hdr, const void *key,
80382 +                                 size_t key_len, const void *ctx, int *diff)
80384 +       struct NTFS_DE *e;
80385 +       NTFS_CMP_FUNC cmp = indx->cmp;
80386 +       u32 e_size, e_key_len;
80387 +       u32 end = le32_to_cpu(hdr->used);
80388 +       u32 off = le32_to_cpu(hdr->de_off);
80390 +#ifdef NTFS3_INDEX_BINARY_SEARCH
80391 +       int max_idx = 0, fnd, min_idx;
80392 +       int nslots = 64;
80393 +       u16 *offs;
80395 +       if (end > 0x10000)
80396 +               goto next;
80398 +       offs = ntfs_malloc(sizeof(u16) * nslots);
80399 +       if (!offs)
80400 +               goto next;
80402 +       /* use binary search algorithm */
80403 +next1:
80404 +       if (off + sizeof(struct NTFS_DE) > end) {
80405 +               e = NULL;
80406 +               goto out1;
80407 +       }
80408 +       e = Add2Ptr(hdr, off);
80409 +       e_size = le16_to_cpu(e->size);
80411 +       if (e_size < sizeof(struct NTFS_DE) || off + e_size > end) {
80412 +               e = NULL;
80413 +               goto out1;
80414 +       }
80416 +       if (max_idx >= nslots) {
80417 +               u16 *ptr;
80418 +               int new_slots = QuadAlign(2 * nslots);
80420 +               ptr = ntfs_malloc(sizeof(u16) * new_slots);
80421 +               if (ptr)
80422 +                       memcpy(ptr, offs, sizeof(u16) * max_idx);
80423 +               ntfs_free(offs);
80424 +               offs = ptr;
80425 +               nslots = new_slots;
80426 +               if (!ptr)
80427 +                       goto next;
80428 +       }
80430 +       /* Store entry table */
80431 +       offs[max_idx] = off;
80433 +       if (!de_is_last(e)) {
80434 +               off += e_size;
80435 +               max_idx += 1;
80436 +               goto next1;
80437 +       }
80439 +       /*
80440 +        * Table of pointers is created
80441 +        * Use binary search to find entry that is <= to the search value
80442 +        */
80443 +       fnd = -1;
80444 +       min_idx = 0;
80446 +       while (min_idx <= max_idx) {
80447 +               int mid_idx = min_idx + ((max_idx - min_idx) >> 1);
80448 +               int diff2;
80450 +               e = Add2Ptr(hdr, offs[mid_idx]);
80452 +               e_key_len = le16_to_cpu(e->key_size);
80454 +               diff2 = (*cmp)(key, key_len, e + 1, e_key_len, ctx);
80456 +               if (!diff2) {
80457 +                       *diff = 0;
80458 +                       goto out1;
80459 +               }
80461 +               if (diff2 < 0) {
80462 +                       max_idx = mid_idx - 1;
80463 +                       fnd = mid_idx;
80464 +                       if (!fnd)
80465 +                               break;
80466 +               } else {
80467 +                       min_idx = mid_idx + 1;
80468 +               }
80469 +       }
80471 +       if (fnd == -1) {
80472 +               e = NULL;
80473 +               goto out1;
80474 +       }
80476 +       *diff = -1;
80477 +       e = Add2Ptr(hdr, offs[fnd]);
80479 +out1:
80480 +       ntfs_free(offs);
80482 +       return e;
80483 +#endif
80485 +next:
80486 +       /*
80487 +        * Entries index are sorted
80488 +        * Enumerate all entries until we find entry that is <= to the search value
80489 +        */
80490 +       if (off + sizeof(struct NTFS_DE) > end)
80491 +               return NULL;
80493 +       e = Add2Ptr(hdr, off);
80494 +       e_size = le16_to_cpu(e->size);
80496 +       if (e_size < sizeof(struct NTFS_DE) || off + e_size > end)
80497 +               return NULL;
80499 +       off += e_size;
80501 +       e_key_len = le16_to_cpu(e->key_size);
80503 +       *diff = (*cmp)(key, key_len, e + 1, e_key_len, ctx);
80504 +       if (!*diff)
80505 +               return e;
80507 +       if (*diff <= 0)
80508 +               return e;
80510 +       if (de_is_last(e)) {
80511 +               *diff = 1;
80512 +               return e;
80513 +       }
80514 +       goto next;
80518 + * hdr_insert_de
80519 + *
80520 + * inserts an index entry into the buffer.
80521 + * 'before' should be a pointer previously returned from hdr_find_e
80522 + */
80523 +static struct NTFS_DE *hdr_insert_de(const struct ntfs_index *indx,
80524 +                                    struct INDEX_HDR *hdr,
80525 +                                    const struct NTFS_DE *de,
80526 +                                    struct NTFS_DE *before, const void *ctx)
80528 +       int diff;
80529 +       size_t off = PtrOffset(hdr, before);
80530 +       u32 used = le32_to_cpu(hdr->used);
80531 +       u32 total = le32_to_cpu(hdr->total);
80532 +       u16 de_size = le16_to_cpu(de->size);
80534 +       /* First, check to see if there's enough room */
80535 +       if (used + de_size > total)
80536 +               return NULL;
80538 +       /* We know there's enough space, so we know we'll succeed. */
80539 +       if (before) {
80540 +               /* Check that before is inside Index */
80541 +               if (off >= used || off < le32_to_cpu(hdr->de_off) ||
80542 +                   off + le16_to_cpu(before->size) > total) {
80543 +                       return NULL;
80544 +               }
80545 +               goto ok;
80546 +       }
80547 +       /* No insert point is applied. Get it manually */
80548 +       before = hdr_find_e(indx, hdr, de + 1, le16_to_cpu(de->key_size), ctx,
80549 +                           &diff);
80550 +       if (!before)
80551 +               return NULL;
80552 +       off = PtrOffset(hdr, before);
80554 +ok:
80555 +       /* Now we just make room for the entry and jam it in. */
80556 +       memmove(Add2Ptr(before, de_size), before, used - off);
80558 +       hdr->used = cpu_to_le32(used + de_size);
80559 +       memcpy(before, de, de_size);
80561 +       return before;
80565 + * hdr_delete_de
80566 + *
80567 + * removes an entry from the index buffer
80568 + */
80569 +static inline struct NTFS_DE *hdr_delete_de(struct INDEX_HDR *hdr,
80570 +                                           struct NTFS_DE *re)
80572 +       u32 used = le32_to_cpu(hdr->used);
80573 +       u16 esize = le16_to_cpu(re->size);
80574 +       u32 off = PtrOffset(hdr, re);
80575 +       int bytes = used - (off + esize);
80577 +       if (off >= used || esize < sizeof(struct NTFS_DE) ||
80578 +           bytes < sizeof(struct NTFS_DE))
80579 +               return NULL;
80581 +       hdr->used = cpu_to_le32(used - esize);
80582 +       memmove(re, Add2Ptr(re, esize), bytes);
80584 +       return re;
80587 +void indx_clear(struct ntfs_index *indx)
80589 +       run_close(&indx->alloc_run);
80590 +       run_close(&indx->bitmap_run);
80593 +int indx_init(struct ntfs_index *indx, struct ntfs_sb_info *sbi,
80594 +             const struct ATTRIB *attr, enum index_mutex_classed type)
80596 +       u32 t32;
80597 +       const struct INDEX_ROOT *root = resident_data(attr);
80599 +       /* Check root fields */
80600 +       if (!root->index_block_clst)
80601 +               return -EINVAL;
80603 +       indx->type = type;
80604 +       indx->idx2vbn_bits = __ffs(root->index_block_clst);
80606 +       t32 = le32_to_cpu(root->index_block_size);
80607 +       indx->index_bits = blksize_bits(t32);
80609 +       /* Check index record size */
80610 +       if (t32 < sbi->cluster_size) {
80611 +               /* index record is smaller than a cluster, use 512 blocks */
80612 +               if (t32 != root->index_block_clst * SECTOR_SIZE)
80613 +                       return -EINVAL;
80615 +               /* Check alignment to a cluster */
80616 +               if ((sbi->cluster_size >> SECTOR_SHIFT) &
80617 +                   (root->index_block_clst - 1)) {
80618 +                       return -EINVAL;
80619 +               }
80621 +               indx->vbn2vbo_bits = SECTOR_SHIFT;
80622 +       } else {
80623 +               /* index record must be a multiple of cluster size */
80624 +               if (t32 != root->index_block_clst << sbi->cluster_bits)
80625 +                       return -EINVAL;
80627 +               indx->vbn2vbo_bits = sbi->cluster_bits;
80628 +       }
80630 +       init_rwsem(&indx->run_lock);
80632 +       indx->cmp = get_cmp_func(root);
80633 +       return indx->cmp ? 0 : -EINVAL;
80636 +static struct indx_node *indx_new(struct ntfs_index *indx,
80637 +                                 struct ntfs_inode *ni, CLST vbn,
80638 +                                 const __le64 *sub_vbn)
80640 +       int err;
80641 +       struct NTFS_DE *e;
80642 +       struct indx_node *r;
80643 +       struct INDEX_HDR *hdr;
80644 +       struct INDEX_BUFFER *index;
80645 +       u64 vbo = (u64)vbn << indx->vbn2vbo_bits;
80646 +       u32 bytes = 1u << indx->index_bits;
80647 +       u16 fn;
80648 +       u32 eo;
80650 +       r = ntfs_zalloc(sizeof(struct indx_node));
80651 +       if (!r)
80652 +               return ERR_PTR(-ENOMEM);
80654 +       index = ntfs_zalloc(bytes);
80655 +       if (!index) {
80656 +               ntfs_free(r);
80657 +               return ERR_PTR(-ENOMEM);
80658 +       }
80660 +       err = ntfs_get_bh(ni->mi.sbi, &indx->alloc_run, vbo, bytes, &r->nb);
80662 +       if (err) {
80663 +               ntfs_free(index);
80664 +               ntfs_free(r);
80665 +               return ERR_PTR(err);
80666 +       }
80668 +       /* Create header */
80669 +       index->rhdr.sign = NTFS_INDX_SIGNATURE;
80670 +       index->rhdr.fix_off = cpu_to_le16(sizeof(struct INDEX_BUFFER)); // 0x28
80671 +       fn = (bytes >> SECTOR_SHIFT) + 1; // 9
80672 +       index->rhdr.fix_num = cpu_to_le16(fn);
80673 +       index->vbn = cpu_to_le64(vbn);
80674 +       hdr = &index->ihdr;
80675 +       eo = QuadAlign(sizeof(struct INDEX_BUFFER) + fn * sizeof(short));
80676 +       hdr->de_off = cpu_to_le32(eo);
80678 +       e = Add2Ptr(hdr, eo);
80680 +       if (sub_vbn) {
80681 +               e->flags = NTFS_IE_LAST | NTFS_IE_HAS_SUBNODES;
80682 +               e->size = cpu_to_le16(sizeof(struct NTFS_DE) + sizeof(u64));
80683 +               hdr->used =
80684 +                       cpu_to_le32(eo + sizeof(struct NTFS_DE) + sizeof(u64));
80685 +               de_set_vbn_le(e, *sub_vbn);
80686 +               hdr->flags = 1;
80687 +       } else {
80688 +               e->size = cpu_to_le16(sizeof(struct NTFS_DE));
80689 +               hdr->used = cpu_to_le32(eo + sizeof(struct NTFS_DE));
80690 +               e->flags = NTFS_IE_LAST;
80691 +       }
80693 +       hdr->total = cpu_to_le32(bytes - offsetof(struct INDEX_BUFFER, ihdr));
80695 +       r->index = index;
80696 +       return r;
80699 +struct INDEX_ROOT *indx_get_root(struct ntfs_index *indx, struct ntfs_inode *ni,
80700 +                                struct ATTRIB **attr, struct mft_inode **mi)
80702 +       struct ATTR_LIST_ENTRY *le = NULL;
80703 +       struct ATTRIB *a;
80704 +       const struct INDEX_NAMES *in = &s_index_names[indx->type];
80706 +       a = ni_find_attr(ni, NULL, &le, ATTR_ROOT, in->name, in->name_len, NULL,
80707 +                        mi);
80708 +       if (!a)
80709 +               return NULL;
80711 +       if (attr)
80712 +               *attr = a;
80714 +       return resident_data_ex(a, sizeof(struct INDEX_ROOT));
80717 +static int indx_write(struct ntfs_index *indx, struct ntfs_inode *ni,
80718 +                     struct indx_node *node, int sync)
80720 +       struct INDEX_BUFFER *ib = node->index;
80722 +       return ntfs_write_bh(ni->mi.sbi, &ib->rhdr, &node->nb, sync);
80726 + * if ntfs_readdir calls this function
80727 + * inode is shared locked and no ni_lock
80728 + * use rw_semaphore for read/write access to alloc_run
80729 + */
80730 +int indx_read(struct ntfs_index *indx, struct ntfs_inode *ni, CLST vbn,
80731 +             struct indx_node **node)
80733 +       int err;
80734 +       struct INDEX_BUFFER *ib;
80735 +       struct runs_tree *run = &indx->alloc_run;
80736 +       struct rw_semaphore *lock = &indx->run_lock;
80737 +       u64 vbo = (u64)vbn << indx->vbn2vbo_bits;
80738 +       u32 bytes = 1u << indx->index_bits;
80739 +       struct indx_node *in = *node;
80740 +       const struct INDEX_NAMES *name;
80742 +       if (!in) {
80743 +               in = ntfs_zalloc(sizeof(struct indx_node));
80744 +               if (!in)
80745 +                       return -ENOMEM;
80746 +       } else {
80747 +               nb_put(&in->nb);
80748 +       }
80750 +       ib = in->index;
80751 +       if (!ib) {
80752 +               ib = ntfs_malloc(bytes);
80753 +               if (!ib) {
80754 +                       err = -ENOMEM;
80755 +                       goto out;
80756 +               }
80757 +       }
80759 +       down_read(lock);
80760 +       err = ntfs_read_bh(ni->mi.sbi, run, vbo, &ib->rhdr, bytes, &in->nb);
80761 +       up_read(lock);
80762 +       if (!err)
80763 +               goto ok;
80765 +       if (err == -E_NTFS_FIXUP)
80766 +               goto ok;
80768 +       if (err != -ENOENT)
80769 +               goto out;
80771 +       name = &s_index_names[indx->type];
80772 +       down_write(lock);
80773 +       err = attr_load_runs_range(ni, ATTR_ALLOC, name->name, name->name_len,
80774 +                                  run, vbo, vbo + bytes);
80775 +       up_write(lock);
80776 +       if (err)
80777 +               goto out;
80779 +       down_read(lock);
80780 +       err = ntfs_read_bh(ni->mi.sbi, run, vbo, &ib->rhdr, bytes, &in->nb);
80781 +       up_read(lock);
80782 +       if (err == -E_NTFS_FIXUP)
80783 +               goto ok;
80785 +       if (err)
80786 +               goto out;
80788 +ok:
80789 +       if (err == -E_NTFS_FIXUP) {
80790 +               ntfs_write_bh(ni->mi.sbi, &ib->rhdr, &in->nb, 0);
80791 +               err = 0;
80792 +       }
80794 +       in->index = ib;
80795 +       *node = in;
80797 +out:
80798 +       if (ib != in->index)
80799 +               ntfs_free(ib);
80801 +       if (*node != in) {
80802 +               nb_put(&in->nb);
80803 +               ntfs_free(in);
80804 +       }
80806 +       return err;
80810 + * indx_find
80811 + *
80812 + * scans NTFS directory for given entry
80813 + */
80814 +int indx_find(struct ntfs_index *indx, struct ntfs_inode *ni,
80815 +             const struct INDEX_ROOT *root, const void *key, size_t key_len,
80816 +             const void *ctx, int *diff, struct NTFS_DE **entry,
80817 +             struct ntfs_fnd *fnd)
80819 +       int err;
80820 +       struct NTFS_DE *e;
80821 +       const struct INDEX_HDR *hdr;
80822 +       struct indx_node *node;
80824 +       if (!root)
80825 +               root = indx_get_root(&ni->dir, ni, NULL, NULL);
80827 +       if (!root) {
80828 +               err = -EINVAL;
80829 +               goto out;
80830 +       }
80832 +       hdr = &root->ihdr;
80834 +       /* Check cache */
80835 +       e = fnd->level ? fnd->de[fnd->level - 1] : fnd->root_de;
80836 +       if (e && !de_is_last(e) &&
80837 +           !(*indx->cmp)(key, key_len, e + 1, le16_to_cpu(e->key_size), ctx)) {
80838 +               *entry = e;
80839 +               *diff = 0;
80840 +               return 0;
80841 +       }
80843 +       /* Soft finder reset */
80844 +       fnd_clear(fnd);
80846 +       /* Lookup entry that is <= to the search value */
80847 +       e = hdr_find_e(indx, hdr, key, key_len, ctx, diff);
80848 +       if (!e)
80849 +               return -EINVAL;
80851 +       if (fnd)
80852 +               fnd->root_de = e;
80854 +       err = 0;
80856 +       for (;;) {
80857 +               node = NULL;
80858 +               if (*diff >= 0 || !de_has_vcn_ex(e)) {
80859 +                       *entry = e;
80860 +                       goto out;
80861 +               }
80863 +               /* Read next level. */
80864 +               err = indx_read(indx, ni, de_get_vbn(e), &node);
80865 +               if (err)
80866 +                       goto out;
80868 +               /* Lookup entry that is <= to the search value */
80869 +               e = hdr_find_e(indx, &node->index->ihdr, key, key_len, ctx,
80870 +                              diff);
80871 +               if (!e) {
80872 +                       err = -EINVAL;
80873 +                       put_indx_node(node);
80874 +                       goto out;
80875 +               }
80877 +               fnd_push(fnd, node, e);
80878 +       }
80880 +out:
80881 +       return err;
80884 +int indx_find_sort(struct ntfs_index *indx, struct ntfs_inode *ni,
80885 +                  const struct INDEX_ROOT *root, struct NTFS_DE **entry,
80886 +                  struct ntfs_fnd *fnd)
80888 +       int err;
80889 +       struct indx_node *n = NULL;
80890 +       struct NTFS_DE *e;
80891 +       size_t iter = 0;
80892 +       int level = fnd->level;
80894 +       if (!*entry) {
80895 +               /* Start find */
80896 +               e = hdr_first_de(&root->ihdr);
80897 +               if (!e)
80898 +                       return 0;
80899 +               fnd_clear(fnd);
80900 +               fnd->root_de = e;
80901 +       } else if (!level) {
80902 +               if (de_is_last(fnd->root_de)) {
80903 +                       *entry = NULL;
80904 +                       return 0;
80905 +               }
80907 +               e = hdr_next_de(&root->ihdr, fnd->root_de);
80908 +               if (!e)
80909 +                       return -EINVAL;
80910 +               fnd->root_de = e;
80911 +       } else {
80912 +               n = fnd->nodes[level - 1];
80913 +               e = fnd->de[level - 1];
80915 +               if (de_is_last(e))
80916 +                       goto pop_level;
80918 +               e = hdr_next_de(&n->index->ihdr, e);
80919 +               if (!e)
80920 +                       return -EINVAL;
80922 +               fnd->de[level - 1] = e;
80923 +       }
80925 +       /* Just to avoid tree cycle */
80926 +next_iter:
80927 +       if (iter++ >= 1000)
80928 +               return -EINVAL;
80930 +       while (de_has_vcn_ex(e)) {
80931 +               if (le16_to_cpu(e->size) <
80932 +                   sizeof(struct NTFS_DE) + sizeof(u64)) {
80933 +                       if (n) {
80934 +                               fnd_pop(fnd);
80935 +                               ntfs_free(n);
80936 +                       }
80937 +                       return -EINVAL;
80938 +               }
80940 +               /* Read next level */
80941 +               err = indx_read(indx, ni, de_get_vbn(e), &n);
80942 +               if (err)
80943 +                       return err;
80945 +               /* Try next level */
80946 +               e = hdr_first_de(&n->index->ihdr);
80947 +               if (!e) {
80948 +                       ntfs_free(n);
80949 +                       return -EINVAL;
80950 +               }
80952 +               fnd_push(fnd, n, e);
80953 +       }
80955 +       if (le16_to_cpu(e->size) > sizeof(struct NTFS_DE)) {
80956 +               *entry = e;
80957 +               return 0;
80958 +       }
80960 +pop_level:
80961 +       for (;;) {
80962 +               if (!de_is_last(e))
80963 +                       goto next_iter;
80965 +               /* Pop one level */
80966 +               if (n) {
80967 +                       fnd_pop(fnd);
80968 +                       ntfs_free(n);
80969 +               }
80971 +               level = fnd->level;
80973 +               if (level) {
80974 +                       n = fnd->nodes[level - 1];
80975 +                       e = fnd->de[level - 1];
80976 +               } else if (fnd->root_de) {
80977 +                       n = NULL;
80978 +                       e = fnd->root_de;
80979 +                       fnd->root_de = NULL;
80980 +               } else {
80981 +                       *entry = NULL;
80982 +                       return 0;
80983 +               }
80985 +               if (le16_to_cpu(e->size) > sizeof(struct NTFS_DE)) {
80986 +                       *entry = e;
80987 +                       if (!fnd->root_de)
80988 +                               fnd->root_de = e;
80989 +                       return 0;
80990 +               }
80991 +       }
80994 +int indx_find_raw(struct ntfs_index *indx, struct ntfs_inode *ni,
80995 +                 const struct INDEX_ROOT *root, struct NTFS_DE **entry,
80996 +                 size_t *off, struct ntfs_fnd *fnd)
80998 +       int err;
80999 +       struct indx_node *n = NULL;
81000 +       struct NTFS_DE *e = NULL;
81001 +       struct NTFS_DE *e2;
81002 +       size_t bit;
81003 +       CLST next_used_vbn;
81004 +       CLST next_vbn;
81005 +       u32 record_size = ni->mi.sbi->record_size;
81007 +       /* Use non sorted algorithm */
81008 +       if (!*entry) {
81009 +               /* This is the first call */
81010 +               e = hdr_first_de(&root->ihdr);
81011 +               if (!e)
81012 +                       return 0;
81013 +               fnd_clear(fnd);
81014 +               fnd->root_de = e;
81016 +               /* The first call with setup of initial element */
81017 +               if (*off >= record_size) {
81018 +                       next_vbn = (((*off - record_size) >> indx->index_bits))
81019 +                                  << indx->idx2vbn_bits;
81020 +                       /* jump inside cycle 'for'*/
81021 +                       goto next;
81022 +               }
81024 +               /* Start enumeration from root */
81025 +               *off = 0;
81026 +       } else if (!fnd->root_de)
81027 +               return -EINVAL;
81029 +       for (;;) {
81030 +               /* Check if current entry can be used */
81031 +               if (e && le16_to_cpu(e->size) > sizeof(struct NTFS_DE))
81032 +                       goto ok;
81034 +               if (!fnd->level) {
81035 +                       /* Continue to enumerate root */
81036 +                       if (!de_is_last(fnd->root_de)) {
81037 +                               e = hdr_next_de(&root->ihdr, fnd->root_de);
81038 +                               if (!e)
81039 +                                       return -EINVAL;
81040 +                               fnd->root_de = e;
81041 +                               continue;
81042 +                       }
81044 +                       /* Start to enumerate indexes from 0 */
81045 +                       next_vbn = 0;
81046 +               } else {
81047 +                       /* Continue to enumerate indexes */
81048 +                       e2 = fnd->de[fnd->level - 1];
81050 +                       n = fnd->nodes[fnd->level - 1];
81052 +                       if (!de_is_last(e2)) {
81053 +                               e = hdr_next_de(&n->index->ihdr, e2);
81054 +                               if (!e)
81055 +                                       return -EINVAL;
81056 +                               fnd->de[fnd->level - 1] = e;
81057 +                               continue;
81058 +                       }
81060 +                       /* Continue with next index */
81061 +                       next_vbn = le64_to_cpu(n->index->vbn) +
81062 +                                  root->index_block_clst;
81063 +               }
81065 +next:
81066 +               /* Release current index */
81067 +               if (n) {
81068 +                       fnd_pop(fnd);
81069 +                       put_indx_node(n);
81070 +                       n = NULL;
81071 +               }
81073 +               /* Skip all free indexes */
81074 +               bit = next_vbn >> indx->idx2vbn_bits;
81075 +               err = indx_used_bit(indx, ni, &bit);
81076 +               if (err == -ENOENT || bit == MINUS_ONE_T) {
81077 +                       /* No used indexes */
81078 +                       *entry = NULL;
81079 +                       return 0;
81080 +               }
81082 +               next_used_vbn = bit << indx->idx2vbn_bits;
81084 +               /* Read buffer into memory */
81085 +               err = indx_read(indx, ni, next_used_vbn, &n);
81086 +               if (err)
81087 +                       return err;
81089 +               e = hdr_first_de(&n->index->ihdr);
81090 +               fnd_push(fnd, n, e);
81091 +               if (!e)
81092 +                       return -EINVAL;
81093 +       }
81095 +ok:
81096 +       /* return offset to restore enumerator if necessary */
81097 +       if (!n) {
81098 +               /* 'e' points in root */
81099 +               *off = PtrOffset(&root->ihdr, e);
81100 +       } else {
81101 +               /* 'e' points in index */
81102 +               *off = (le64_to_cpu(n->index->vbn) << indx->vbn2vbo_bits) +
81103 +                      record_size + PtrOffset(&n->index->ihdr, e);
81104 +       }
81106 +       *entry = e;
81107 +       return 0;
81111 + * indx_create_allocate
81112 + *
81113 + * create "Allocation + Bitmap" attributes
81114 + */
81115 +static int indx_create_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
81116 +                               CLST *vbn)
81118 +       int err = -ENOMEM;
81119 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
81120 +       struct ATTRIB *bitmap;
81121 +       struct ATTRIB *alloc;
81122 +       u32 data_size = 1u << indx->index_bits;
81123 +       u32 alloc_size = ntfs_up_cluster(sbi, data_size);
81124 +       CLST len = alloc_size >> sbi->cluster_bits;
81125 +       const struct INDEX_NAMES *in = &s_index_names[indx->type];
81126 +       CLST alen;
81127 +       struct runs_tree run;
81129 +       run_init(&run);
81131 +       err = attr_allocate_clusters(sbi, &run, 0, 0, len, NULL, 0, &alen, 0,
81132 +                                    NULL);
81133 +       if (err)
81134 +               goto out;
81136 +       err = ni_insert_nonresident(ni, ATTR_ALLOC, in->name, in->name_len,
81137 +                                   &run, 0, len, 0, &alloc, NULL);
81138 +       if (err)
81139 +               goto out1;
81141 +       alloc->nres.valid_size = alloc->nres.data_size = cpu_to_le64(data_size);
81143 +       err = ni_insert_resident(ni, bitmap_size(1), ATTR_BITMAP, in->name,
81144 +                                in->name_len, &bitmap, NULL);
81145 +       if (err)
81146 +               goto out2;
81148 +       if (in->name == I30_NAME) {
81149 +               ni->vfs_inode.i_size = data_size;
81150 +               inode_set_bytes(&ni->vfs_inode, alloc_size);
81151 +       }
81153 +       memcpy(&indx->alloc_run, &run, sizeof(run));
81155 +       *vbn = 0;
81157 +       return 0;
81159 +out2:
81160 +       mi_remove_attr(&ni->mi, alloc);
81162 +out1:
81163 +       run_deallocate(sbi, &run, false);
81165 +out:
81166 +       return err;
81170 + * indx_add_allocate
81171 + *
81172 + * add clusters to index
81173 + */
81174 +static int indx_add_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
81175 +                            CLST *vbn)
81177 +       int err;
81178 +       size_t bit;
81179 +       u64 data_size;
81180 +       u64 bmp_size, bmp_size_v;
81181 +       struct ATTRIB *bmp, *alloc;
81182 +       struct mft_inode *mi;
81183 +       const struct INDEX_NAMES *in = &s_index_names[indx->type];
81185 +       err = indx_find_free(indx, ni, &bit, &bmp);
81186 +       if (err)
81187 +               goto out1;
81189 +       if (bit != MINUS_ONE_T) {
81190 +               bmp = NULL;
81191 +       } else {
81192 +               if (bmp->non_res) {
81193 +                       bmp_size = le64_to_cpu(bmp->nres.data_size);
81194 +                       bmp_size_v = le64_to_cpu(bmp->nres.valid_size);
81195 +               } else {
81196 +                       bmp_size = bmp_size_v = le32_to_cpu(bmp->res.data_size);
81197 +               }
81199 +               bit = bmp_size << 3;
81200 +       }
81202 +       data_size = (u64)(bit + 1) << indx->index_bits;
81204 +       if (bmp) {
81205 +               /* Increase bitmap */
81206 +               err = attr_set_size(ni, ATTR_BITMAP, in->name, in->name_len,
81207 +                                   &indx->bitmap_run, bitmap_size(bit + 1),
81208 +                                   NULL, true, NULL);
81209 +               if (err)
81210 +                       goto out1;
81211 +       }
81213 +       alloc = ni_find_attr(ni, NULL, NULL, ATTR_ALLOC, in->name, in->name_len,
81214 +                            NULL, &mi);
81215 +       if (!alloc) {
81216 +               if (bmp)
81217 +                       goto out2;
81218 +               goto out1;
81219 +       }
81221 +       /* Increase allocation */
81222 +       err = attr_set_size(ni, ATTR_ALLOC, in->name, in->name_len,
81223 +                           &indx->alloc_run, data_size, &data_size, true,
81224 +                           NULL);
81225 +       if (err) {
81226 +               if (bmp)
81227 +                       goto out2;
81228 +               goto out1;
81229 +       }
81231 +       *vbn = bit << indx->idx2vbn_bits;
81233 +       return 0;
81235 +out2:
81236 +       /* Ops (no space?) */
81237 +       attr_set_size(ni, ATTR_BITMAP, in->name, in->name_len,
81238 +                     &indx->bitmap_run, bmp_size, &bmp_size_v, false, NULL);
81240 +out1:
81241 +       return err;
81245 + * indx_insert_into_root
81246 + *
81247 + * attempts to insert an entry into the index root
81248 + * If necessary, it will twiddle the index b-tree.
81249 + */
81250 +static int indx_insert_into_root(struct ntfs_index *indx, struct ntfs_inode *ni,
81251 +                                const struct NTFS_DE *new_de,
81252 +                                struct NTFS_DE *root_de, const void *ctx,
81253 +                                struct ntfs_fnd *fnd)
81255 +       int err = 0;
81256 +       struct NTFS_DE *e, *e0, *re;
81257 +       struct mft_inode *mi;
81258 +       struct ATTRIB *attr;
81259 +       struct MFT_REC *rec;
81260 +       struct INDEX_HDR *hdr;
81261 +       struct indx_node *n;
81262 +       CLST new_vbn;
81263 +       __le64 *sub_vbn, t_vbn;
81264 +       u16 new_de_size;
81265 +       u32 hdr_used, hdr_total, asize, used, to_move;
81266 +       u32 root_size, new_root_size;
81267 +       struct ntfs_sb_info *sbi;
81268 +       int ds_root;
81269 +       struct INDEX_ROOT *root, *a_root = NULL;
81271 +       /* Get the record this root placed in */
81272 +       root = indx_get_root(indx, ni, &attr, &mi);
81273 +       if (!root)
81274 +               goto out;
81276 +       /*
81277 +        * Try easy case:
81278 +        * hdr_insert_de will succeed if there's room the root for the new entry.
81279 +        */
81280 +       hdr = &root->ihdr;
81281 +       sbi = ni->mi.sbi;
81282 +       rec = mi->mrec;
81283 +       used = le32_to_cpu(rec->used);
81284 +       new_de_size = le16_to_cpu(new_de->size);
81285 +       hdr_used = le32_to_cpu(hdr->used);
81286 +       hdr_total = le32_to_cpu(hdr->total);
81287 +       asize = le32_to_cpu(attr->size);
81288 +       root_size = le32_to_cpu(attr->res.data_size);
81290 +       ds_root = new_de_size + hdr_used - hdr_total;
81292 +       if (used + ds_root < sbi->max_bytes_per_attr) {
81293 +               /* make a room for new elements */
81294 +               mi_resize_attr(mi, attr, ds_root);
81295 +               hdr->total = cpu_to_le32(hdr_total + ds_root);
81296 +               e = hdr_insert_de(indx, hdr, new_de, root_de, ctx);
81297 +               WARN_ON(!e);
81298 +               fnd_clear(fnd);
81299 +               fnd->root_de = e;
81301 +               return 0;
81302 +       }
81304 +       /* Make a copy of root attribute to restore if error */
81305 +       a_root = ntfs_memdup(attr, asize);
81306 +       if (!a_root) {
81307 +               err = -ENOMEM;
81308 +               goto out;
81309 +       }
81311 +       /* copy all the non-end entries from the index root to the new buffer.*/
81312 +       to_move = 0;
81313 +       e0 = hdr_first_de(hdr);
81315 +       /* Calculate the size to copy */
81316 +       for (e = e0;; e = hdr_next_de(hdr, e)) {
81317 +               if (!e) {
81318 +                       err = -EINVAL;
81319 +                       goto out;
81320 +               }
81322 +               if (de_is_last(e))
81323 +                       break;
81324 +               to_move += le16_to_cpu(e->size);
81325 +       }
81327 +       n = NULL;
81328 +       if (!to_move) {
81329 +               re = NULL;
81330 +       } else {
81331 +               re = ntfs_memdup(e0, to_move);
81332 +               if (!re) {
81333 +                       err = -ENOMEM;
81334 +                       goto out;
81335 +               }
81336 +       }
81338 +       sub_vbn = NULL;
81339 +       if (de_has_vcn(e)) {
81340 +               t_vbn = de_get_vbn_le(e);
81341 +               sub_vbn = &t_vbn;
81342 +       }
81344 +       new_root_size = sizeof(struct INDEX_ROOT) + sizeof(struct NTFS_DE) +
81345 +                       sizeof(u64);
81346 +       ds_root = new_root_size - root_size;
81348 +       if (ds_root > 0 && used + ds_root > sbi->max_bytes_per_attr) {
81349 +               /* make root external */
81350 +               err = -EOPNOTSUPP;
81351 +               goto out;
81352 +       }
81354 +       if (ds_root)
81355 +               mi_resize_attr(mi, attr, ds_root);
81357 +       /* Fill first entry (vcn will be set later) */
81358 +       e = (struct NTFS_DE *)(root + 1);
81359 +       memset(e, 0, sizeof(struct NTFS_DE));
81360 +       e->size = cpu_to_le16(sizeof(struct NTFS_DE) + sizeof(u64));
81361 +       e->flags = NTFS_IE_HAS_SUBNODES | NTFS_IE_LAST;
81363 +       hdr->flags = 1;
81364 +       hdr->used = hdr->total =
81365 +               cpu_to_le32(new_root_size - offsetof(struct INDEX_ROOT, ihdr));
81367 +       fnd->root_de = hdr_first_de(hdr);
81368 +       mi->dirty = true;
81370 +       /* Create alloc and bitmap attributes (if not) */
81371 +       err = run_is_empty(&indx->alloc_run)
81372 +                     ? indx_create_allocate(indx, ni, &new_vbn)
81373 +                     : indx_add_allocate(indx, ni, &new_vbn);
81375 +       /* layout of record may be changed, so rescan root */
81376 +       root = indx_get_root(indx, ni, &attr, &mi);
81377 +       if (!root) {
81378 +               /* bug? */
81379 +               ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
81380 +               err = -EINVAL;
81381 +               goto out1;
81382 +       }
81384 +       if (err) {
81385 +               /* restore root */
81386 +               if (mi_resize_attr(mi, attr, -ds_root))
81387 +                       memcpy(attr, a_root, asize);
81388 +               else {
81389 +                       /* bug? */
81390 +                       ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
81391 +               }
81392 +               goto out1;
81393 +       }
81395 +       e = (struct NTFS_DE *)(root + 1);
81396 +       *(__le64 *)(e + 1) = cpu_to_le64(new_vbn);
81397 +       mi->dirty = true;
81399 +       /* now we can create/format the new buffer and copy the entries into */
81400 +       n = indx_new(indx, ni, new_vbn, sub_vbn);
81401 +       if (IS_ERR(n)) {
81402 +               err = PTR_ERR(n);
81403 +               goto out1;
81404 +       }
81406 +       hdr = &n->index->ihdr;
81407 +       hdr_used = le32_to_cpu(hdr->used);
81408 +       hdr_total = le32_to_cpu(hdr->total);
81410 +       /* Copy root entries into new buffer */
81411 +       hdr_insert_head(hdr, re, to_move);
81413 +       /* Update bitmap attribute */
81414 +       indx_mark_used(indx, ni, new_vbn >> indx->idx2vbn_bits);
81416 +       /* Check if we can insert new entry new index buffer */
81417 +       if (hdr_used + new_de_size > hdr_total) {
81418 +               /*
81419 +                * This occurs if mft record is the same or bigger than index
81420 +                * buffer. Move all root new index and have no space to add
81421 +                * new entry classic case when mft record is 1K and index
81422 +                * buffer 4K the problem should not occurs
81423 +                */
81424 +               ntfs_free(re);
81425 +               indx_write(indx, ni, n, 0);
81427 +               put_indx_node(n);
81428 +               fnd_clear(fnd);
81429 +               err = indx_insert_entry(indx, ni, new_de, ctx, fnd);
81430 +               goto out;
81431 +       }
81433 +       /*
81434 +        * Now root is a parent for new index buffer
81435 +        * Insert NewEntry a new buffer
81436 +        */
81437 +       e = hdr_insert_de(indx, hdr, new_de, NULL, ctx);
81438 +       if (!e) {
81439 +               err = -EINVAL;
81440 +               goto out1;
81441 +       }
81442 +       fnd_push(fnd, n, e);
81444 +       /* Just write updates index into disk */
81445 +       indx_write(indx, ni, n, 0);
81447 +       n = NULL;
81449 +out1:
81450 +       ntfs_free(re);
81451 +       if (n)
81452 +               put_indx_node(n);
81454 +out:
81455 +       ntfs_free(a_root);
81456 +       return err;
81460 + * indx_insert_into_buffer
81461 + *
81462 + * attempts to insert an entry into an Index Allocation Buffer.
81463 + * If necessary, it will split the buffer.
81464 + */
81465 +static int
81466 +indx_insert_into_buffer(struct ntfs_index *indx, struct ntfs_inode *ni,
81467 +                       struct INDEX_ROOT *root, const struct NTFS_DE *new_de,
81468 +                       const void *ctx, int level, struct ntfs_fnd *fnd)
81470 +       int err;
81471 +       const struct NTFS_DE *sp;
81472 +       struct NTFS_DE *e, *de_t, *up_e = NULL;
81473 +       struct indx_node *n2 = NULL;
81474 +       struct indx_node *n1 = fnd->nodes[level];
81475 +       struct INDEX_HDR *hdr1 = &n1->index->ihdr;
81476 +       struct INDEX_HDR *hdr2;
81477 +       u32 to_copy, used;
81478 +       CLST new_vbn;
81479 +       __le64 t_vbn, *sub_vbn;
81480 +       u16 sp_size;
81482 +       /* Try the most easy case */
81483 +       e = fnd->level - 1 == level ? fnd->de[level] : NULL;
81484 +       e = hdr_insert_de(indx, hdr1, new_de, e, ctx);
81485 +       fnd->de[level] = e;
81486 +       if (e) {
81487 +               /* Just write updated index into disk */
81488 +               indx_write(indx, ni, n1, 0);
81489 +               return 0;
81490 +       }
81492 +       /*
81493 +        * No space to insert into buffer. Split it.
81494 +        * To split we:
81495 +        *  - Save split point ('cause index buffers will be changed)
81496 +        * - Allocate NewBuffer and copy all entries <= sp into new buffer
81497 +        * - Remove all entries (sp including) from TargetBuffer
81498 +        * - Insert NewEntry into left or right buffer (depending on sp <=>
81499 +        *     NewEntry)
81500 +        * - Insert sp into parent buffer (or root)
81501 +        * - Make sp a parent for new buffer
81502 +        */
81503 +       sp = hdr_find_split(hdr1);
81504 +       if (!sp)
81505 +               return -EINVAL;
81507 +       sp_size = le16_to_cpu(sp->size);
81508 +       up_e = ntfs_malloc(sp_size + sizeof(u64));
81509 +       if (!up_e)
81510 +               return -ENOMEM;
81511 +       memcpy(up_e, sp, sp_size);
81513 +       if (!hdr1->flags) {
81514 +               up_e->flags |= NTFS_IE_HAS_SUBNODES;
81515 +               up_e->size = cpu_to_le16(sp_size + sizeof(u64));
81516 +               sub_vbn = NULL;
81517 +       } else {
81518 +               t_vbn = de_get_vbn_le(up_e);
81519 +               sub_vbn = &t_vbn;
81520 +       }
81522 +       /* Allocate on disk a new index allocation buffer. */
81523 +       err = indx_add_allocate(indx, ni, &new_vbn);
81524 +       if (err)
81525 +               goto out;
81527 +       /* Allocate and format memory a new index buffer */
81528 +       n2 = indx_new(indx, ni, new_vbn, sub_vbn);
81529 +       if (IS_ERR(n2)) {
81530 +               err = PTR_ERR(n2);
81531 +               goto out;
81532 +       }
81534 +       hdr2 = &n2->index->ihdr;
81536 +       /* Make sp a parent for new buffer */
81537 +       de_set_vbn(up_e, new_vbn);
81539 +       /* copy all the entries <= sp into the new buffer. */
81540 +       de_t = hdr_first_de(hdr1);
81541 +       to_copy = PtrOffset(de_t, sp);
81542 +       hdr_insert_head(hdr2, de_t, to_copy);
81544 +       /* remove all entries (sp including) from hdr1 */
81545 +       used = le32_to_cpu(hdr1->used) - to_copy - sp_size;
81546 +       memmove(de_t, Add2Ptr(sp, sp_size), used - le32_to_cpu(hdr1->de_off));
81547 +       hdr1->used = cpu_to_le32(used);
81549 +       /* Insert new entry into left or right buffer (depending on sp <=> new_de) */
81550 +       hdr_insert_de(indx,
81551 +                     (*indx->cmp)(new_de + 1, le16_to_cpu(new_de->key_size),
81552 +                                  up_e + 1, le16_to_cpu(up_e->key_size),
81553 +                                  ctx) < 0
81554 +                             ? hdr2
81555 +                             : hdr1,
81556 +                     new_de, NULL, ctx);
81558 +       indx_mark_used(indx, ni, new_vbn >> indx->idx2vbn_bits);
81560 +       indx_write(indx, ni, n1, 0);
81561 +       indx_write(indx, ni, n2, 0);
81563 +       put_indx_node(n2);
81565 +       /*
81566 +        * we've finished splitting everybody, so we are ready to
81567 +        * insert the promoted entry into the parent.
81568 +        */
81569 +       if (!level) {
81570 +               /* Insert in root */
81571 +               err = indx_insert_into_root(indx, ni, up_e, NULL, ctx, fnd);
81572 +               if (err)
81573 +                       goto out;
81574 +       } else {
81575 +               /*
81576 +                * The target buffer's parent is another index buffer
81577 +                * TODO: Remove recursion
81578 +                */
81579 +               err = indx_insert_into_buffer(indx, ni, root, up_e, ctx,
81580 +                                             level - 1, fnd);
81581 +               if (err)
81582 +                       goto out;
81583 +       }
81585 +out:
81586 +       ntfs_free(up_e);
81588 +       return err;
81592 + * indx_insert_entry
81593 + *
81594 + * inserts new entry into index
81595 + */
81596 +int indx_insert_entry(struct ntfs_index *indx, struct ntfs_inode *ni,
81597 +                     const struct NTFS_DE *new_de, const void *ctx,
81598 +                     struct ntfs_fnd *fnd)
81600 +       int err;
81601 +       int diff;
81602 +       struct NTFS_DE *e;
81603 +       struct ntfs_fnd *fnd_a = NULL;
81604 +       struct INDEX_ROOT *root;
81606 +       if (!fnd) {
81607 +               fnd_a = fnd_get();
81608 +               if (!fnd_a) {
81609 +                       err = -ENOMEM;
81610 +                       goto out1;
81611 +               }
81612 +               fnd = fnd_a;
81613 +       }
81615 +       root = indx_get_root(indx, ni, NULL, NULL);
81616 +       if (!root) {
81617 +               err = -EINVAL;
81618 +               goto out;
81619 +       }
81621 +       if (fnd_is_empty(fnd)) {
81622 +               /* Find the spot the tree where we want to insert the new entry. */
81623 +               err = indx_find(indx, ni, root, new_de + 1,
81624 +                               le16_to_cpu(new_de->key_size), ctx, &diff, &e,
81625 +                               fnd);
81626 +               if (err)
81627 +                       goto out;
81629 +               if (!diff) {
81630 +                       err = -EEXIST;
81631 +                       goto out;
81632 +               }
81633 +       }
81635 +       if (!fnd->level) {
81636 +               /* The root is also a leaf, so we'll insert the new entry into it. */
81637 +               err = indx_insert_into_root(indx, ni, new_de, fnd->root_de, ctx,
81638 +                                           fnd);
81639 +               if (err)
81640 +                       goto out;
81641 +       } else {
81642 +               /* found a leaf buffer, so we'll insert the new entry into it.*/
81643 +               err = indx_insert_into_buffer(indx, ni, root, new_de, ctx,
81644 +                                             fnd->level - 1, fnd);
81645 +               if (err)
81646 +                       goto out;
81647 +       }
81649 +out:
81650 +       fnd_put(fnd_a);
81651 +out1:
81652 +       return err;
81656 + * indx_find_buffer
81657 + *
81658 + * locates a buffer the tree.
81659 + */
81660 +static struct indx_node *indx_find_buffer(struct ntfs_index *indx,
81661 +                                         struct ntfs_inode *ni,
81662 +                                         const struct INDEX_ROOT *root,
81663 +                                         __le64 vbn, struct indx_node *n)
81665 +       int err;
81666 +       const struct NTFS_DE *e;
81667 +       struct indx_node *r;
81668 +       const struct INDEX_HDR *hdr = n ? &n->index->ihdr : &root->ihdr;
81670 +       /* Step 1: Scan one level */
81671 +       for (e = hdr_first_de(hdr);; e = hdr_next_de(hdr, e)) {
81672 +               if (!e)
81673 +                       return ERR_PTR(-EINVAL);
81675 +               if (de_has_vcn(e) && vbn == de_get_vbn_le(e))
81676 +                       return n;
81678 +               if (de_is_last(e))
81679 +                       break;
81680 +       }
81682 +       /* Step2: Do recursion */
81683 +       e = Add2Ptr(hdr, le32_to_cpu(hdr->de_off));
81684 +       for (;;) {
81685 +               if (de_has_vcn_ex(e)) {
81686 +                       err = indx_read(indx, ni, de_get_vbn(e), &n);
81687 +                       if (err)
81688 +                               return ERR_PTR(err);
81690 +                       r = indx_find_buffer(indx, ni, root, vbn, n);
81691 +                       if (r)
81692 +                               return r;
81693 +               }
81695 +               if (de_is_last(e))
81696 +                       break;
81698 +               e = Add2Ptr(e, le16_to_cpu(e->size));
81699 +       }
81701 +       return NULL;
81705 + * indx_shrink
81706 + *
81707 + * deallocates unused tail indexes
81708 + */
81709 +static int indx_shrink(struct ntfs_index *indx, struct ntfs_inode *ni,
81710 +                      size_t bit)
81712 +       int err = 0;
81713 +       u64 bpb, new_data;
81714 +       size_t nbits;
81715 +       struct ATTRIB *b;
81716 +       struct ATTR_LIST_ENTRY *le = NULL;
81717 +       const struct INDEX_NAMES *in = &s_index_names[indx->type];
81719 +       b = ni_find_attr(ni, NULL, &le, ATTR_BITMAP, in->name, in->name_len,
81720 +                        NULL, NULL);
81722 +       if (!b)
81723 +               return -ENOENT;
81725 +       if (!b->non_res) {
81726 +               unsigned long pos;
81727 +               const unsigned long *bm = resident_data(b);
81729 +               nbits = le32_to_cpu(b->res.data_size) * 8;
81731 +               if (bit >= nbits)
81732 +                       return 0;
81734 +               pos = find_next_bit(bm, nbits, bit);
81735 +               if (pos < nbits)
81736 +                       return 0;
81737 +       } else {
81738 +               size_t used = MINUS_ONE_T;
81740 +               nbits = le64_to_cpu(b->nres.data_size) * 8;
81742 +               if (bit >= nbits)
81743 +                       return 0;
81745 +               err = scan_nres_bitmap(ni, b, indx, bit, &scan_for_used, &used);
81746 +               if (err)
81747 +                       return err;
81749 +               if (used != MINUS_ONE_T)
81750 +                       return 0;
81751 +       }
81753 +       new_data = (u64)bit << indx->index_bits;
81755 +       err = attr_set_size(ni, ATTR_ALLOC, in->name, in->name_len,
81756 +                           &indx->alloc_run, new_data, &new_data, false, NULL);
81757 +       if (err)
81758 +               return err;
81760 +       bpb = bitmap_size(bit);
81761 +       if (bpb * 8 == nbits)
81762 +               return 0;
81764 +       err = attr_set_size(ni, ATTR_BITMAP, in->name, in->name_len,
81765 +                           &indx->bitmap_run, bpb, &bpb, false, NULL);
81767 +       return err;
81770 +static int indx_free_children(struct ntfs_index *indx, struct ntfs_inode *ni,
81771 +                             const struct NTFS_DE *e, bool trim)
81773 +       int err;
81774 +       struct indx_node *n;
81775 +       struct INDEX_HDR *hdr;
81776 +       CLST vbn = de_get_vbn(e);
81777 +       size_t i;
81779 +       err = indx_read(indx, ni, vbn, &n);
81780 +       if (err)
81781 +               return err;
81783 +       hdr = &n->index->ihdr;
81784 +       /* First, recurse into the children, if any.*/
81785 +       if (hdr_has_subnode(hdr)) {
81786 +               for (e = hdr_first_de(hdr); e; e = hdr_next_de(hdr, e)) {
81787 +                       indx_free_children(indx, ni, e, false);
81788 +                       if (de_is_last(e))
81789 +                               break;
81790 +               }
81791 +       }
81793 +       put_indx_node(n);
81795 +       i = vbn >> indx->idx2vbn_bits;
81796 +       /* We've gotten rid of the children; add this buffer to the free list. */
81797 +       indx_mark_free(indx, ni, i);
81799 +       if (!trim)
81800 +               return 0;
81802 +       /*
81803 +        * If there are no used indexes after current free index
81804 +        * then we can truncate allocation and bitmap
81805 +        * Use bitmap to estimate the case
81806 +        */
81807 +       indx_shrink(indx, ni, i + 1);
81808 +       return 0;
81812 + * indx_get_entry_to_replace
81813 + *
81814 + * finds a replacement entry for a deleted entry
81815 + * always returns a node entry:
81816 + * NTFS_IE_HAS_SUBNODES is set the flags and the size includes the sub_vcn
81817 + */
81818 +static int indx_get_entry_to_replace(struct ntfs_index *indx,
81819 +                                    struct ntfs_inode *ni,
81820 +                                    const struct NTFS_DE *de_next,
81821 +                                    struct NTFS_DE **de_to_replace,
81822 +                                    struct ntfs_fnd *fnd)
81824 +       int err;
81825 +       int level = -1;
81826 +       CLST vbn;
81827 +       struct NTFS_DE *e, *te, *re;
81828 +       struct indx_node *n;
81829 +       struct INDEX_BUFFER *ib;
81831 +       *de_to_replace = NULL;
81833 +       /* Find first leaf entry down from de_next */
81834 +       vbn = de_get_vbn(de_next);
81835 +       for (;;) {
81836 +               n = NULL;
81837 +               err = indx_read(indx, ni, vbn, &n);
81838 +               if (err)
81839 +                       goto out;
81841 +               e = hdr_first_de(&n->index->ihdr);
81842 +               fnd_push(fnd, n, e);
81844 +               if (!de_is_last(e)) {
81845 +                       /*
81846 +                        * This buffer is non-empty, so its first entry could be used as the
81847 +                        * replacement entry.
81848 +                        */
81849 +                       level = fnd->level - 1;
81850 +               }
81852 +               if (!de_has_vcn(e))
81853 +                       break;
81855 +               /* This buffer is a node. Continue to go down */
81856 +               vbn = de_get_vbn(e);
81857 +       }
81859 +       if (level == -1)
81860 +               goto out;
81862 +       n = fnd->nodes[level];
81863 +       te = hdr_first_de(&n->index->ihdr);
81864 +       /* Copy the candidate entry into the replacement entry buffer. */
81865 +       re = ntfs_malloc(le16_to_cpu(te->size) + sizeof(u64));
81866 +       if (!re) {
81867 +               err = -ENOMEM;
81868 +               goto out;
81869 +       }
81871 +       *de_to_replace = re;
81872 +       memcpy(re, te, le16_to_cpu(te->size));
81874 +       if (!de_has_vcn(re)) {
81875 +               /*
81876 +                * The replacement entry we found doesn't have a sub_vcn. increase its size
81877 +                * to hold one.
81878 +                */
81879 +               le16_add_cpu(&re->size, sizeof(u64));
81880 +               re->flags |= NTFS_IE_HAS_SUBNODES;
81881 +       } else {
81882 +               /*
81883 +                * The replacement entry we found was a node entry, which means that all
81884 +                * its child buffers are empty. Return them to the free pool.
81885 +                */
81886 +               indx_free_children(indx, ni, te, true);
81887 +       }
81889 +       /*
81890 +        * Expunge the replacement entry from its former location,
81891 +        * and then write that buffer.
81892 +        */
81893 +       ib = n->index;
81894 +       e = hdr_delete_de(&ib->ihdr, te);
81896 +       fnd->de[level] = e;
81897 +       indx_write(indx, ni, n, 0);
81899 +       /* Check to see if this action created an empty leaf. */
81900 +       if (ib_is_leaf(ib) && ib_is_empty(ib))
81901 +               return 0;
81903 +out:
81904 +       fnd_clear(fnd);
81905 +       return err;
81909 + * indx_delete_entry
81910 + *
81911 + * deletes an entry from the index.
81912 + */
81913 +int indx_delete_entry(struct ntfs_index *indx, struct ntfs_inode *ni,
81914 +                     const void *key, u32 key_len, const void *ctx)
81916 +       int err, diff;
81917 +       struct INDEX_ROOT *root;
81918 +       struct INDEX_HDR *hdr;
81919 +       struct ntfs_fnd *fnd, *fnd2;
81920 +       struct INDEX_BUFFER *ib;
81921 +       struct NTFS_DE *e, *re, *next, *prev, *me;
81922 +       struct indx_node *n, *n2d = NULL;
81923 +       __le64 sub_vbn;
81924 +       int level, level2;
81925 +       struct ATTRIB *attr;
81926 +       struct mft_inode *mi;
81927 +       u32 e_size, root_size, new_root_size;
81928 +       size_t trim_bit;
81929 +       const struct INDEX_NAMES *in;
81931 +       fnd = fnd_get();
81932 +       if (!fnd) {
81933 +               err = -ENOMEM;
81934 +               goto out2;
81935 +       }
81937 +       fnd2 = fnd_get();
81938 +       if (!fnd2) {
81939 +               err = -ENOMEM;
81940 +               goto out1;
81941 +       }
81943 +       root = indx_get_root(indx, ni, &attr, &mi);
81944 +       if (!root) {
81945 +               err = -EINVAL;
81946 +               goto out;
81947 +       }
81949 +       /* Locate the entry to remove. */
81950 +       err = indx_find(indx, ni, root, key, key_len, ctx, &diff, &e, fnd);
81951 +       if (err)
81952 +               goto out;
81954 +       if (!e || diff) {
81955 +               err = -ENOENT;
81956 +               goto out;
81957 +       }
81959 +       level = fnd->level;
81961 +       if (level) {
81962 +               n = fnd->nodes[level - 1];
81963 +               e = fnd->de[level - 1];
81964 +               ib = n->index;
81965 +               hdr = &ib->ihdr;
81966 +       } else {
81967 +               hdr = &root->ihdr;
81968 +               e = fnd->root_de;
81969 +               n = NULL;
81970 +       }
81972 +       e_size = le16_to_cpu(e->size);
81974 +       if (!de_has_vcn_ex(e)) {
81975 +               /* The entry to delete is a leaf, so we can just rip it out */
81976 +               hdr_delete_de(hdr, e);
81978 +               if (!level) {
81979 +                       hdr->total = hdr->used;
81981 +                       /* Shrink resident root attribute */
81982 +                       mi_resize_attr(mi, attr, 0 - e_size);
81983 +                       goto out;
81984 +               }
81986 +               indx_write(indx, ni, n, 0);
81988 +               /*
81989 +                * Check to see if removing that entry made
81990 +                * the leaf empty.
81991 +                */
81992 +               if (ib_is_leaf(ib) && ib_is_empty(ib)) {
81993 +                       fnd_pop(fnd);
81994 +                       fnd_push(fnd2, n, e);
81995 +               }
81996 +       } else {
81997 +               /*
81998 +                * The entry we wish to delete is a node buffer, so we
81999 +                * have to find a replacement for it.
82000 +                */
82001 +               next = de_get_next(e);
82003 +               err = indx_get_entry_to_replace(indx, ni, next, &re, fnd2);
82004 +               if (err)
82005 +                       goto out;
82007 +               if (re) {
82008 +                       de_set_vbn_le(re, de_get_vbn_le(e));
82009 +                       hdr_delete_de(hdr, e);
82011 +                       err = level ? indx_insert_into_buffer(indx, ni, root,
82012 +                                                             re, ctx,
82013 +                                                             fnd->level - 1,
82014 +                                                             fnd)
82015 +                                   : indx_insert_into_root(indx, ni, re, e,
82016 +                                                           ctx, fnd);
82017 +                       ntfs_free(re);
82019 +                       if (err)
82020 +                               goto out;
82021 +               } else {
82022 +                       /*
82023 +                        * There is no replacement for the current entry.
82024 +                        * This means that the subtree rooted at its node is empty,
82025 +                        * and can be deleted, which turn means that the node can
82026 +                        * just inherit the deleted entry sub_vcn
82027 +                        */
82028 +                       indx_free_children(indx, ni, next, true);
82030 +                       de_set_vbn_le(next, de_get_vbn_le(e));
82031 +                       hdr_delete_de(hdr, e);
82032 +                       if (level) {
82033 +                               indx_write(indx, ni, n, 0);
82034 +                       } else {
82035 +                               hdr->total = hdr->used;
82037 +                               /* Shrink resident root attribute */
82038 +                               mi_resize_attr(mi, attr, 0 - e_size);
82039 +                       }
82040 +               }
82041 +       }
82043 +       /* Delete a branch of tree */
82044 +       if (!fnd2 || !fnd2->level)
82045 +               goto out;
82047 +       /* Reinit root 'cause it can be changed */
82048 +       root = indx_get_root(indx, ni, &attr, &mi);
82049 +       if (!root) {
82050 +               err = -EINVAL;
82051 +               goto out;
82052 +       }
82054 +       n2d = NULL;
82055 +       sub_vbn = fnd2->nodes[0]->index->vbn;
82056 +       level2 = 0;
82057 +       level = fnd->level;
82059 +       hdr = level ? &fnd->nodes[level - 1]->index->ihdr : &root->ihdr;
82061 +       /* Scan current level */
82062 +       for (e = hdr_first_de(hdr);; e = hdr_next_de(hdr, e)) {
82063 +               if (!e) {
82064 +                       err = -EINVAL;
82065 +                       goto out;
82066 +               }
82068 +               if (de_has_vcn(e) && sub_vbn == de_get_vbn_le(e))
82069 +                       break;
82071 +               if (de_is_last(e)) {
82072 +                       e = NULL;
82073 +                       break;
82074 +               }
82075 +       }
82077 +       if (!e) {
82078 +               /* Do slow search from root */
82079 +               struct indx_node *in;
82081 +               fnd_clear(fnd);
82083 +               in = indx_find_buffer(indx, ni, root, sub_vbn, NULL);
82084 +               if (IS_ERR(in)) {
82085 +                       err = PTR_ERR(in);
82086 +                       goto out;
82087 +               }
82089 +               if (in)
82090 +                       fnd_push(fnd, in, NULL);
82091 +       }
82093 +       /* Merge fnd2 -> fnd */
82094 +       for (level = 0; level < fnd2->level; level++) {
82095 +               fnd_push(fnd, fnd2->nodes[level], fnd2->de[level]);
82096 +               fnd2->nodes[level] = NULL;
82097 +       }
82098 +       fnd2->level = 0;
82100 +       hdr = NULL;
82101 +       for (level = fnd->level; level; level--) {
82102 +               struct indx_node *in = fnd->nodes[level - 1];
82104 +               ib = in->index;
82105 +               if (ib_is_empty(ib)) {
82106 +                       sub_vbn = ib->vbn;
82107 +               } else {
82108 +                       hdr = &ib->ihdr;
82109 +                       n2d = in;
82110 +                       level2 = level;
82111 +                       break;
82112 +               }
82113 +       }
82115 +       if (!hdr)
82116 +               hdr = &root->ihdr;
82118 +       e = hdr_first_de(hdr);
82119 +       if (!e) {
82120 +               err = -EINVAL;
82121 +               goto out;
82122 +       }
82124 +       if (hdr != &root->ihdr || !de_is_last(e)) {
82125 +               prev = NULL;
82126 +               while (!de_is_last(e)) {
82127 +                       if (de_has_vcn(e) && sub_vbn == de_get_vbn_le(e))
82128 +                               break;
82129 +                       prev = e;
82130 +                       e = hdr_next_de(hdr, e);
82131 +                       if (!e) {
82132 +                               err = -EINVAL;
82133 +                               goto out;
82134 +                       }
82135 +               }
82137 +               if (sub_vbn != de_get_vbn_le(e)) {
82138 +                       /*
82139 +                        * Didn't find the parent entry, although this buffer is the parent trail.
82140 +                        * Something is corrupt.
82141 +                        */
82142 +                       err = -EINVAL;
82143 +                       goto out;
82144 +               }
82146 +               if (de_is_last(e)) {
82147 +                       /*
82148 +                        * Since we can't remove the end entry, we'll remove its
82149 +                        * predecessor instead. This means we have to transfer the
82150 +                        * predecessor's sub_vcn to the end entry.
82151 +                        * Note: that this index block is not empty, so the
82152 +                        * predecessor must exist
82153 +                        */
82154 +                       if (!prev) {
82155 +                               err = -EINVAL;
82156 +                               goto out;
82157 +                       }
82159 +                       if (de_has_vcn(prev)) {
82160 +                               de_set_vbn_le(e, de_get_vbn_le(prev));
82161 +                       } else if (de_has_vcn(e)) {
82162 +                               le16_sub_cpu(&e->size, sizeof(u64));
82163 +                               e->flags &= ~NTFS_IE_HAS_SUBNODES;
82164 +                               le32_sub_cpu(&hdr->used, sizeof(u64));
82165 +                       }
82166 +                       e = prev;
82167 +               }
82169 +               /*
82170 +                * Copy the current entry into a temporary buffer (stripping off its
82171 +                * down-pointer, if any) and delete it from the current buffer or root,
82172 +                * as appropriate.
82173 +                */
82174 +               e_size = le16_to_cpu(e->size);
82175 +               me = ntfs_memdup(e, e_size);
82176 +               if (!me) {
82177 +                       err = -ENOMEM;
82178 +                       goto out;
82179 +               }
82181 +               if (de_has_vcn(me)) {
82182 +                       me->flags &= ~NTFS_IE_HAS_SUBNODES;
82183 +                       le16_sub_cpu(&me->size, sizeof(u64));
82184 +               }
82186 +               hdr_delete_de(hdr, e);
82188 +               if (hdr == &root->ihdr) {
82189 +                       level = 0;
82190 +                       hdr->total = hdr->used;
82192 +                       /* Shrink resident root attribute */
82193 +                       mi_resize_attr(mi, attr, 0 - e_size);
82194 +               } else {
82195 +                       indx_write(indx, ni, n2d, 0);
82196 +                       level = level2;
82197 +               }
82199 +               /* Mark unused buffers as free */
82200 +               trim_bit = -1;
82201 +               for (; level < fnd->level; level++) {
82202 +                       ib = fnd->nodes[level]->index;
82203 +                       if (ib_is_empty(ib)) {
82204 +                               size_t k = le64_to_cpu(ib->vbn) >>
82205 +                                          indx->idx2vbn_bits;
82207 +                               indx_mark_free(indx, ni, k);
82208 +                               if (k < trim_bit)
82209 +                                       trim_bit = k;
82210 +                       }
82211 +               }
82213 +               fnd_clear(fnd);
82214 +               /*fnd->root_de = NULL;*/
82216 +               /*
82217 +                * Re-insert the entry into the tree.
82218 +                * Find the spot the tree where we want to insert the new entry.
82219 +                */
82220 +               err = indx_insert_entry(indx, ni, me, ctx, fnd);
82221 +               ntfs_free(me);
82222 +               if (err)
82223 +                       goto out;
82225 +               if (trim_bit != -1)
82226 +                       indx_shrink(indx, ni, trim_bit);
82227 +       } else {
82228 +               /*
82229 +                * This tree needs to be collapsed down to an empty root.
82230 +                * Recreate the index root as an empty leaf and free all the bits the
82231 +                * index allocation bitmap.
82232 +                */
82233 +               fnd_clear(fnd);
82234 +               fnd_clear(fnd2);
82236 +               in = &s_index_names[indx->type];
82238 +               err = attr_set_size(ni, ATTR_ALLOC, in->name, in->name_len,
82239 +                                   &indx->alloc_run, 0, NULL, false, NULL);
82240 +               err = ni_remove_attr(ni, ATTR_ALLOC, in->name, in->name_len,
82241 +                                    false, NULL);
82242 +               run_close(&indx->alloc_run);
82244 +               err = attr_set_size(ni, ATTR_BITMAP, in->name, in->name_len,
82245 +                                   &indx->bitmap_run, 0, NULL, false, NULL);
82246 +               err = ni_remove_attr(ni, ATTR_BITMAP, in->name, in->name_len,
82247 +                                    false, NULL);
82248 +               run_close(&indx->bitmap_run);
82250 +               root = indx_get_root(indx, ni, &attr, &mi);
82251 +               if (!root) {
82252 +                       err = -EINVAL;
82253 +                       goto out;
82254 +               }
82256 +               root_size = le32_to_cpu(attr->res.data_size);
82257 +               new_root_size =
82258 +                       sizeof(struct INDEX_ROOT) + sizeof(struct NTFS_DE);
82260 +               if (new_root_size != root_size &&
82261 +                   !mi_resize_attr(mi, attr, new_root_size - root_size)) {
82262 +                       err = -EINVAL;
82263 +                       goto out;
82264 +               }
82266 +               /* Fill first entry */
82267 +               e = (struct NTFS_DE *)(root + 1);
82268 +               e->ref.low = 0;
82269 +               e->ref.high = 0;
82270 +               e->ref.seq = 0;
82271 +               e->size = cpu_to_le16(sizeof(struct NTFS_DE));
82272 +               e->flags = NTFS_IE_LAST; // 0x02
82273 +               e->key_size = 0;
82274 +               e->res = 0;
82276 +               hdr = &root->ihdr;
82277 +               hdr->flags = 0;
82278 +               hdr->used = hdr->total = cpu_to_le32(
82279 +                       new_root_size - offsetof(struct INDEX_ROOT, ihdr));
82280 +               mi->dirty = true;
82281 +       }
82283 +out:
82284 +       fnd_put(fnd2);
82285 +out1:
82286 +       fnd_put(fnd);
82287 +out2:
82288 +       return err;
82291 +int indx_update_dup(struct ntfs_inode *ni, struct ntfs_sb_info *sbi,
82292 +                   const struct ATTR_FILE_NAME *fname,
82293 +                   const struct NTFS_DUP_INFO *dup, int sync)
82295 +       int err, diff;
82296 +       struct NTFS_DE *e = NULL;
82297 +       struct ATTR_FILE_NAME *e_fname;
82298 +       struct ntfs_fnd *fnd;
82299 +       struct INDEX_ROOT *root;
82300 +       struct mft_inode *mi;
82301 +       struct ntfs_index *indx = &ni->dir;
82303 +       fnd = fnd_get();
82304 +       if (!fnd) {
82305 +               err = -ENOMEM;
82306 +               goto out1;
82307 +       }
82309 +       root = indx_get_root(indx, ni, NULL, &mi);
82310 +       if (!root) {
82311 +               err = -EINVAL;
82312 +               goto out;
82313 +       }
82315 +       /* Find entries tree and on disk */
82316 +       err = indx_find(indx, ni, root, fname, fname_full_size(fname), sbi,
82317 +                       &diff, &e, fnd);
82318 +       if (err)
82319 +               goto out;
82321 +       if (!e) {
82322 +               err = -EINVAL;
82323 +               goto out;
82324 +       }
82326 +       if (diff) {
82327 +               err = -EINVAL;
82328 +               goto out;
82329 +       }
82331 +       e_fname = (struct ATTR_FILE_NAME *)(e + 1);
82333 +       if (!memcmp(&e_fname->dup, dup, sizeof(*dup))) {
82334 +               /* nothing to update in index! Try to avoid this call */
82335 +               goto out;
82336 +       }
82338 +       memcpy(&e_fname->dup, dup, sizeof(*dup));
82340 +       if (fnd->level) {
82341 +               err = indx_write(indx, ni, fnd->nodes[fnd->level - 1], sync);
82342 +       } else if (sync) {
82343 +               mi->dirty = true;
82344 +               err = mi_write(mi, 1);
82345 +       } else {
82346 +               mi->dirty = true;
82347 +               mark_inode_dirty(&ni->vfs_inode);
82348 +       }
82350 +out:
82351 +       fnd_put(fnd);
82353 +out1:
82354 +       return err;
82356 diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
82357 new file mode 100644
82358 index 000000000000..9e836c192ddf
82359 --- /dev/null
82360 +++ b/fs/ntfs3/inode.c
82361 @@ -0,0 +1,2033 @@
82362 +// SPDX-License-Identifier: GPL-2.0
82364 + *
82365 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
82366 + *
82367 + */
82369 +#include <linux/blkdev.h>
82370 +#include <linux/buffer_head.h>
82371 +#include <linux/fs.h>
82372 +#include <linux/iversion.h>
82373 +#include <linux/mpage.h>
82374 +#include <linux/namei.h>
82375 +#include <linux/nls.h>
82376 +#include <linux/uio.h>
82377 +#include <linux/version.h>
82378 +#include <linux/writeback.h>
82380 +#include "debug.h"
82381 +#include "ntfs.h"
82382 +#include "ntfs_fs.h"
82385 + * ntfs_read_mft
82386 + *
82387 + * reads record and parses MFT
82388 + */
82389 +static struct inode *ntfs_read_mft(struct inode *inode,
82390 +                                  const struct cpu_str *name,
82391 +                                  const struct MFT_REF *ref)
82393 +       int err = 0;
82394 +       struct ntfs_inode *ni = ntfs_i(inode);
82395 +       struct super_block *sb = inode->i_sb;
82396 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
82397 +       mode_t mode = 0;
82398 +       struct ATTR_STD_INFO5 *std5 = NULL;
82399 +       struct ATTR_LIST_ENTRY *le;
82400 +       struct ATTRIB *attr;
82401 +       bool is_match = false;
82402 +       bool is_root = false;
82403 +       bool is_dir;
82404 +       unsigned long ino = inode->i_ino;
82405 +       u32 rp_fa = 0, asize, t32;
82406 +       u16 roff, rsize, names = 0;
82407 +       const struct ATTR_FILE_NAME *fname = NULL;
82408 +       const struct INDEX_ROOT *root;
82409 +       struct REPARSE_DATA_BUFFER rp; // 0x18 bytes
82410 +       u64 t64;
82411 +       struct MFT_REC *rec;
82412 +       struct runs_tree *run;
82414 +       inode->i_op = NULL;
82416 +       err = mi_init(&ni->mi, sbi, ino);
82417 +       if (err)
82418 +               goto out;
82420 +       if (!sbi->mft.ni && ino == MFT_REC_MFT && !sb->s_root) {
82421 +               t64 = sbi->mft.lbo >> sbi->cluster_bits;
82422 +               t32 = bytes_to_cluster(sbi, MFT_REC_VOL * sbi->record_size);
82423 +               sbi->mft.ni = ni;
82424 +               init_rwsem(&ni->file.run_lock);
82426 +               if (!run_add_entry(&ni->file.run, 0, t64, t32, true)) {
82427 +                       err = -ENOMEM;
82428 +                       goto out;
82429 +               }
82430 +       }
82432 +       err = mi_read(&ni->mi, ino == MFT_REC_MFT);
82434 +       if (err)
82435 +               goto out;
82437 +       rec = ni->mi.mrec;
82439 +       if (sbi->flags & NTFS_FLAGS_LOG_REPLAYING) {
82440 +               ;
82441 +       } else if (ref->seq != rec->seq) {
82442 +               err = -EINVAL;
82443 +               ntfs_err(sb, "MFT: r=%lx, expect seq=%x instead of %x!", ino,
82444 +                        le16_to_cpu(ref->seq), le16_to_cpu(rec->seq));
82445 +               goto out;
82446 +       } else if (!is_rec_inuse(rec)) {
82447 +               err = -EINVAL;
82448 +               ntfs_err(sb, "Inode r=%x is not in use!", (u32)ino);
82449 +               goto out;
82450 +       }
82452 +       if (le32_to_cpu(rec->total) != sbi->record_size) {
82453 +               // bad inode?
82454 +               err = -EINVAL;
82455 +               goto out;
82456 +       }
82458 +       if (!is_rec_base(rec))
82459 +               goto Ok;
82461 +       /* record should contain $I30 root */
82462 +       is_dir = rec->flags & RECORD_FLAG_DIR;
82464 +       inode->i_generation = le16_to_cpu(rec->seq);
82466 +       /* Enumerate all struct Attributes MFT */
82467 +       le = NULL;
82468 +       attr = NULL;
82470 +       /*
82471 +        * to reduce tab pressure use goto instead of
82472 +        * while( (attr = ni_enum_attr_ex(ni, attr, &le, NULL) ))
82473 +        */
82474 +next_attr:
82475 +       run = NULL;
82476 +       err = -EINVAL;
82477 +       attr = ni_enum_attr_ex(ni, attr, &le, NULL);
82478 +       if (!attr)
82479 +               goto end_enum;
82481 +       if (le && le->vcn) {
82482 +               /* This is non primary attribute segment. Ignore if not MFT */
82483 +               if (ino != MFT_REC_MFT || attr->type != ATTR_DATA)
82484 +                       goto next_attr;
82486 +               run = &ni->file.run;
82487 +               asize = le32_to_cpu(attr->size);
82488 +               goto attr_unpack_run;
82489 +       }
82491 +       roff = attr->non_res ? 0 : le16_to_cpu(attr->res.data_off);
82492 +       rsize = attr->non_res ? 0 : le32_to_cpu(attr->res.data_size);
82493 +       asize = le32_to_cpu(attr->size);
82495 +       switch (attr->type) {
82496 +       case ATTR_STD:
82497 +               if (attr->non_res ||
82498 +                   asize < sizeof(struct ATTR_STD_INFO) + roff ||
82499 +                   rsize < sizeof(struct ATTR_STD_INFO))
82500 +                       goto out;
82502 +               if (std5)
82503 +                       goto next_attr;
82505 +               std5 = Add2Ptr(attr, roff);
82507 +#ifdef STATX_BTIME
82508 +               nt2kernel(std5->cr_time, &ni->i_crtime);
82509 +#endif
82510 +               nt2kernel(std5->a_time, &inode->i_atime);
82511 +               nt2kernel(std5->c_time, &inode->i_ctime);
82512 +               nt2kernel(std5->m_time, &inode->i_mtime);
82514 +               ni->std_fa = std5->fa;
82516 +               if (asize >= sizeof(struct ATTR_STD_INFO5) + roff &&
82517 +                   rsize >= sizeof(struct ATTR_STD_INFO5))
82518 +                       ni->std_security_id = std5->security_id;
82519 +               goto next_attr;
82521 +       case ATTR_LIST:
82522 +               if (attr->name_len || le || ino == MFT_REC_LOG)
82523 +                       goto out;
82525 +               err = ntfs_load_attr_list(ni, attr);
82526 +               if (err)
82527 +                       goto out;
82529 +               le = NULL;
82530 +               attr = NULL;
82531 +               goto next_attr;
82533 +       case ATTR_NAME:
82534 +               if (attr->non_res || asize < SIZEOF_ATTRIBUTE_FILENAME + roff ||
82535 +                   rsize < SIZEOF_ATTRIBUTE_FILENAME)
82536 +                       goto out;
82538 +               fname = Add2Ptr(attr, roff);
82539 +               if (fname->type == FILE_NAME_DOS)
82540 +                       goto next_attr;
82542 +               names += 1;
82543 +               if (name && name->len == fname->name_len &&
82544 +                   !ntfs_cmp_names_cpu(name, (struct le_str *)&fname->name_len,
82545 +                                       NULL, false))
82546 +                       is_match = true;
82548 +               goto next_attr;
82550 +       case ATTR_DATA:
82551 +               if (is_dir) {
82552 +                       /* ignore data attribute in dir record */
82553 +                       goto next_attr;
82554 +               }
82556 +               if (ino == MFT_REC_BADCLUST && !attr->non_res)
82557 +                       goto next_attr;
82559 +               if (attr->name_len &&
82560 +                   ((ino != MFT_REC_BADCLUST || !attr->non_res ||
82561 +                     attr->name_len != ARRAY_SIZE(BAD_NAME) ||
82562 +                     memcmp(attr_name(attr), BAD_NAME, sizeof(BAD_NAME))) &&
82563 +                    (ino != MFT_REC_SECURE || !attr->non_res ||
82564 +                     attr->name_len != ARRAY_SIZE(SDS_NAME) ||
82565 +                     memcmp(attr_name(attr), SDS_NAME, sizeof(SDS_NAME))))) {
82566 +                       /* file contains stream attribute. ignore it */
82567 +                       goto next_attr;
82568 +               }
82570 +               if (is_attr_sparsed(attr))
82571 +                       ni->std_fa |= FILE_ATTRIBUTE_SPARSE_FILE;
82572 +               else
82573 +                       ni->std_fa &= ~FILE_ATTRIBUTE_SPARSE_FILE;
82575 +               if (is_attr_compressed(attr))
82576 +                       ni->std_fa |= FILE_ATTRIBUTE_COMPRESSED;
82577 +               else
82578 +                       ni->std_fa &= ~FILE_ATTRIBUTE_COMPRESSED;
82580 +               if (is_attr_encrypted(attr))
82581 +                       ni->std_fa |= FILE_ATTRIBUTE_ENCRYPTED;
82582 +               else
82583 +                       ni->std_fa &= ~FILE_ATTRIBUTE_ENCRYPTED;
82585 +               if (!attr->non_res) {
82586 +                       ni->i_valid = inode->i_size = rsize;
82587 +                       inode_set_bytes(inode, rsize);
82588 +                       t32 = asize;
82589 +               } else {
82590 +                       t32 = le16_to_cpu(attr->nres.run_off);
82591 +               }
82593 +               mode = S_IFREG | (0777 & sbi->options.fs_fmask_inv);
82595 +               if (!attr->non_res) {
82596 +                       ni->ni_flags |= NI_FLAG_RESIDENT;
82597 +                       goto next_attr;
82598 +               }
82600 +               inode_set_bytes(inode, attr_ondisk_size(attr));
82602 +               ni->i_valid = le64_to_cpu(attr->nres.valid_size);
82603 +               inode->i_size = le64_to_cpu(attr->nres.data_size);
82604 +               if (!attr->nres.alloc_size)
82605 +                       goto next_attr;
82607 +               run = ino == MFT_REC_BITMAP ? &sbi->used.bitmap.run
82608 +                                           : &ni->file.run;
82609 +               break;
82611 +       case ATTR_ROOT:
82612 +               if (attr->non_res)
82613 +                       goto out;
82615 +               root = Add2Ptr(attr, roff);
82616 +               is_root = true;
82618 +               if (attr->name_len != ARRAY_SIZE(I30_NAME) ||
82619 +                   memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME)))
82620 +                       goto next_attr;
82622 +               if (root->type != ATTR_NAME ||
82623 +                   root->rule != NTFS_COLLATION_TYPE_FILENAME)
82624 +                       goto out;
82626 +               if (!is_dir)
82627 +                       goto next_attr;
82629 +               ni->ni_flags |= NI_FLAG_DIR;
82631 +               err = indx_init(&ni->dir, sbi, attr, INDEX_MUTEX_I30);
82632 +               if (err)
82633 +                       goto out;
82635 +               mode = sb->s_root
82636 +                              ? (S_IFDIR | (0777 & sbi->options.fs_dmask_inv))
82637 +                              : (S_IFDIR | 0777);
82638 +               goto next_attr;
82640 +       case ATTR_ALLOC:
82641 +               if (!is_root || attr->name_len != ARRAY_SIZE(I30_NAME) ||
82642 +                   memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME)))
82643 +                       goto next_attr;
82645 +               inode->i_size = le64_to_cpu(attr->nres.data_size);
82646 +               ni->i_valid = le64_to_cpu(attr->nres.valid_size);
82647 +               inode_set_bytes(inode, le64_to_cpu(attr->nres.alloc_size));
82649 +               run = &ni->dir.alloc_run;
82650 +               break;
82652 +       case ATTR_BITMAP:
82653 +               if (ino == MFT_REC_MFT) {
82654 +                       if (!attr->non_res)
82655 +                               goto out;
82656 +#ifndef CONFIG_NTFS3_64BIT_CLUSTER
82657 +                       /* 0x20000000 = 2^32 / 8 */
82658 +                       if (le64_to_cpu(attr->nres.alloc_size) >= 0x20000000)
82659 +                               goto out;
82660 +#endif
82661 +                       run = &sbi->mft.bitmap.run;
82662 +                       break;
82663 +               } else if (is_dir && attr->name_len == ARRAY_SIZE(I30_NAME) &&
82664 +                          !memcmp(attr_name(attr), I30_NAME,
82665 +                                  sizeof(I30_NAME)) &&
82666 +                          attr->non_res) {
82667 +                       run = &ni->dir.bitmap_run;
82668 +                       break;
82669 +               }
82670 +               goto next_attr;
82672 +       case ATTR_REPARSE:
82673 +               if (attr->name_len)
82674 +                       goto next_attr;
82676 +               rp_fa = ni_parse_reparse(ni, attr, &rp);
82677 +               switch (rp_fa) {
82678 +               case REPARSE_LINK:
82679 +                       if (!attr->non_res) {
82680 +                               inode->i_size = rsize;
82681 +                               inode_set_bytes(inode, rsize);
82682 +                               t32 = asize;
82683 +                       } else {
82684 +                               inode->i_size =
82685 +                                       le64_to_cpu(attr->nres.data_size);
82686 +                               t32 = le16_to_cpu(attr->nres.run_off);
82687 +                       }
82689 +                       /* Looks like normal symlink */
82690 +                       ni->i_valid = inode->i_size;
82692 +                       /* Clear directory bit */
82693 +                       if (ni->ni_flags & NI_FLAG_DIR) {
82694 +                               indx_clear(&ni->dir);
82695 +                               memset(&ni->dir, 0, sizeof(ni->dir));
82696 +                               ni->ni_flags &= ~NI_FLAG_DIR;
82697 +                       } else {
82698 +                               run_close(&ni->file.run);
82699 +                       }
82700 +                       mode = S_IFLNK | 0777;
82701 +                       is_dir = false;
82702 +                       if (attr->non_res) {
82703 +                               run = &ni->file.run;
82704 +                               goto attr_unpack_run; // double break
82705 +                       }
82706 +                       break;
82708 +               case REPARSE_COMPRESSED:
82709 +                       break;
82711 +               case REPARSE_DEDUPLICATED:
82712 +                       break;
82713 +               }
82714 +               goto next_attr;
82716 +       case ATTR_EA_INFO:
82717 +               if (!attr->name_len &&
82718 +                   resident_data_ex(attr, sizeof(struct EA_INFO)))
82719 +                       ni->ni_flags |= NI_FLAG_EA;
82720 +               goto next_attr;
82722 +       default:
82723 +               goto next_attr;
82724 +       }
82726 +attr_unpack_run:
82727 +       roff = le16_to_cpu(attr->nres.run_off);
82729 +       t64 = le64_to_cpu(attr->nres.svcn);
82730 +       err = run_unpack_ex(run, sbi, ino, t64, le64_to_cpu(attr->nres.evcn),
82731 +                           t64, Add2Ptr(attr, roff), asize - roff);
82732 +       if (err < 0)
82733 +               goto out;
82734 +       err = 0;
82735 +       goto next_attr;
82737 +end_enum:
82739 +       if (!std5)
82740 +               goto out;
82742 +       if (!is_match && name) {
82743 +               /* reuse rec as buffer for ascii name */
82744 +               err = -ENOENT;
82745 +               goto out;
82746 +       }
82748 +       if (std5->fa & FILE_ATTRIBUTE_READONLY)
82749 +               mode &= ~0222;
82751 +       /* Setup 'uid' and 'gid' */
82752 +       inode->i_uid = sbi->options.fs_uid;
82753 +       inode->i_gid = sbi->options.fs_gid;
82755 +       if (!names) {
82756 +               err = -EINVAL;
82757 +               goto out;
82758 +       }
82760 +       if (S_ISDIR(mode)) {
82761 +               ni->std_fa |= FILE_ATTRIBUTE_DIRECTORY;
82763 +               /*
82764 +                * dot and dot-dot should be included in count but was not
82765 +                * included in enumeration.
82766 +                * Usually a hard links to directories are disabled
82767 +                */
82768 +               set_nlink(inode, 1);
82769 +               inode->i_op = &ntfs_dir_inode_operations;
82770 +               inode->i_fop = &ntfs_dir_operations;
82771 +               ni->i_valid = 0;
82772 +       } else if (S_ISLNK(mode)) {
82773 +               ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
82774 +               inode->i_op = &ntfs_link_inode_operations;
82775 +               inode->i_fop = NULL;
82776 +               inode_nohighmem(inode); // ??
82777 +               set_nlink(inode, names);
82778 +       } else if (S_ISREG(mode)) {
82779 +               ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
82781 +               set_nlink(inode, names);
82783 +               inode->i_op = &ntfs_file_inode_operations;
82784 +               inode->i_fop = &ntfs_file_operations;
82785 +               inode->i_mapping->a_ops =
82786 +                       is_compressed(ni) ? &ntfs_aops_cmpr : &ntfs_aops;
82788 +               if (ino != MFT_REC_MFT)
82789 +                       init_rwsem(&ni->file.run_lock);
82790 +       } else if (fname && fname->home.low == cpu_to_le32(MFT_REC_EXTEND) &&
82791 +                  fname->home.seq == cpu_to_le16(MFT_REC_EXTEND)) {
82792 +               /* Records in $Extend are not a files or general directories */
82793 +       } else {
82794 +               err = -EINVAL;
82795 +               goto out;
82796 +       }
82798 +       if ((sbi->options.sys_immutable &&
82799 +            (std5->fa & FILE_ATTRIBUTE_SYSTEM)) &&
82800 +           !S_ISFIFO(mode) && !S_ISSOCK(mode) && !S_ISLNK(mode)) {
82801 +               inode->i_flags |= S_IMMUTABLE;
82802 +       } else {
82803 +               inode->i_flags &= ~S_IMMUTABLE;
82804 +       }
82806 +       inode->i_mode = mode;
82807 +       if (!(ni->ni_flags & NI_FLAG_EA)) {
82808 +               /* if no xattr then no security (stored in xattr) */
82809 +               inode->i_flags |= S_NOSEC;
82810 +       }
82812 +Ok:
82813 +       if (ino == MFT_REC_MFT && !sb->s_root)
82814 +               sbi->mft.ni = NULL;
82816 +       unlock_new_inode(inode);
82818 +       return inode;
82820 +out:
82821 +       if (ino == MFT_REC_MFT && !sb->s_root)
82822 +               sbi->mft.ni = NULL;
82824 +       iget_failed(inode);
82825 +       return ERR_PTR(err);
82828 +/* returns 1 if match */
82829 +static int ntfs_test_inode(struct inode *inode, void *data)
82831 +       struct MFT_REF *ref = data;
82833 +       return ino_get(ref) == inode->i_ino;
82836 +static int ntfs_set_inode(struct inode *inode, void *data)
82838 +       const struct MFT_REF *ref = data;
82840 +       inode->i_ino = ino_get(ref);
82841 +       return 0;
82844 +struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref,
82845 +                        const struct cpu_str *name)
82847 +       struct inode *inode;
82849 +       inode = iget5_locked(sb, ino_get(ref), ntfs_test_inode, ntfs_set_inode,
82850 +                            (void *)ref);
82851 +       if (unlikely(!inode))
82852 +               return ERR_PTR(-ENOMEM);
82854 +       /* If this is a freshly allocated inode, need to read it now. */
82855 +       if (inode->i_state & I_NEW)
82856 +               inode = ntfs_read_mft(inode, name, ref);
82857 +       else if (ref->seq != ntfs_i(inode)->mi.mrec->seq) {
82858 +               /* inode overlaps? */
82859 +               make_bad_inode(inode);
82860 +       }
82862 +       return inode;
82865 +enum get_block_ctx {
82866 +       GET_BLOCK_GENERAL = 0,
82867 +       GET_BLOCK_WRITE_BEGIN = 1,
82868 +       GET_BLOCK_DIRECT_IO_R = 2,
82869 +       GET_BLOCK_DIRECT_IO_W = 3,
82870 +       GET_BLOCK_BMAP = 4,
82873 +static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
82874 +                                      struct buffer_head *bh, int create,
82875 +                                      enum get_block_ctx ctx)
82877 +       struct super_block *sb = inode->i_sb;
82878 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
82879 +       struct ntfs_inode *ni = ntfs_i(inode);
82880 +       struct page *page = bh->b_page;
82881 +       u8 cluster_bits = sbi->cluster_bits;
82882 +       u32 block_size = sb->s_blocksize;
82883 +       u64 bytes, lbo, valid;
82884 +       u32 off;
82885 +       int err;
82886 +       CLST vcn, lcn, len;
82887 +       bool new;
82889 +       /*clear previous state*/
82890 +       clear_buffer_new(bh);
82891 +       clear_buffer_uptodate(bh);
82893 +       /* direct write uses 'create=0'*/
82894 +       if (!create && vbo >= ni->i_valid) {
82895 +               /* out of valid */
82896 +               return 0;
82897 +       }
82899 +       if (vbo >= inode->i_size) {
82900 +               /* out of size */
82901 +               return 0;
82902 +       }
82904 +       if (is_resident(ni)) {
82905 +               ni_lock(ni);
82906 +               err = attr_data_read_resident(ni, page);
82907 +               ni_unlock(ni);
82909 +               if (!err)
82910 +                       set_buffer_uptodate(bh);
82911 +               bh->b_size = block_size;
82912 +               return err;
82913 +       }
82915 +       vcn = vbo >> cluster_bits;
82916 +       off = vbo & sbi->cluster_mask;
82917 +       new = false;
82919 +       err = attr_data_get_block(ni, vcn, 1, &lcn, &len, create ? &new : NULL);
82920 +       if (err)
82921 +               goto out;
82923 +       if (!len)
82924 +               return 0;
82926 +       bytes = ((u64)len << cluster_bits) - off;
82928 +       if (lcn == SPARSE_LCN) {
82929 +               if (!create) {
82930 +                       if (bh->b_size > bytes)
82931 +                               bh->b_size = bytes;
82933 +                       return 0;
82934 +               }
82935 +               WARN_ON(1);
82936 +       }
82938 +       if (new) {
82939 +               set_buffer_new(bh);
82940 +               if ((len << cluster_bits) > block_size)
82941 +                       ntfs_sparse_cluster(inode, page, vcn, len);
82942 +       }
82944 +       lbo = ((u64)lcn << cluster_bits) + off;
82946 +       set_buffer_mapped(bh);
82947 +       bh->b_bdev = sb->s_bdev;
82948 +       bh->b_blocknr = lbo >> sb->s_blocksize_bits;
82950 +       valid = ni->i_valid;
82952 +       if (ctx == GET_BLOCK_DIRECT_IO_W) {
82953 +               /*ntfs_direct_IO will update ni->i_valid */
82954 +               if (vbo >= valid)
82955 +                       set_buffer_new(bh);
82956 +       } else if (create) {
82957 +               /*normal write*/
82958 +               if (vbo >= valid) {
82959 +                       set_buffer_new(bh);
82960 +                       if (bytes > bh->b_size)
82961 +                               bytes = bh->b_size;
82962 +                       ni->i_valid = vbo + bytes;
82963 +                       mark_inode_dirty(inode);
82964 +               }
82965 +       } else if (valid >= inode->i_size) {
82966 +               /* normal read of normal file*/
82967 +       } else if (vbo >= valid) {
82968 +               /* read out of valid data*/
82969 +               /* should never be here 'cause already checked */
82970 +               clear_buffer_mapped(bh);
82971 +       } else if (vbo + bytes <= valid) {
82972 +               /* normal read */
82973 +       } else if (vbo + block_size <= valid) {
82974 +               /* normal short read */
82975 +               bytes = block_size;
82976 +       } else {
82977 +               /*
82978 +                * read across valid size: vbo < valid && valid < vbo + block_size
82979 +                */
82980 +               u32 voff = valid - vbo;
82982 +               bh->b_size = bytes = block_size;
82983 +               off = vbo & (PAGE_SIZE - 1);
82984 +               set_bh_page(bh, page, off);
82985 +               ll_rw_block(REQ_OP_READ, 0, 1, &bh);
82986 +               wait_on_buffer(bh);
82987 +               /* Uhhuh. Read error. Complain and punt. */
82988 +               if (!buffer_uptodate(bh)) {
82989 +                       err = -EIO;
82990 +                       goto out;
82991 +               }
82992 +               zero_user_segment(page, off + voff, off + block_size);
82993 +       }
82995 +       if (bh->b_size > bytes)
82996 +               bh->b_size = bytes;
82998 +#ifndef __LP64__
82999 +       if (ctx == GET_BLOCK_DIRECT_IO_W || ctx == GET_BLOCK_DIRECT_IO_R) {
83000 +               static_assert(sizeof(size_t) < sizeof(loff_t));
83001 +               if (bytes > 0x40000000u)
83002 +                       bh->b_size = 0x40000000u;
83003 +       }
83004 +#endif
83006 +       return 0;
83008 +out:
83009 +       return err;
83012 +int ntfs_get_block(struct inode *inode, sector_t vbn,
83013 +                  struct buffer_head *bh_result, int create)
83015 +       return ntfs_get_block_vbo(inode, (u64)vbn << inode->i_blkbits,
83016 +                                 bh_result, create, GET_BLOCK_GENERAL);
83019 +static int ntfs_get_block_bmap(struct inode *inode, sector_t vsn,
83020 +                              struct buffer_head *bh_result, int create)
83022 +       return ntfs_get_block_vbo(inode,
83023 +                                 (u64)vsn << inode->i_sb->s_blocksize_bits,
83024 +                                 bh_result, create, GET_BLOCK_BMAP);
83027 +static sector_t ntfs_bmap(struct address_space *mapping, sector_t block)
83029 +       return generic_block_bmap(mapping, block, ntfs_get_block_bmap);
83032 +static int ntfs_readpage(struct file *file, struct page *page)
83034 +       int err;
83035 +       struct address_space *mapping = page->mapping;
83036 +       struct inode *inode = mapping->host;
83037 +       struct ntfs_inode *ni = ntfs_i(inode);
83039 +       if (is_resident(ni)) {
83040 +               ni_lock(ni);
83041 +               err = attr_data_read_resident(ni, page);
83042 +               ni_unlock(ni);
83043 +               if (err != E_NTFS_NONRESIDENT) {
83044 +                       unlock_page(page);
83045 +                       return err;
83046 +               }
83047 +       }
83049 +       if (is_compressed(ni)) {
83050 +               ni_lock(ni);
83051 +               err = ni_readpage_cmpr(ni, page);
83052 +               ni_unlock(ni);
83053 +               return err;
83054 +       }
83056 +       /* normal + sparse files */
83057 +       return mpage_readpage(page, ntfs_get_block);
83060 +static void ntfs_readahead(struct readahead_control *rac)
83062 +       struct address_space *mapping = rac->mapping;
83063 +       struct inode *inode = mapping->host;
83064 +       struct ntfs_inode *ni = ntfs_i(inode);
83065 +       u64 valid;
83066 +       loff_t pos;
83068 +       if (is_resident(ni)) {
83069 +               /* no readahead for resident */
83070 +               return;
83071 +       }
83073 +       if (is_compressed(ni)) {
83074 +               /* no readahead for compressed */
83075 +               return;
83076 +       }
83078 +       valid = ni->i_valid;
83079 +       pos = readahead_pos(rac);
83081 +       if (valid < i_size_read(inode) && pos <= valid &&
83082 +           valid < pos + readahead_length(rac)) {
83083 +               /* range cross 'valid'. read it page by page */
83084 +               return;
83085 +       }
83087 +       mpage_readahead(rac, ntfs_get_block);
83090 +static int ntfs_get_block_direct_IO_R(struct inode *inode, sector_t iblock,
83091 +                                     struct buffer_head *bh_result, int create)
83093 +       return ntfs_get_block_vbo(inode, (u64)iblock << inode->i_blkbits,
83094 +                                 bh_result, create, GET_BLOCK_DIRECT_IO_R);
83097 +static int ntfs_get_block_direct_IO_W(struct inode *inode, sector_t iblock,
83098 +                                     struct buffer_head *bh_result, int create)
83100 +       return ntfs_get_block_vbo(inode, (u64)iblock << inode->i_blkbits,
83101 +                                 bh_result, create, GET_BLOCK_DIRECT_IO_W);
83104 +static ssize_t ntfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
83106 +       struct file *file = iocb->ki_filp;
83107 +       struct address_space *mapping = file->f_mapping;
83108 +       struct inode *inode = mapping->host;
83109 +       struct ntfs_inode *ni = ntfs_i(inode);
83110 +       size_t count = iov_iter_count(iter);
83111 +       loff_t vbo = iocb->ki_pos;
83112 +       loff_t end = vbo + count;
83113 +       int wr = iov_iter_rw(iter) & WRITE;
83114 +       const struct iovec *iov = iter->iov;
83115 +       unsigned long nr_segs = iter->nr_segs;
83116 +       loff_t valid;
83117 +       ssize_t ret;
83119 +       if (is_resident(ni)) {
83120 +               /*switch to buffered write*/
83121 +               ret = 0;
83122 +               goto out;
83123 +       }
83125 +       ret = blockdev_direct_IO(iocb, inode, iter,
83126 +                                wr ? ntfs_get_block_direct_IO_W
83127 +                                   : ntfs_get_block_direct_IO_R);
83128 +       valid = ni->i_valid;
83129 +       if (wr) {
83130 +               if (ret <= 0)
83131 +                       goto out;
83133 +               vbo += ret;
83134 +               if (vbo > valid && !S_ISBLK(inode->i_mode)) {
83135 +                       ni->i_valid = vbo;
83136 +                       mark_inode_dirty(inode);
83137 +               }
83138 +       } else if (vbo < valid && valid < end) {
83139 +               /* fix page */
83140 +               unsigned long uaddr = ~0ul;
83141 +               struct page *page;
83142 +               long i, npages;
83143 +               size_t dvbo = valid - vbo;
83144 +               size_t off = 0;
83146 +               /*Find user address*/
83147 +               for (i = 0; i < nr_segs; i++) {
83148 +                       if (off <= dvbo && dvbo < off + iov[i].iov_len) {
83149 +                               uaddr = (unsigned long)iov[i].iov_base + dvbo -
83150 +                                       off;
83151 +                               break;
83152 +                       }
83153 +                       off += iov[i].iov_len;
83154 +               }
83156 +               if (uaddr == ~0ul)
83157 +                       goto fix_error;
83159 +               npages = get_user_pages_unlocked(uaddr, 1, &page, FOLL_WRITE);
83161 +               if (npages <= 0)
83162 +                       goto fix_error;
83164 +               zero_user_segment(page, valid & (PAGE_SIZE - 1), PAGE_SIZE);
83165 +               put_page(page);
83166 +       }
83168 +out:
83169 +       return ret;
83170 +fix_error:
83171 +       ntfs_inode_warn(inode, "file garbage at 0x%llx", valid);
83172 +       goto out;
83175 +int ntfs_set_size(struct inode *inode, u64 new_size)
83177 +       struct super_block *sb = inode->i_sb;
83178 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
83179 +       struct ntfs_inode *ni = ntfs_i(inode);
83180 +       int err;
83182 +       /* Check for maximum file size */
83183 +       if (is_sparsed(ni) || is_compressed(ni)) {
83184 +               if (new_size > sbi->maxbytes_sparse) {
83185 +                       err = -EFBIG;
83186 +                       goto out;
83187 +               }
83188 +       } else if (new_size > sbi->maxbytes) {
83189 +               err = -EFBIG;
83190 +               goto out;
83191 +       }
83193 +       ni_lock(ni);
83194 +       down_write(&ni->file.run_lock);
83196 +       err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
83197 +                           &ni->i_valid, true, NULL);
83199 +       up_write(&ni->file.run_lock);
83200 +       ni_unlock(ni);
83202 +       mark_inode_dirty(inode);
83204 +out:
83205 +       return err;
83208 +static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
83210 +       struct address_space *mapping = page->mapping;
83211 +       struct inode *inode = mapping->host;
83212 +       struct ntfs_inode *ni = ntfs_i(inode);
83213 +       int err;
83215 +       if (is_resident(ni)) {
83216 +               ni_lock(ni);
83217 +               err = attr_data_write_resident(ni, page);
83218 +               ni_unlock(ni);
83219 +               if (err != E_NTFS_NONRESIDENT) {
83220 +                       unlock_page(page);
83221 +                       return err;
83222 +               }
83223 +       }
83225 +       return block_write_full_page(page, ntfs_get_block, wbc);
83228 +static int ntfs_writepages(struct address_space *mapping,
83229 +                          struct writeback_control *wbc)
83231 +       struct inode *inode = mapping->host;
83232 +       struct ntfs_inode *ni = ntfs_i(inode);
83233 +       /* redirect call to 'ntfs_writepage' for resident files*/
83234 +       get_block_t *get_block = is_resident(ni) ? NULL : &ntfs_get_block;
83236 +       return mpage_writepages(mapping, wbc, get_block);
83239 +static int ntfs_get_block_write_begin(struct inode *inode, sector_t vbn,
83240 +                                     struct buffer_head *bh_result, int create)
83242 +       return ntfs_get_block_vbo(inode, (u64)vbn << inode->i_blkbits,
83243 +                                 bh_result, create, GET_BLOCK_WRITE_BEGIN);
83246 +static int ntfs_write_begin(struct file *file, struct address_space *mapping,
83247 +                           loff_t pos, u32 len, u32 flags, struct page **pagep,
83248 +                           void **fsdata)
83250 +       int err;
83251 +       struct inode *inode = mapping->host;
83252 +       struct ntfs_inode *ni = ntfs_i(inode);
83254 +       *pagep = NULL;
83255 +       if (is_resident(ni)) {
83256 +               struct page *page = grab_cache_page_write_begin(
83257 +                       mapping, pos >> PAGE_SHIFT, flags);
83259 +               if (!page) {
83260 +                       err = -ENOMEM;
83261 +                       goto out;
83262 +               }
83264 +               ni_lock(ni);
83265 +               err = attr_data_read_resident(ni, page);
83266 +               ni_unlock(ni);
83268 +               if (!err) {
83269 +                       *pagep = page;
83270 +                       goto out;
83271 +               }
83272 +               unlock_page(page);
83273 +               put_page(page);
83275 +               if (err != E_NTFS_NONRESIDENT)
83276 +                       goto out;
83277 +       }
83279 +       err = block_write_begin(mapping, pos, len, flags, pagep,
83280 +                               ntfs_get_block_write_begin);
83282 +out:
83283 +       return err;
83286 +/* address_space_operations::write_end */
83287 +static int ntfs_write_end(struct file *file, struct address_space *mapping,
83288 +                         loff_t pos, u32 len, u32 copied, struct page *page,
83289 +                         void *fsdata)
83292 +       struct inode *inode = mapping->host;
83293 +       struct ntfs_inode *ni = ntfs_i(inode);
83294 +       u64 valid = ni->i_valid;
83295 +       bool dirty = false;
83296 +       int err;
83298 +       if (is_resident(ni)) {
83299 +               ni_lock(ni);
83300 +               err = attr_data_write_resident(ni, page);
83301 +               ni_unlock(ni);
83302 +               if (!err) {
83303 +                       dirty = true;
83304 +                       /* clear any buffers in page*/
83305 +                       if (page_has_buffers(page)) {
83306 +                               struct buffer_head *head, *bh;
83308 +                               bh = head = page_buffers(page);
83309 +                               do {
83310 +                                       clear_buffer_dirty(bh);
83311 +                                       clear_buffer_mapped(bh);
83312 +                                       set_buffer_uptodate(bh);
83313 +                               } while (head != (bh = bh->b_this_page));
83314 +                       }
83315 +                       SetPageUptodate(page);
83316 +                       err = copied;
83317 +               }
83318 +               unlock_page(page);
83319 +               put_page(page);
83320 +       } else {
83321 +               err = generic_write_end(file, mapping, pos, len, copied, page,
83322 +                                       fsdata);
83323 +       }
83325 +       if (err >= 0) {
83326 +               if (!(ni->std_fa & FILE_ATTRIBUTE_ARCHIVE)) {
83327 +                       inode->i_ctime = inode->i_mtime = current_time(inode);
83328 +                       ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
83329 +                       dirty = true;
83330 +               }
83332 +               if (valid != ni->i_valid) {
83333 +                       /* ni->i_valid is changed in ntfs_get_block_vbo */
83334 +                       dirty = true;
83335 +               }
83337 +               if (dirty)
83338 +                       mark_inode_dirty(inode);
83339 +       }
83341 +       return err;
83344 +int reset_log_file(struct inode *inode)
83346 +       int err;
83347 +       loff_t pos = 0;
83348 +       u32 log_size = inode->i_size;
83349 +       struct address_space *mapping = inode->i_mapping;
83351 +       for (;;) {
83352 +               u32 len;
83353 +               void *kaddr;
83354 +               struct page *page;
83356 +               len = pos + PAGE_SIZE > log_size ? (log_size - pos) : PAGE_SIZE;
83358 +               err = block_write_begin(mapping, pos, len, 0, &page,
83359 +                                       ntfs_get_block_write_begin);
83360 +               if (err)
83361 +                       goto out;
83363 +               kaddr = kmap_atomic(page);
83364 +               memset(kaddr, -1, len);
83365 +               kunmap_atomic(kaddr);
83366 +               flush_dcache_page(page);
83368 +               err = block_write_end(NULL, mapping, pos, len, len, page, NULL);
83369 +               if (err < 0)
83370 +                       goto out;
83371 +               pos += len;
83373 +               if (pos >= log_size)
83374 +                       break;
83375 +               balance_dirty_pages_ratelimited(mapping);
83376 +       }
83377 +out:
83378 +       mark_inode_dirty_sync(inode);
83380 +       return err;
83383 +int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc)
83385 +       return _ni_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
83388 +int ntfs_sync_inode(struct inode *inode)
83390 +       return _ni_write_inode(inode, 1);
83394 + * helper function for ntfs_flush_inodes.  This writes both the inode
83395 + * and the file data blocks, waiting for in flight data blocks before
83396 + * the start of the call.  It does not wait for any io started
83397 + * during the call
83398 + */
83399 +static int writeback_inode(struct inode *inode)
83401 +       int ret = sync_inode_metadata(inode, 0);
83403 +       if (!ret)
83404 +               ret = filemap_fdatawrite(inode->i_mapping);
83405 +       return ret;
83409 + * write data and metadata corresponding to i1 and i2.  The io is
83410 + * started but we do not wait for any of it to finish.
83411 + *
83412 + * filemap_flush is used for the block device, so if there is a dirty
83413 + * page for a block already in flight, we will not wait and start the
83414 + * io over again
83415 + */
83416 +int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
83417 +                     struct inode *i2)
83419 +       int ret = 0;
83421 +       if (i1)
83422 +               ret = writeback_inode(i1);
83423 +       if (!ret && i2)
83424 +               ret = writeback_inode(i2);
83425 +       if (!ret)
83426 +               ret = filemap_flush(sb->s_bdev->bd_inode->i_mapping);
83427 +       return ret;
83430 +int inode_write_data(struct inode *inode, const void *data, size_t bytes)
83432 +       pgoff_t idx;
83434 +       /* Write non resident data */
83435 +       for (idx = 0; bytes; idx++) {
83436 +               size_t op = bytes > PAGE_SIZE ? PAGE_SIZE : bytes;
83437 +               struct page *page = ntfs_map_page(inode->i_mapping, idx);
83439 +               if (IS_ERR(page))
83440 +                       return PTR_ERR(page);
83442 +               lock_page(page);
83443 +               WARN_ON(!PageUptodate(page));
83444 +               ClearPageUptodate(page);
83446 +               memcpy(page_address(page), data, op);
83448 +               flush_dcache_page(page);
83449 +               SetPageUptodate(page);
83450 +               unlock_page(page);
83452 +               ntfs_unmap_page(page);
83454 +               bytes -= op;
83455 +               data = Add2Ptr(data, PAGE_SIZE);
83456 +       }
83457 +       return 0;
83461 + * number of bytes to for REPARSE_DATA_BUFFER(IO_REPARSE_TAG_SYMLINK)
83462 + * for unicode string of 'uni_len' length
83463 + */
83464 +static inline u32 ntfs_reparse_bytes(u32 uni_len)
83466 +       /* header + unicode string + decorated unicode string */
83467 +       return sizeof(short) * (2 * uni_len + 4) +
83468 +              offsetof(struct REPARSE_DATA_BUFFER,
83469 +                       SymbolicLinkReparseBuffer.PathBuffer);
83472 +static struct REPARSE_DATA_BUFFER *
83473 +ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname,
83474 +                          u32 size, u16 *nsize)
83476 +       int i, err;
83477 +       struct REPARSE_DATA_BUFFER *rp;
83478 +       __le16 *rp_name;
83479 +       typeof(rp->SymbolicLinkReparseBuffer) *rs;
83481 +       rp = ntfs_zalloc(ntfs_reparse_bytes(2 * size + 2));
83482 +       if (!rp)
83483 +               return ERR_PTR(-ENOMEM);
83485 +       rs = &rp->SymbolicLinkReparseBuffer;
83486 +       rp_name = rs->PathBuffer;
83488 +       /* Convert link name to utf16 */
83489 +       err = ntfs_nls_to_utf16(sbi, symname, size,
83490 +                               (struct cpu_str *)(rp_name - 1), 2 * size,
83491 +                               UTF16_LITTLE_ENDIAN);
83492 +       if (err < 0)
83493 +               goto out;
83495 +       /* err = the length of unicode name of symlink */
83496 +       *nsize = ntfs_reparse_bytes(err);
83498 +       if (*nsize > sbi->reparse.max_size) {
83499 +               err = -EFBIG;
83500 +               goto out;
83501 +       }
83503 +       /* translate linux '/' into windows '\' */
83504 +       for (i = 0; i < err; i++) {
83505 +               if (rp_name[i] == cpu_to_le16('/'))
83506 +                       rp_name[i] = cpu_to_le16('\\');
83507 +       }
83509 +       rp->ReparseTag = IO_REPARSE_TAG_SYMLINK;
83510 +       rp->ReparseDataLength =
83511 +               cpu_to_le16(*nsize - offsetof(struct REPARSE_DATA_BUFFER,
83512 +                                             SymbolicLinkReparseBuffer));
83514 +       /* PrintName + SubstituteName */
83515 +       rs->SubstituteNameOffset = cpu_to_le16(sizeof(short) * err);
83516 +       rs->SubstituteNameLength = cpu_to_le16(sizeof(short) * err + 8);
83517 +       rs->PrintNameLength = rs->SubstituteNameOffset;
83519 +       /*
83520 +        * TODO: use relative path if possible to allow windows to parse this path
83521 +        * 0-absolute path 1- relative path (SYMLINK_FLAG_RELATIVE)
83522 +        */
83523 +       rs->Flags = 0;
83525 +       memmove(rp_name + err + 4, rp_name, sizeof(short) * err);
83527 +       /* decorate SubstituteName */
83528 +       rp_name += err;
83529 +       rp_name[0] = cpu_to_le16('\\');
83530 +       rp_name[1] = cpu_to_le16('?');
83531 +       rp_name[2] = cpu_to_le16('?');
83532 +       rp_name[3] = cpu_to_le16('\\');
83534 +       return rp;
83535 +out:
83536 +       ntfs_free(rp);
83537 +       return ERR_PTR(err);
83540 +struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
83541 +                               struct inode *dir, struct dentry *dentry,
83542 +                               const struct cpu_str *uni, umode_t mode,
83543 +                               dev_t dev, const char *symname, u32 size,
83544 +                               int excl, struct ntfs_fnd *fnd)
83546 +       int err;
83547 +       struct super_block *sb = dir->i_sb;
83548 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
83549 +       const struct qstr *name = &dentry->d_name;
83550 +       CLST ino = 0;
83551 +       struct ntfs_inode *dir_ni = ntfs_i(dir);
83552 +       struct ntfs_inode *ni = NULL;
83553 +       struct inode *inode = NULL;
83554 +       struct ATTRIB *attr;
83555 +       struct ATTR_STD_INFO5 *std5;
83556 +       struct ATTR_FILE_NAME *fname;
83557 +       struct MFT_REC *rec;
83558 +       u32 asize, dsize, sd_size;
83559 +       enum FILE_ATTRIBUTE fa;
83560 +       __le32 security_id = SECURITY_ID_INVALID;
83561 +       CLST vcn;
83562 +       const void *sd;
83563 +       u16 t16, nsize = 0, aid = 0;
83564 +       struct INDEX_ROOT *root, *dir_root;
83565 +       struct NTFS_DE *e, *new_de = NULL;
83566 +       struct REPARSE_DATA_BUFFER *rp = NULL;
83567 +       bool is_dir = S_ISDIR(mode);
83568 +       bool is_link = S_ISLNK(mode);
83569 +       bool rp_inserted = false;
83570 +       bool is_sp = S_ISCHR(mode) || S_ISBLK(mode) || S_ISFIFO(mode) ||
83571 +                    S_ISSOCK(mode);
83573 +       if (is_sp)
83574 +               return ERR_PTR(-EOPNOTSUPP);
83576 +       dir_root = indx_get_root(&dir_ni->dir, dir_ni, NULL, NULL);
83577 +       if (!dir_root)
83578 +               return ERR_PTR(-EINVAL);
83580 +       if (is_dir) {
83581 +               /* use parent's directory attributes */
83582 +               fa = dir_ni->std_fa | FILE_ATTRIBUTE_DIRECTORY |
83583 +                    FILE_ATTRIBUTE_ARCHIVE;
83584 +               /*
83585 +                * By default child directory inherits parent attributes
83586 +                * root directory is hidden + system
83587 +                * Make an exception for children in root
83588 +                */
83589 +               if (dir->i_ino == MFT_REC_ROOT)
83590 +                       fa &= ~(FILE_ATTRIBUTE_HIDDEN | FILE_ATTRIBUTE_SYSTEM);
83591 +       } else if (is_link) {
83592 +               /* It is good idea that link should be the same type (file/dir) as target */
83593 +               fa = FILE_ATTRIBUTE_REPARSE_POINT;
83595 +               /*
83596 +                * linux: there are dir/file/symlink and so on
83597 +                * NTFS: symlinks are "dir + reparse" or "file + reparse"
83598 +                * It is good idea to create:
83599 +                * dir + reparse if 'symname' points to directory
83600 +                * or
83601 +                * file + reparse if 'symname' points to file
83602 +                * Unfortunately kern_path hangs if symname contains 'dir'
83603 +                */
83605 +               /*
83606 +                *      struct path path;
83607 +                *
83608 +                *      if (!kern_path(symname, LOOKUP_FOLLOW, &path)){
83609 +                *              struct inode *target = d_inode(path.dentry);
83610 +                *
83611 +                *              if (S_ISDIR(target->i_mode))
83612 +                *                      fa |= FILE_ATTRIBUTE_DIRECTORY;
83613 +                *              // if ( target->i_sb == sb ){
83614 +                *              //      use relative path?
83615 +                *              // }
83616 +                *              path_put(&path);
83617 +                *      }
83618 +                */
83619 +       } else if (sbi->options.sparse) {
83620 +               /* sparsed regular file, cause option 'sparse' */
83621 +               fa = FILE_ATTRIBUTE_SPARSE_FILE | FILE_ATTRIBUTE_ARCHIVE;
83622 +       } else if (dir_ni->std_fa & FILE_ATTRIBUTE_COMPRESSED) {
83623 +               /* compressed regular file, if parent is compressed */
83624 +               fa = FILE_ATTRIBUTE_COMPRESSED | FILE_ATTRIBUTE_ARCHIVE;
83625 +       } else {
83626 +               /* regular file, default attributes */
83627 +               fa = FILE_ATTRIBUTE_ARCHIVE;
83628 +       }
83630 +       if (!(mode & 0222))
83631 +               fa |= FILE_ATTRIBUTE_READONLY;
83633 +       /* allocate PATH_MAX bytes */
83634 +       new_de = __getname();
83635 +       if (!new_de) {
83636 +               err = -ENOMEM;
83637 +               goto out1;
83638 +       }
83640 +       /*mark rw ntfs as dirty. it will be cleared at umount*/
83641 +       ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
83643 +       /* Step 1: allocate and fill new mft record */
83644 +       err = ntfs_look_free_mft(sbi, &ino, false, NULL, NULL);
83645 +       if (err)
83646 +               goto out2;
83648 +       ni = ntfs_new_inode(sbi, ino, fa & FILE_ATTRIBUTE_DIRECTORY);
83649 +       if (IS_ERR(ni)) {
83650 +               err = PTR_ERR(ni);
83651 +               ni = NULL;
83652 +               goto out3;
83653 +       }
83654 +       inode = &ni->vfs_inode;
83656 +       inode->i_atime = inode->i_mtime = inode->i_ctime = ni->i_crtime =
83657 +               current_time(inode);
83659 +       rec = ni->mi.mrec;
83660 +       rec->hard_links = cpu_to_le16(1);
83661 +       attr = Add2Ptr(rec, le16_to_cpu(rec->attr_off));
83663 +       /* Get default security id */
83664 +       sd = s_default_security;
83665 +       sd_size = sizeof(s_default_security);
83667 +       if (is_ntfs3(sbi)) {
83668 +               security_id = dir_ni->std_security_id;
83669 +               if (le32_to_cpu(security_id) < SECURITY_ID_FIRST) {
83670 +                       security_id = sbi->security.def_security_id;
83672 +                       if (security_id == SECURITY_ID_INVALID &&
83673 +                           !ntfs_insert_security(sbi, sd, sd_size,
83674 +                                                 &security_id, NULL))
83675 +                               sbi->security.def_security_id = security_id;
83676 +               }
83677 +       }
83679 +       /* Insert standard info */
83680 +       std5 = Add2Ptr(attr, SIZEOF_RESIDENT);
83682 +       if (security_id == SECURITY_ID_INVALID) {
83683 +               dsize = sizeof(struct ATTR_STD_INFO);
83684 +       } else {
83685 +               dsize = sizeof(struct ATTR_STD_INFO5);
83686 +               std5->security_id = security_id;
83687 +               ni->std_security_id = security_id;
83688 +       }
83689 +       asize = SIZEOF_RESIDENT + dsize;
83691 +       attr->type = ATTR_STD;
83692 +       attr->size = cpu_to_le32(asize);
83693 +       attr->id = cpu_to_le16(aid++);
83694 +       attr->res.data_off = SIZEOF_RESIDENT_LE;
83695 +       attr->res.data_size = cpu_to_le32(dsize);
83697 +       std5->cr_time = std5->m_time = std5->c_time = std5->a_time =
83698 +               kernel2nt(&inode->i_atime);
83700 +       ni->std_fa = fa;
83701 +       std5->fa = fa;
83703 +       attr = Add2Ptr(attr, asize);
83705 +       /* Insert file name */
83706 +       err = fill_name_de(sbi, new_de, name, uni);
83707 +       if (err)
83708 +               goto out4;
83710 +       mi_get_ref(&ni->mi, &new_de->ref);
83712 +       fname = (struct ATTR_FILE_NAME *)(new_de + 1);
83713 +       mi_get_ref(&dir_ni->mi, &fname->home);
83714 +       fname->dup.cr_time = fname->dup.m_time = fname->dup.c_time =
83715 +               fname->dup.a_time = std5->cr_time;
83716 +       fname->dup.alloc_size = fname->dup.data_size = 0;
83717 +       fname->dup.fa = std5->fa;
83718 +       fname->dup.ea_size = fname->dup.reparse = 0;
83720 +       dsize = le16_to_cpu(new_de->key_size);
83721 +       asize = QuadAlign(SIZEOF_RESIDENT + dsize);
83723 +       attr->type = ATTR_NAME;
83724 +       attr->size = cpu_to_le32(asize);
83725 +       attr->res.data_off = SIZEOF_RESIDENT_LE;
83726 +       attr->res.flags = RESIDENT_FLAG_INDEXED;
83727 +       attr->id = cpu_to_le16(aid++);
83728 +       attr->res.data_size = cpu_to_le32(dsize);
83729 +       memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), fname, dsize);
83731 +       attr = Add2Ptr(attr, asize);
83733 +       if (security_id == SECURITY_ID_INVALID) {
83734 +               /* Insert security attribute */
83735 +               asize = SIZEOF_RESIDENT + QuadAlign(sd_size);
83737 +               attr->type = ATTR_SECURE;
83738 +               attr->size = cpu_to_le32(asize);
83739 +               attr->id = cpu_to_le16(aid++);
83740 +               attr->res.data_off = SIZEOF_RESIDENT_LE;
83741 +               attr->res.data_size = cpu_to_le32(sd_size);
83742 +               memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), sd, sd_size);
83744 +               attr = Add2Ptr(attr, asize);
83745 +       }
83747 +       if (fa & FILE_ATTRIBUTE_DIRECTORY) {
83748 +               /*
83749 +                * regular directory or symlink to directory
83750 +                * Create root attribute
83751 +                */
83752 +               dsize = sizeof(struct INDEX_ROOT) + sizeof(struct NTFS_DE);
83753 +               asize = sizeof(I30_NAME) + SIZEOF_RESIDENT + dsize;
83755 +               attr->type = ATTR_ROOT;
83756 +               attr->size = cpu_to_le32(asize);
83757 +               attr->id = cpu_to_le16(aid++);
83759 +               attr->name_len = ARRAY_SIZE(I30_NAME);
83760 +               attr->name_off = SIZEOF_RESIDENT_LE;
83761 +               attr->res.data_off =
83762 +                       cpu_to_le16(sizeof(I30_NAME) + SIZEOF_RESIDENT);
83763 +               attr->res.data_size = cpu_to_le32(dsize);
83764 +               memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), I30_NAME,
83765 +                      sizeof(I30_NAME));
83767 +               root = Add2Ptr(attr, sizeof(I30_NAME) + SIZEOF_RESIDENT);
83768 +               memcpy(root, dir_root, offsetof(struct INDEX_ROOT, ihdr));
83769 +               root->ihdr.de_off =
83770 +                       cpu_to_le32(sizeof(struct INDEX_HDR)); // 0x10
83771 +               root->ihdr.used = cpu_to_le32(sizeof(struct INDEX_HDR) +
83772 +                                             sizeof(struct NTFS_DE));
83773 +               root->ihdr.total = root->ihdr.used;
83775 +               e = Add2Ptr(root, sizeof(struct INDEX_ROOT));
83776 +               e->size = cpu_to_le16(sizeof(struct NTFS_DE));
83777 +               e->flags = NTFS_IE_LAST;
83778 +       } else if (is_link) {
83779 +               /*
83780 +                * symlink to file
83781 +                * Create empty resident data attribute
83782 +                */
83783 +               asize = SIZEOF_RESIDENT;
83785 +               /* insert empty ATTR_DATA */
83786 +               attr->type = ATTR_DATA;
83787 +               attr->size = cpu_to_le32(SIZEOF_RESIDENT);
83788 +               attr->id = cpu_to_le16(aid++);
83789 +               attr->name_off = SIZEOF_RESIDENT_LE;
83790 +               attr->res.data_off = SIZEOF_RESIDENT_LE;
83791 +       } else {
83792 +               /*
83793 +                * regular file
83794 +                */
83795 +               attr->type = ATTR_DATA;
83796 +               attr->id = cpu_to_le16(aid++);
83797 +               /* Create empty non resident data attribute */
83798 +               attr->non_res = 1;
83799 +               attr->nres.evcn = cpu_to_le64(-1ll);
83800 +               if (fa & FILE_ATTRIBUTE_SPARSE_FILE) {
83801 +                       attr->size = cpu_to_le32(SIZEOF_NONRESIDENT_EX + 8);
83802 +                       attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
83803 +                       attr->flags = ATTR_FLAG_SPARSED;
83804 +                       asize = SIZEOF_NONRESIDENT_EX + 8;
83805 +               } else if (fa & FILE_ATTRIBUTE_COMPRESSED) {
83806 +                       attr->size = cpu_to_le32(SIZEOF_NONRESIDENT_EX + 8);
83807 +                       attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
83808 +                       attr->flags = ATTR_FLAG_COMPRESSED;
83809 +                       attr->nres.c_unit = COMPRESSION_UNIT;
83810 +                       asize = SIZEOF_NONRESIDENT_EX + 8;
83811 +               } else {
83812 +                       attr->size = cpu_to_le32(SIZEOF_NONRESIDENT + 8);
83813 +                       attr->name_off = SIZEOF_NONRESIDENT_LE;
83814 +                       asize = SIZEOF_NONRESIDENT + 8;
83815 +               }
83816 +               attr->nres.run_off = attr->name_off;
83817 +       }
83819 +       if (is_dir) {
83820 +               ni->ni_flags |= NI_FLAG_DIR;
83821 +               err = indx_init(&ni->dir, sbi, attr, INDEX_MUTEX_I30);
83822 +               if (err)
83823 +                       goto out4;
83824 +       } else if (is_link) {
83825 +               rp = ntfs_create_reparse_buffer(sbi, symname, size, &nsize);
83827 +               if (IS_ERR(rp)) {
83828 +                       err = PTR_ERR(rp);
83829 +                       rp = NULL;
83830 +                       goto out4;
83831 +               }
83833 +               /*
83834 +                * Insert ATTR_REPARSE
83835 +                */
83836 +               attr = Add2Ptr(attr, asize);
83837 +               attr->type = ATTR_REPARSE;
83838 +               attr->id = cpu_to_le16(aid++);
83840 +               /* resident or non resident? */
83841 +               asize = QuadAlign(SIZEOF_RESIDENT + nsize);
83842 +               t16 = PtrOffset(rec, attr);
83844 +               if (asize + t16 + 8 > sbi->record_size) {
83845 +                       CLST alen;
83846 +                       CLST clst = bytes_to_cluster(sbi, nsize);
83848 +                       /* bytes per runs */
83849 +                       t16 = sbi->record_size - t16 - SIZEOF_NONRESIDENT;
83851 +                       attr->non_res = 1;
83852 +                       attr->nres.evcn = cpu_to_le64(clst - 1);
83853 +                       attr->name_off = SIZEOF_NONRESIDENT_LE;
83854 +                       attr->nres.run_off = attr->name_off;
83855 +                       attr->nres.data_size = cpu_to_le64(nsize);
83856 +                       attr->nres.valid_size = attr->nres.data_size;
83857 +                       attr->nres.alloc_size =
83858 +                               cpu_to_le64(ntfs_up_cluster(sbi, nsize));
83860 +                       err = attr_allocate_clusters(sbi, &ni->file.run, 0, 0,
83861 +                                                    clst, NULL, 0, &alen, 0,
83862 +                                                    NULL);
83863 +                       if (err)
83864 +                               goto out5;
83866 +                       err = run_pack(&ni->file.run, 0, clst,
83867 +                                      Add2Ptr(attr, SIZEOF_NONRESIDENT), t16,
83868 +                                      &vcn);
83869 +                       if (err < 0)
83870 +                               goto out5;
83872 +                       if (vcn != clst) {
83873 +                               err = -EINVAL;
83874 +                               goto out5;
83875 +                       }
83877 +                       asize = SIZEOF_NONRESIDENT + QuadAlign(err);
83878 +                       inode->i_size = nsize;
83879 +               } else {
83880 +                       attr->res.data_off = SIZEOF_RESIDENT_LE;
83881 +                       attr->res.data_size = cpu_to_le32(nsize);
83882 +                       memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), rp, nsize);
83883 +                       inode->i_size = nsize;
83884 +                       nsize = 0;
83885 +               }
83887 +               attr->size = cpu_to_le32(asize);
83889 +               err = ntfs_insert_reparse(sbi, IO_REPARSE_TAG_SYMLINK,
83890 +                                         &new_de->ref);
83891 +               if (err)
83892 +                       goto out5;
83894 +               rp_inserted = true;
83895 +       }
83897 +       attr = Add2Ptr(attr, asize);
83898 +       attr->type = ATTR_END;
83900 +       rec->used = cpu_to_le32(PtrOffset(rec, attr) + 8);
83901 +       rec->next_attr_id = cpu_to_le16(aid);
83903 +       /* Step 2: Add new name in index */
83904 +       err = indx_insert_entry(&dir_ni->dir, dir_ni, new_de, sbi, fnd);
83905 +       if (err)
83906 +               goto out6;
83908 +       /* Update current directory record */
83909 +       mark_inode_dirty(dir);
83911 +       /* Fill vfs inode fields */
83912 +       inode->i_uid = sbi->options.uid ? sbi->options.fs_uid : current_fsuid();
83913 +       inode->i_gid = sbi->options.gid          ? sbi->options.fs_gid
83914 +                      : (dir->i_mode & S_ISGID) ? dir->i_gid
83915 +                                                : current_fsgid();
83916 +       inode->i_generation = le16_to_cpu(rec->seq);
83918 +       dir->i_mtime = dir->i_ctime = inode->i_atime;
83920 +       if (is_dir) {
83921 +               if (dir->i_mode & S_ISGID)
83922 +                       mode |= S_ISGID;
83923 +               inode->i_op = &ntfs_dir_inode_operations;
83924 +               inode->i_fop = &ntfs_dir_operations;
83925 +       } else if (is_link) {
83926 +               inode->i_op = &ntfs_link_inode_operations;
83927 +               inode->i_fop = NULL;
83928 +               inode->i_mapping->a_ops = &ntfs_aops;
83929 +       } else {
83930 +               inode->i_op = &ntfs_file_inode_operations;
83931 +               inode->i_fop = &ntfs_file_operations;
83932 +               inode->i_mapping->a_ops =
83933 +                       is_compressed(ni) ? &ntfs_aops_cmpr : &ntfs_aops;
83934 +               init_rwsem(&ni->file.run_lock);
83935 +       }
83937 +       inode->i_mode = mode;
83939 +#ifdef CONFIG_NTFS3_FS_POSIX_ACL
83940 +       if (!is_link && (sb->s_flags & SB_POSIXACL)) {
83941 +               err = ntfs_init_acl(mnt_userns, inode, dir);
83942 +               if (err)
83943 +                       goto out6;
83944 +       } else
83945 +#endif
83946 +       {
83947 +               inode->i_flags |= S_NOSEC;
83948 +       }
83950 +       /* Write non resident data */
83951 +       if (nsize) {
83952 +               err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rp, nsize);
83953 +               if (err)
83954 +                       goto out7;
83955 +       }
83957 +       /* call 'd_instantiate' after inode->i_op is set but before finish_open */
83958 +       d_instantiate(dentry, inode);
83960 +       mark_inode_dirty(inode);
83961 +       mark_inode_dirty(dir);
83963 +       /* normal exit */
83964 +       goto out2;
83966 +out7:
83968 +       /* undo 'indx_insert_entry' */
83969 +       indx_delete_entry(&dir_ni->dir, dir_ni, new_de + 1,
83970 +                         le16_to_cpu(new_de->key_size), sbi);
83971 +out6:
83972 +       if (rp_inserted)
83973 +               ntfs_remove_reparse(sbi, IO_REPARSE_TAG_SYMLINK, &new_de->ref);
83975 +out5:
83976 +       if (is_dir || run_is_empty(&ni->file.run))
83977 +               goto out4;
83979 +       run_deallocate(sbi, &ni->file.run, false);
83981 +out4:
83982 +       clear_rec_inuse(rec);
83983 +       clear_nlink(inode);
83984 +       ni->mi.dirty = false;
83985 +       discard_new_inode(inode);
83986 +out3:
83987 +       ntfs_mark_rec_free(sbi, ino);
83989 +out2:
83990 +       __putname(new_de);
83991 +       ntfs_free(rp);
83993 +out1:
83994 +       if (err)
83995 +               return ERR_PTR(err);
83997 +       unlock_new_inode(inode);
83999 +       return inode;
84002 +int ntfs_link_inode(struct inode *inode, struct dentry *dentry)
84004 +       int err;
84005 +       struct inode *dir = d_inode(dentry->d_parent);
84006 +       struct ntfs_inode *dir_ni = ntfs_i(dir);
84007 +       struct ntfs_inode *ni = ntfs_i(inode);
84008 +       struct super_block *sb = inode->i_sb;
84009 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
84010 +       const struct qstr *name = &dentry->d_name;
84011 +       struct NTFS_DE *new_de = NULL;
84012 +       struct ATTR_FILE_NAME *fname;
84013 +       struct ATTRIB *attr;
84014 +       u16 key_size;
84015 +       struct INDEX_ROOT *dir_root;
84017 +       dir_root = indx_get_root(&dir_ni->dir, dir_ni, NULL, NULL);
84018 +       if (!dir_root)
84019 +               return -EINVAL;
84021 +       /* allocate PATH_MAX bytes */
84022 +       new_de = __getname();
84023 +       if (!new_de)
84024 +               return -ENOMEM;
84026 +       /*mark rw ntfs as dirty. it will be cleared at umount*/
84027 +       ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_DIRTY);
84029 +       // Insert file name
84030 +       err = fill_name_de(sbi, new_de, name, NULL);
84031 +       if (err)
84032 +               goto out;
84034 +       key_size = le16_to_cpu(new_de->key_size);
84035 +       err = ni_insert_resident(ni, key_size, ATTR_NAME, NULL, 0, &attr, NULL);
84036 +       if (err)
84037 +               goto out;
84039 +       mi_get_ref(&ni->mi, &new_de->ref);
84041 +       fname = (struct ATTR_FILE_NAME *)(new_de + 1);
84042 +       mi_get_ref(&dir_ni->mi, &fname->home);
84043 +       fname->dup.cr_time = fname->dup.m_time = fname->dup.c_time =
84044 +               fname->dup.a_time = kernel2nt(&inode->i_ctime);
84045 +       fname->dup.alloc_size = fname->dup.data_size = 0;
84046 +       fname->dup.fa = ni->std_fa;
84047 +       fname->dup.ea_size = fname->dup.reparse = 0;
84049 +       memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), fname, key_size);
84051 +       err = indx_insert_entry(&dir_ni->dir, dir_ni, new_de, sbi, NULL);
84052 +       if (err)
84053 +               goto out;
84055 +       le16_add_cpu(&ni->mi.mrec->hard_links, 1);
84056 +       ni->mi.dirty = true;
84058 +out:
84059 +       __putname(new_de);
84060 +       return err;
84064 + * ntfs_unlink_inode
84065 + *
84066 + * inode_operations::unlink
84067 + * inode_operations::rmdir
84068 + */
84069 +int ntfs_unlink_inode(struct inode *dir, const struct dentry *dentry)
84071 +       int err;
84072 +       struct super_block *sb = dir->i_sb;
84073 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
84074 +       struct inode *inode = d_inode(dentry);
84075 +       struct ntfs_inode *ni = ntfs_i(inode);
84076 +       const struct qstr *name = &dentry->d_name;
84077 +       struct ntfs_inode *dir_ni = ntfs_i(dir);
84078 +       struct ntfs_index *indx = &dir_ni->dir;
84079 +       struct cpu_str *uni = NULL;
84080 +       struct ATTR_FILE_NAME *fname;
84081 +       u8 name_type;
84082 +       struct ATTR_LIST_ENTRY *le;
84083 +       struct MFT_REF ref;
84084 +       bool is_dir = S_ISDIR(inode->i_mode);
84085 +       struct INDEX_ROOT *dir_root;
84087 +       dir_root = indx_get_root(indx, dir_ni, NULL, NULL);
84088 +       if (!dir_root)
84089 +               return -EINVAL;
84091 +       ni_lock(ni);
84093 +       if (is_dir && !dir_is_empty(inode)) {
84094 +               err = -ENOTEMPTY;
84095 +               goto out1;
84096 +       }
84098 +       if (ntfs_is_meta_file(sbi, inode->i_ino)) {
84099 +               err = -EINVAL;
84100 +               goto out1;
84101 +       }
84103 +       /* allocate PATH_MAX bytes */
84104 +       uni = __getname();
84105 +       if (!uni) {
84106 +               err = -ENOMEM;
84107 +               goto out1;
84108 +       }
84110 +       /* Convert input string to unicode */
84111 +       err = ntfs_nls_to_utf16(sbi, name->name, name->len, uni, NTFS_NAME_LEN,
84112 +                               UTF16_HOST_ENDIAN);
84113 +       if (err < 0)
84114 +               goto out2;
84116 +       /*mark rw ntfs as dirty. it will be cleared at umount*/
84117 +       ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
84119 +       /* find name in record */
84120 +       mi_get_ref(&dir_ni->mi, &ref);
84122 +       le = NULL;
84123 +       fname = ni_fname_name(ni, uni, &ref, &le);
84124 +       if (!fname) {
84125 +               err = -ENOENT;
84126 +               goto out3;
84127 +       }
84129 +       name_type = paired_name(fname->type);
84131 +       err = indx_delete_entry(indx, dir_ni, fname, fname_full_size(fname),
84132 +                               sbi);
84133 +       if (err)
84134 +               goto out3;
84136 +       /* Then remove name from mft */
84137 +       ni_remove_attr_le(ni, attr_from_name(fname), le);
84139 +       le16_add_cpu(&ni->mi.mrec->hard_links, -1);
84140 +       ni->mi.dirty = true;
84142 +       if (name_type != FILE_NAME_POSIX) {
84143 +               /* Now we should delete name by type */
84144 +               fname = ni_fname_type(ni, name_type, &le);
84145 +               if (fname) {
84146 +                       err = indx_delete_entry(indx, dir_ni, fname,
84147 +                                               fname_full_size(fname), sbi);
84148 +                       if (err)
84149 +                               goto out3;
84151 +                       ni_remove_attr_le(ni, attr_from_name(fname), le);
84153 +                       le16_add_cpu(&ni->mi.mrec->hard_links, -1);
84154 +               }
84155 +       }
84156 +out3:
84157 +       switch (err) {
84158 +       case 0:
84159 +               drop_nlink(inode);
84160 +       case -ENOTEMPTY:
84161 +       case -ENOSPC:
84162 +       case -EROFS:
84163 +               break;
84164 +       default:
84165 +               make_bad_inode(inode);
84166 +       }
84168 +       dir->i_mtime = dir->i_ctime = current_time(dir);
84169 +       mark_inode_dirty(dir);
84170 +       inode->i_ctime = dir->i_ctime;
84171 +       if (inode->i_nlink)
84172 +               mark_inode_dirty(inode);
84174 +out2:
84175 +       __putname(uni);
84176 +out1:
84177 +       ni_unlock(ni);
84178 +       return err;
84181 +void ntfs_evict_inode(struct inode *inode)
84183 +       truncate_inode_pages_final(&inode->i_data);
84185 +       if (inode->i_nlink)
84186 +               _ni_write_inode(inode, inode_needs_sync(inode));
84188 +       invalidate_inode_buffers(inode);
84189 +       clear_inode(inode);
84191 +       ni_clear(ntfs_i(inode));
84194 +static noinline int ntfs_readlink_hlp(struct inode *inode, char *buffer,
84195 +                                     int buflen)
84197 +       int i, err = 0;
84198 +       struct ntfs_inode *ni = ntfs_i(inode);
84199 +       struct super_block *sb = inode->i_sb;
84200 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
84201 +       u64 i_size = inode->i_size;
84202 +       u16 nlen = 0;
84203 +       void *to_free = NULL;
84204 +       struct REPARSE_DATA_BUFFER *rp;
84205 +       struct le_str *uni;
84206 +       struct ATTRIB *attr;
84208 +       /* Reparse data present. Try to parse it */
84209 +       static_assert(!offsetof(struct REPARSE_DATA_BUFFER, ReparseTag));
84210 +       static_assert(sizeof(u32) == sizeof(rp->ReparseTag));
84212 +       *buffer = 0;
84214 +       /* Read into temporal buffer */
84215 +       if (i_size > sbi->reparse.max_size || i_size <= sizeof(u32)) {
84216 +               err = -EINVAL;
84217 +               goto out;
84218 +       }
84220 +       attr = ni_find_attr(ni, NULL, NULL, ATTR_REPARSE, NULL, 0, NULL, NULL);
84221 +       if (!attr) {
84222 +               err = -EINVAL;
84223 +               goto out;
84224 +       }
84226 +       if (!attr->non_res) {
84227 +               rp = resident_data_ex(attr, i_size);
84228 +               if (!rp) {
84229 +                       err = -EINVAL;
84230 +                       goto out;
84231 +               }
84232 +       } else {
84233 +               rp = ntfs_malloc(i_size);
84234 +               if (!rp) {
84235 +                       err = -ENOMEM;
84236 +                       goto out;
84237 +               }
84238 +               to_free = rp;
84239 +               err = ntfs_read_run_nb(sbi, &ni->file.run, 0, rp, i_size, NULL);
84240 +               if (err)
84241 +                       goto out;
84242 +       }
84244 +       err = -EINVAL;
84246 +       /* Microsoft Tag */
84247 +       switch (rp->ReparseTag) {
84248 +       case IO_REPARSE_TAG_MOUNT_POINT:
84249 +               /* Mount points and junctions */
84250 +               /* Can we use 'Rp->MountPointReparseBuffer.PrintNameLength'? */
84251 +               if (i_size <= offsetof(struct REPARSE_DATA_BUFFER,
84252 +                                      MountPointReparseBuffer.PathBuffer))
84253 +                       goto out;
84254 +               uni = Add2Ptr(rp,
84255 +                             offsetof(struct REPARSE_DATA_BUFFER,
84256 +                                      MountPointReparseBuffer.PathBuffer) +
84257 +                                     le16_to_cpu(rp->MountPointReparseBuffer
84258 +                                                         .PrintNameOffset) -
84259 +                                     2);
84260 +               nlen = le16_to_cpu(rp->MountPointReparseBuffer.PrintNameLength);
84261 +               break;
84263 +       case IO_REPARSE_TAG_SYMLINK:
84264 +               /* FolderSymbolicLink */
84265 +               /* Can we use 'Rp->SymbolicLinkReparseBuffer.PrintNameLength'? */
84266 +               if (i_size <= offsetof(struct REPARSE_DATA_BUFFER,
84267 +                                      SymbolicLinkReparseBuffer.PathBuffer))
84268 +                       goto out;
84269 +               uni = Add2Ptr(rp,
84270 +                             offsetof(struct REPARSE_DATA_BUFFER,
84271 +                                      SymbolicLinkReparseBuffer.PathBuffer) +
84272 +                                     le16_to_cpu(rp->SymbolicLinkReparseBuffer
84273 +                                                         .PrintNameOffset) -
84274 +                                     2);
84275 +               nlen = le16_to_cpu(
84276 +                       rp->SymbolicLinkReparseBuffer.PrintNameLength);
84277 +               break;
84279 +       case IO_REPARSE_TAG_CLOUD:
84280 +       case IO_REPARSE_TAG_CLOUD_1:
84281 +       case IO_REPARSE_TAG_CLOUD_2:
84282 +       case IO_REPARSE_TAG_CLOUD_3:
84283 +       case IO_REPARSE_TAG_CLOUD_4:
84284 +       case IO_REPARSE_TAG_CLOUD_5:
84285 +       case IO_REPARSE_TAG_CLOUD_6:
84286 +       case IO_REPARSE_TAG_CLOUD_7:
84287 +       case IO_REPARSE_TAG_CLOUD_8:
84288 +       case IO_REPARSE_TAG_CLOUD_9:
84289 +       case IO_REPARSE_TAG_CLOUD_A:
84290 +       case IO_REPARSE_TAG_CLOUD_B:
84291 +       case IO_REPARSE_TAG_CLOUD_C:
84292 +       case IO_REPARSE_TAG_CLOUD_D:
84293 +       case IO_REPARSE_TAG_CLOUD_E:
84294 +       case IO_REPARSE_TAG_CLOUD_F:
84295 +               err = sizeof("OneDrive") - 1;
84296 +               if (err > buflen)
84297 +                       err = buflen;
84298 +               memcpy(buffer, "OneDrive", err);
84299 +               goto out;
84301 +       default:
84302 +               if (IsReparseTagMicrosoft(rp->ReparseTag)) {
84303 +                       /* unknown Microsoft Tag */
84304 +                       goto out;
84305 +               }
84306 +               if (!IsReparseTagNameSurrogate(rp->ReparseTag) ||
84307 +                   i_size <= sizeof(struct REPARSE_POINT)) {
84308 +                       goto out;
84309 +               }
84311 +               /* Users tag */
84312 +               uni = Add2Ptr(rp, sizeof(struct REPARSE_POINT) - 2);
84313 +               nlen = le16_to_cpu(rp->ReparseDataLength) -
84314 +                      sizeof(struct REPARSE_POINT);
84315 +       }
84317 +       /* Convert nlen from bytes to UNICODE chars */
84318 +       nlen >>= 1;
84320 +       /* Check that name is available */
84321 +       if (!nlen || &uni->name[nlen] > (__le16 *)Add2Ptr(rp, i_size))
84322 +               goto out;
84324 +       /* If name is already zero terminated then truncate it now */
84325 +       if (!uni->name[nlen - 1])
84326 +               nlen -= 1;
84327 +       uni->len = nlen;
84329 +       err = ntfs_utf16_to_nls(sbi, uni, buffer, buflen);
84331 +       if (err < 0)
84332 +               goto out;
84334 +       /* translate windows '\' into linux '/' */
84335 +       for (i = 0; i < err; i++) {
84336 +               if (buffer[i] == '\\')
84337 +                       buffer[i] = '/';
84338 +       }
84340 +       /* Always set last zero */
84341 +       buffer[err] = 0;
84342 +out:
84343 +       ntfs_free(to_free);
84344 +       return err;
84347 +static const char *ntfs_get_link(struct dentry *de, struct inode *inode,
84348 +                                struct delayed_call *done)
84350 +       int err;
84351 +       char *ret;
84353 +       if (!de)
84354 +               return ERR_PTR(-ECHILD);
84356 +       ret = kmalloc(PAGE_SIZE, GFP_NOFS);
84357 +       if (!ret)
84358 +               return ERR_PTR(-ENOMEM);
84360 +       err = ntfs_readlink_hlp(inode, ret, PAGE_SIZE);
84361 +       if (err < 0) {
84362 +               kfree(ret);
84363 +               return ERR_PTR(err);
84364 +       }
84366 +       set_delayed_call(done, kfree_link, ret);
84368 +       return ret;
84371 +const struct inode_operations ntfs_link_inode_operations = {
84372 +       .get_link = ntfs_get_link,
84373 +       .setattr = ntfs3_setattr,
84374 +       .listxattr = ntfs_listxattr,
84375 +       .permission = ntfs_permission,
84376 +       .get_acl = ntfs_get_acl,
84377 +       .set_acl = ntfs_set_acl,
84380 +const struct address_space_operations ntfs_aops = {
84381 +       .readpage = ntfs_readpage,
84382 +       .readahead = ntfs_readahead,
84383 +       .writepage = ntfs_writepage,
84384 +       .writepages = ntfs_writepages,
84385 +       .write_begin = ntfs_write_begin,
84386 +       .write_end = ntfs_write_end,
84387 +       .direct_IO = ntfs_direct_IO,
84388 +       .bmap = ntfs_bmap,
84391 +const struct address_space_operations ntfs_aops_cmpr = {
84392 +       .readpage = ntfs_readpage,
84393 +       .readahead = ntfs_readahead,
84395 diff --git a/fs/ntfs3/lib/decompress_common.c b/fs/ntfs3/lib/decompress_common.c
84396 new file mode 100644
84397 index 000000000000..83c9e93aea77
84398 --- /dev/null
84399 +++ b/fs/ntfs3/lib/decompress_common.c
84400 @@ -0,0 +1,332 @@
84401 +// SPDX-License-Identifier: GPL-2.0-or-later
84403 + * decompress_common.c - Code shared by the XPRESS and LZX decompressors
84404 + *
84405 + * Copyright (C) 2015 Eric Biggers
84406 + *
84407 + * This program is free software: you can redistribute it and/or modify it under
84408 + * the terms of the GNU General Public License as published by the Free Software
84409 + * Foundation, either version 2 of the License, or (at your option) any later
84410 + * version.
84411 + *
84412 + * This program is distributed in the hope that it will be useful, but WITHOUT
84413 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
84414 + * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
84415 + * details.
84416 + *
84417 + * You should have received a copy of the GNU General Public License along with
84418 + * this program.  If not, see <http://www.gnu.org/licenses/>.
84419 + */
84421 +#include "decompress_common.h"
84424 + * make_huffman_decode_table() -
84425 + *
84426 + * Build a decoding table for a canonical prefix code, or "Huffman code".
84427 + *
84428 + * This is an internal function, not part of the library API!
84429 + *
84430 + * This takes as input the length of the codeword for each symbol in the
84431 + * alphabet and produces as output a table that can be used for fast
84432 + * decoding of prefix-encoded symbols using read_huffsym().
84433 + *
84434 + * Strictly speaking, a canonical prefix code might not be a Huffman
84435 + * code.  But this algorithm will work either way; and in fact, since
84436 + * Huffman codes are defined in terms of symbol frequencies, there is no
84437 + * way for the decompressor to know whether the code is a true Huffman
84438 + * code or not until all symbols have been decoded.
84439 + *
84440 + * Because the prefix code is assumed to be "canonical", it can be
84441 + * reconstructed directly from the codeword lengths.  A prefix code is
84442 + * canonical if and only if a longer codeword never lexicographically
84443 + * precedes a shorter codeword, and the lexicographic ordering of
84444 + * codewords of the same length is the same as the lexicographic ordering
84445 + * of the corresponding symbols.  Consequently, we can sort the symbols
84446 + * primarily by codeword length and secondarily by symbol value, then
84447 + * reconstruct the prefix code by generating codewords lexicographically
84448 + * in that order.
84449 + *
84450 + * This function does not, however, generate the prefix code explicitly.
84451 + * Instead, it directly builds a table for decoding symbols using the
84452 + * code.  The basic idea is this: given the next 'max_codeword_len' bits
84453 + * in the input, we can look up the decoded symbol by indexing a table
84454 + * containing 2**max_codeword_len entries.  A codeword with length
84455 + * 'max_codeword_len' will have exactly one entry in this table, whereas
84456 + * a codeword shorter than 'max_codeword_len' will have multiple entries
84457 + * in this table.  Precisely, a codeword of length n will be represented
84458 + * by 2**(max_codeword_len - n) entries in this table.  The 0-based index
84459 + * of each such entry will contain the corresponding codeword as a prefix
84460 + * when zero-padded on the left to 'max_codeword_len' binary digits.
84461 + *
84462 + * That's the basic idea, but we implement two optimizations regarding
84463 + * the format of the decode table itself:
84464 + *
84465 + * - For many compression formats, the maximum codeword length is too
84466 + *   long for it to be efficient to build the full decoding table
84467 + *   whenever a new prefix code is used.  Instead, we can build the table
84468 + *   using only 2**table_bits entries, where 'table_bits' is some number
84469 + *   less than or equal to 'max_codeword_len'.  Then, only codewords of
84470 + *   length 'table_bits' and shorter can be directly looked up.  For
84471 + *   longer codewords, the direct lookup instead produces the root of a
84472 + *   binary tree.  Using this tree, the decoder can do traditional
84473 + *   bit-by-bit decoding of the remainder of the codeword.  Child nodes
84474 + *   are allocated in extra entries at the end of the table; leaf nodes
84475 + *   contain symbols.  Note that the long-codeword case is, in general,
84476 + *   not performance critical, since in Huffman codes the most frequently
84477 + *   used symbols are assigned the shortest codeword lengths.
84478 + *
84479 + * - When we decode a symbol using a direct lookup of the table, we still
84480 + *   need to know its length so that the bitstream can be advanced by the
84481 + *   appropriate number of bits.  The simple solution is to simply retain
84482 + *   the 'lens' array and use the decoded symbol as an index into it.
84483 + *   However, this requires two separate array accesses in the fast path.
84484 + *   The optimization is to store the length directly in the decode
84485 + *   table.  We use the bottom 11 bits for the symbol and the top 5 bits
84486 + *   for the length.  In addition, to combine this optimization with the
84487 + *   previous one, we introduce a special case where the top 2 bits of
84488 + *   the length are both set if the entry is actually the root of a
84489 + *   binary tree.
84490 + *
84491 + * @decode_table:
84492 + *     The array in which to create the decoding table.  This must have
84493 + *     a length of at least ((2**table_bits) + 2 * num_syms) entries.
84494 + *
84495 + * @num_syms:
84496 + *     The number of symbols in the alphabet; also, the length of the
84497 + *     'lens' array.  Must be less than or equal to 2048.
84498 + *
84499 + * @table_bits:
84500 + *     The order of the decode table size, as explained above.  Must be
84501 + *     less than or equal to 13.
84502 + *
84503 + * @lens:
84504 + *     An array of length @num_syms, indexable by symbol, that gives the
84505 + *     length of the codeword, in bits, for that symbol.  The length can
84506 + *     be 0, which means that the symbol does not have a codeword
84507 + *     assigned.
84508 + *
84509 + * @max_codeword_len:
84510 + *     The longest codeword length allowed in the compression format.
84511 + *     All entries in 'lens' must be less than or equal to this value.
84512 + *     This must be less than or equal to 23.
84513 + *
84514 + * @working_space
84515 + *     A temporary array of length '2 * (max_codeword_len + 1) +
84516 + *     num_syms'.
84517 + *
84518 + * Returns 0 on success, or -1 if the lengths do not form a valid prefix
84519 + * code.
84520 + */
84521 +int make_huffman_decode_table(u16 decode_table[], const u32 num_syms,
84522 +                             const u32 table_bits, const u8 lens[],
84523 +                             const u32 max_codeword_len,
84524 +                             u16 working_space[])
84526 +       const u32 table_num_entries = 1 << table_bits;
84527 +       u16 * const len_counts = &working_space[0];
84528 +       u16 * const offsets = &working_space[1 * (max_codeword_len + 1)];
84529 +       u16 * const sorted_syms = &working_space[2 * (max_codeword_len + 1)];
84530 +       int left;
84531 +       void *decode_table_ptr;
84532 +       u32 sym_idx;
84533 +       u32 codeword_len;
84534 +       u32 stores_per_loop;
84535 +       u32 decode_table_pos;
84536 +       u32 len;
84537 +       u32 sym;
84539 +       /* Count how many symbols have each possible codeword length.
84540 +        * Note that a length of 0 indicates the corresponding symbol is not
84541 +        * used in the code and therefore does not have a codeword.
84542 +        */
84543 +       for (len = 0; len <= max_codeword_len; len++)
84544 +               len_counts[len] = 0;
84545 +       for (sym = 0; sym < num_syms; sym++)
84546 +               len_counts[lens[sym]]++;
84548 +       /* We can assume all lengths are <= max_codeword_len, but we
84549 +        * cannot assume they form a valid prefix code.  A codeword of
84550 +        * length n should require a proportion of the codespace equaling
84551 +        * (1/2)^n.  The code is valid if and only if the codespace is
84552 +        * exactly filled by the lengths, by this measure.
84553 +        */
84554 +       left = 1;
84555 +       for (len = 1; len <= max_codeword_len; len++) {
84556 +               left <<= 1;
84557 +               left -= len_counts[len];
84558 +               if (left < 0) {
84559 +                       /* The lengths overflow the codespace; that is, the code
84560 +                        * is over-subscribed.
84561 +                        */
84562 +                       return -1;
84563 +               }
84564 +       }
84566 +       if (left) {
84567 +               /* The lengths do not fill the codespace; that is, they form an
84568 +                * incomplete set.
84569 +                */
84570 +               if (left == (1 << max_codeword_len)) {
84571 +                       /* The code is completely empty.  This is arguably
84572 +                        * invalid, but in fact it is valid in LZX and XPRESS,
84573 +                        * so we must allow it.  By definition, no symbols can
84574 +                        * be decoded with an empty code.  Consequently, we
84575 +                        * technically don't even need to fill in the decode
84576 +                        * table.  However, to avoid accessing uninitialized
84577 +                        * memory if the algorithm nevertheless attempts to
84578 +                        * decode symbols using such a code, we zero out the
84579 +                        * decode table.
84580 +                        */
84581 +                       memset(decode_table, 0,
84582 +                              table_num_entries * sizeof(decode_table[0]));
84583 +                       return 0;
84584 +               }
84585 +               return -1;
84586 +       }
84588 +       /* Sort the symbols primarily by length and secondarily by symbol order.
84589 +        */
84591 +       /* Initialize 'offsets' so that offsets[len] for 1 <= len <=
84592 +        * max_codeword_len is the number of codewords shorter than 'len' bits.
84593 +        */
84594 +       offsets[1] = 0;
84595 +       for (len = 1; len < max_codeword_len; len++)
84596 +               offsets[len + 1] = offsets[len] + len_counts[len];
84598 +       /* Use the 'offsets' array to sort the symbols.  Note that we do not
84599 +        * include symbols that are not used in the code.  Consequently, fewer
84600 +        * than 'num_syms' entries in 'sorted_syms' may be filled.
84601 +        */
84602 +       for (sym = 0; sym < num_syms; sym++)
84603 +               if (lens[sym])
84604 +                       sorted_syms[offsets[lens[sym]]++] = sym;
84606 +       /* Fill entries for codewords with length <= table_bits
84607 +        * --- that is, those short enough for a direct mapping.
84608 +        *
84609 +        * The table will start with entries for the shortest codeword(s), which
84610 +        * have the most entries.  From there, the number of entries per
84611 +        * codeword will decrease.
84612 +        */
84613 +       decode_table_ptr = decode_table;
84614 +       sym_idx = 0;
84615 +       codeword_len = 1;
84616 +       stores_per_loop = (1 << (table_bits - codeword_len));
84617 +       for (; stores_per_loop != 0; codeword_len++, stores_per_loop >>= 1) {
84618 +               u32 end_sym_idx = sym_idx + len_counts[codeword_len];
84620 +               for (; sym_idx < end_sym_idx; sym_idx++) {
84621 +                       u16 entry;
84622 +                       u16 *p;
84623 +                       u32 n;
84625 +                       entry = ((u32)codeword_len << 11) | sorted_syms[sym_idx];
84626 +                       p = (u16 *)decode_table_ptr;
84627 +                       n = stores_per_loop;
84629 +                       do {
84630 +                               *p++ = entry;
84631 +                       } while (--n);
84633 +                       decode_table_ptr = p;
84634 +               }
84635 +       }
84637 +       /* If we've filled in the entire table, we are done.  Otherwise,
84638 +        * there are codewords longer than table_bits for which we must
84639 +        * generate binary trees.
84640 +        */
84641 +       decode_table_pos = (u16 *)decode_table_ptr - decode_table;
84642 +       if (decode_table_pos != table_num_entries) {
84643 +               u32 j;
84644 +               u32 next_free_tree_slot;
84645 +               u32 cur_codeword;
84647 +               /* First, zero out the remaining entries.  This is
84648 +                * necessary so that these entries appear as
84649 +                * "unallocated" in the next part.  Each of these entries
84650 +                * will eventually be filled with the representation of
84651 +                * the root node of a binary tree.
84652 +                */
84653 +               j = decode_table_pos;
84654 +               do {
84655 +                       decode_table[j] = 0;
84656 +               } while (++j != table_num_entries);
84658 +               /* We allocate child nodes starting at the end of the
84659 +                * direct lookup table.  Note that there should be
84660 +                * 2*num_syms extra entries for this purpose, although
84661 +                * fewer than this may actually be needed.
84662 +                */
84663 +               next_free_tree_slot = table_num_entries;
84665 +               /* Iterate through each codeword with length greater than
84666 +                * 'table_bits', primarily in order of codeword length
84667 +                * and secondarily in order of symbol.
84668 +                */
84669 +               for (cur_codeword = decode_table_pos << 1;
84670 +                    codeword_len <= max_codeword_len;
84671 +                    codeword_len++, cur_codeword <<= 1) {
84672 +                       u32 end_sym_idx = sym_idx + len_counts[codeword_len];
84674 +                       for (; sym_idx < end_sym_idx; sym_idx++, cur_codeword++) {
84675 +                               /* 'sorted_sym' is the symbol represented by the
84676 +                                * codeword.
84677 +                                */
84678 +                               u32 sorted_sym = sorted_syms[sym_idx];
84679 +                               u32 extra_bits = codeword_len - table_bits;
84680 +                               u32 node_idx = cur_codeword >> extra_bits;
84682 +                               /* Go through each bit of the current codeword
84683 +                                * beyond the prefix of length @table_bits and
84684 +                                * walk the appropriate binary tree, allocating
84685 +                                * any slots that have not yet been allocated.
84686 +                                *
84687 +                                * Note that the 'pointer' entry to the binary
84688 +                                * tree, which is stored in the direct lookup
84689 +                                * portion of the table, is represented
84690 +                                * identically to other internal (non-leaf)
84691 +                                * nodes of the binary tree; it can be thought
84692 +                                * of as simply the root of the tree.  The
84693 +                                * representation of these internal nodes is
84694 +                                * simply the index of the left child combined
84695 +                                * simply the index of the left child combined
84696 +                                * the entry from direct mapping and leaf node
84697 +                                * entries.
84698 +                                */
84699 +                               do {
84700 +                                       /* At least one bit remains in the
84701 +                                        * codeword, but the current node is an
84702 +                                        * unallocated leaf.  Change it to an
84703 +                                        * internal node.
84704 +                                        */
84705 +                                       if (decode_table[node_idx] == 0) {
84706 +                                               decode_table[node_idx] =
84707 +                                                       next_free_tree_slot | 0xC000;
84708 +                                               decode_table[next_free_tree_slot++] = 0;
84709 +                                               decode_table[next_free_tree_slot++] = 0;
84710 +                                       }
84712 +                                       /* Go to the left child if the next bit
84713 +                                        * in the codeword is 0; otherwise go to
84714 +                                        * the right child.
84715 +                                        */
84716 +                                       node_idx = decode_table[node_idx] & 0x3FFF;
84717 +                                       --extra_bits;
84718 +                                       node_idx += (cur_codeword >> extra_bits) & 1;
84719 +                               } while (extra_bits != 0);
84721 +                               /* We've traversed the tree using the entire
84722 +                                * codeword, and we're now at the entry where
84723 +                                * the actual symbol will be stored.  This is
84724 +                                * distinguished from internal nodes by not
84725 +                                * having its high two bits set.
84726 +                                */
84727 +                               decode_table[node_idx] = sorted_sym;
84728 +                       }
84729 +               }
84730 +       }
84731 +       return 0;
84733 diff --git a/fs/ntfs3/lib/decompress_common.h b/fs/ntfs3/lib/decompress_common.h
84734 new file mode 100644
84735 index 000000000000..66297f398403
84736 --- /dev/null
84737 +++ b/fs/ntfs3/lib/decompress_common.h
84738 @@ -0,0 +1,352 @@
84739 +/* SPDX-License-Identifier: GPL-2.0-or-later */
84742 + * decompress_common.h - Code shared by the XPRESS and LZX decompressors
84743 + *
84744 + * Copyright (C) 2015 Eric Biggers
84745 + *
84746 + * This program is free software: you can redistribute it and/or modify it under
84747 + * the terms of the GNU General Public License as published by the Free Software
84748 + * Foundation, either version 2 of the License, or (at your option) any later
84749 + * version.
84750 + *
84751 + * This program is distributed in the hope that it will be useful, but WITHOUT
84752 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
84753 + * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
84754 + * details.
84755 + *
84756 + * You should have received a copy of the GNU General Public License along with
84757 + * this program.  If not, see <http://www.gnu.org/licenses/>.
84758 + */
84760 +#include <linux/string.h>
84761 +#include <linux/compiler.h>
84762 +#include <linux/types.h>
84763 +#include <linux/slab.h>
84764 +#include <asm/unaligned.h>
84767 +/* "Force inline" macro (not required, but helpful for performance)  */
84768 +#define forceinline __always_inline
84770 +/* Enable whole-word match copying on selected architectures  */
84771 +#if defined(__i386__) || defined(__x86_64__) || defined(__ARM_FEATURE_UNALIGNED)
84772 +#  define FAST_UNALIGNED_ACCESS
84773 +#endif
84775 +/* Size of a machine word  */
84776 +#define WORDBYTES (sizeof(size_t))
84778 +static forceinline void
84779 +copy_unaligned_word(const void *src, void *dst)
84781 +       put_unaligned(get_unaligned((const size_t *)src), (size_t *)dst);
84785 +/* Generate a "word" with platform-dependent size whose bytes all contain the
84786 + * value 'b'.
84787 + */
84788 +static forceinline size_t repeat_byte(u8 b)
84790 +       size_t v;
84792 +       v = b;
84793 +       v |= v << 8;
84794 +       v |= v << 16;
84795 +       v |= v << ((WORDBYTES == 8) ? 32 : 0);
84796 +       return v;
84799 +/* Structure that encapsulates a block of in-memory data being interpreted as a
84800 + * stream of bits, optionally with interwoven literal bytes.  Bits are assumed
84801 + * to be stored in little endian 16-bit coding units, with the bits ordered high
84802 + * to low.
84803 + */
84804 +struct input_bitstream {
84806 +       /* Bits that have been read from the input buffer.  The bits are
84807 +        * left-justified; the next bit is always bit 31.
84808 +        */
84809 +       u32 bitbuf;
84811 +       /* Number of bits currently held in @bitbuf.  */
84812 +       u32 bitsleft;
84814 +       /* Pointer to the next byte to be retrieved from the input buffer.  */
84815 +       const u8 *next;
84817 +       /* Pointer to just past the end of the input buffer.  */
84818 +       const u8 *end;
84821 +/* Initialize a bitstream to read from the specified input buffer.  */
84822 +static forceinline void init_input_bitstream(struct input_bitstream *is,
84823 +                                            const void *buffer, u32 size)
84825 +       is->bitbuf = 0;
84826 +       is->bitsleft = 0;
84827 +       is->next = buffer;
84828 +       is->end = is->next + size;
84831 +/* Ensure the bit buffer variable for the bitstream contains at least @num_bits
84832 + * bits.  Following this, bitstream_peek_bits() and/or bitstream_remove_bits()
84833 + * may be called on the bitstream to peek or remove up to @num_bits bits.  Note
84834 + * that @num_bits must be <= 16.
84835 + */
84836 +static forceinline void bitstream_ensure_bits(struct input_bitstream *is,
84837 +                                             u32 num_bits)
84839 +       if (is->bitsleft < num_bits) {
84840 +               if (is->end - is->next >= 2) {
84841 +                       is->bitbuf |= (u32)get_unaligned_le16(is->next)
84842 +                                       << (16 - is->bitsleft);
84843 +                       is->next += 2;
84844 +               }
84845 +               is->bitsleft += 16;
84846 +       }
84849 +/* Return the next @num_bits bits from the bitstream, without removing them.
84850 + * There must be at least @num_bits remaining in the buffer variable, from a
84851 + * previous call to bitstream_ensure_bits().
84852 + */
84853 +static forceinline u32
84854 +bitstream_peek_bits(const struct input_bitstream *is, const u32 num_bits)
84856 +       return (is->bitbuf >> 1) >> (sizeof(is->bitbuf) * 8 - num_bits - 1);
84859 +/* Remove @num_bits from the bitstream.  There must be at least @num_bits
84860 + * remaining in the buffer variable, from a previous call to
84861 + * bitstream_ensure_bits().
84862 + */
84863 +static forceinline void
84864 +bitstream_remove_bits(struct input_bitstream *is, u32 num_bits)
84866 +       is->bitbuf <<= num_bits;
84867 +       is->bitsleft -= num_bits;
84870 +/* Remove and return @num_bits bits from the bitstream.  There must be at least
84871 + * @num_bits remaining in the buffer variable, from a previous call to
84872 + * bitstream_ensure_bits().
84873 + */
84874 +static forceinline u32
84875 +bitstream_pop_bits(struct input_bitstream *is, u32 num_bits)
84877 +       u32 bits = bitstream_peek_bits(is, num_bits);
84879 +       bitstream_remove_bits(is, num_bits);
84880 +       return bits;
84883 +/* Read and return the next @num_bits bits from the bitstream.  */
84884 +static forceinline u32
84885 +bitstream_read_bits(struct input_bitstream *is, u32 num_bits)
84887 +       bitstream_ensure_bits(is, num_bits);
84888 +       return bitstream_pop_bits(is, num_bits);
84891 +/* Read and return the next literal byte embedded in the bitstream.  */
84892 +static forceinline u8
84893 +bitstream_read_byte(struct input_bitstream *is)
84895 +       if (unlikely(is->end == is->next))
84896 +               return 0;
84897 +       return *is->next++;
84900 +/* Read and return the next 16-bit integer embedded in the bitstream.  */
84901 +static forceinline u16
84902 +bitstream_read_u16(struct input_bitstream *is)
84904 +       u16 v;
84906 +       if (unlikely(is->end - is->next < 2))
84907 +               return 0;
84908 +       v = get_unaligned_le16(is->next);
84909 +       is->next += 2;
84910 +       return v;
84913 +/* Read and return the next 32-bit integer embedded in the bitstream.  */
84914 +static forceinline u32
84915 +bitstream_read_u32(struct input_bitstream *is)
84917 +       u32 v;
84919 +       if (unlikely(is->end - is->next < 4))
84920 +               return 0;
84921 +       v = get_unaligned_le32(is->next);
84922 +       is->next += 4;
84923 +       return v;
84926 +/* Read into @dst_buffer an array of literal bytes embedded in the bitstream.
84927 + * Return either a pointer to the byte past the last written, or NULL if the
84928 + * read overflows the input buffer.
84929 + */
84930 +static forceinline void *bitstream_read_bytes(struct input_bitstream *is,
84931 +                                             void *dst_buffer, size_t count)
84933 +       if ((size_t)(is->end - is->next) < count)
84934 +               return NULL;
84935 +       memcpy(dst_buffer, is->next, count);
84936 +       is->next += count;
84937 +       return (u8 *)dst_buffer + count;
84940 +/* Align the input bitstream on a coding-unit boundary.  */
84941 +static forceinline void bitstream_align(struct input_bitstream *is)
84943 +       is->bitsleft = 0;
84944 +       is->bitbuf = 0;
84947 +extern int make_huffman_decode_table(u16 decode_table[], const u32 num_syms,
84948 +                                    const u32 num_bits, const u8 lens[],
84949 +                                    const u32 max_codeword_len,
84950 +                                    u16 working_space[]);
84953 +/* Reads and returns the next Huffman-encoded symbol from a bitstream.  If the
84954 + * input data is exhausted, the Huffman symbol is decoded as if the missing bits
84955 + * are all zeroes.
84956 + */
84957 +static forceinline u32 read_huffsym(struct input_bitstream *istream,
84958 +                                        const u16 decode_table[],
84959 +                                        u32 table_bits,
84960 +                                        u32 max_codeword_len)
84962 +       u32 entry;
84963 +       u32 key_bits;
84965 +       bitstream_ensure_bits(istream, max_codeword_len);
84967 +       /* Index the decode table by the next table_bits bits of the input.  */
84968 +       key_bits = bitstream_peek_bits(istream, table_bits);
84969 +       entry = decode_table[key_bits];
84970 +       if (entry < 0xC000) {
84971 +               /* Fast case: The decode table directly provided the
84972 +                * symbol and codeword length.  The low 11 bits are the
84973 +                * symbol, and the high 5 bits are the codeword length.
84974 +                */
84975 +               bitstream_remove_bits(istream, entry >> 11);
84976 +               return entry & 0x7FF;
84977 +       }
84978 +       /* Slow case: The codeword for the symbol is longer than
84979 +        * table_bits, so the symbol does not have an entry
84980 +        * directly in the first (1 << table_bits) entries of the
84981 +        * decode table.  Traverse the appropriate binary tree
84982 +        * bit-by-bit to decode the symbol.
84983 +        */
84984 +       bitstream_remove_bits(istream, table_bits);
84985 +       do {
84986 +               key_bits = (entry & 0x3FFF) + bitstream_pop_bits(istream, 1);
84987 +       } while ((entry = decode_table[key_bits]) >= 0xC000);
84988 +       return entry;
84992 + * Copy an LZ77 match at (dst - offset) to dst.
84993 + *
84994 + * The length and offset must be already validated --- that is, (dst - offset)
84995 + * can't underrun the output buffer, and (dst + length) can't overrun the output
84996 + * buffer.  Also, the length cannot be 0.
84997 + *
84998 + * @bufend points to the byte past the end of the output buffer.  This function
84999 + * won't write any data beyond this position.
85000 + *
85001 + * Returns dst + length.
85002 + */
85003 +static forceinline u8 *lz_copy(u8 *dst, u32 length, u32 offset, const u8 *bufend,
85004 +                              u32 min_length)
85006 +       const u8 *src = dst - offset;
85008 +       /*
85009 +        * Try to copy one machine word at a time.  On i386 and x86_64 this is
85010 +        * faster than copying one byte at a time, unless the data is
85011 +        * near-random and all the matches have very short lengths.  Note that
85012 +        * since this requires unaligned memory accesses, it won't necessarily
85013 +        * be faster on every architecture.
85014 +        *
85015 +        * Also note that we might copy more than the length of the match.  For
85016 +        * example, if a word is 8 bytes and the match is of length 5, then
85017 +        * we'll simply copy 8 bytes.  This is okay as long as we don't write
85018 +        * beyond the end of the output buffer, hence the check for (bufend -
85019 +        * end >= WORDBYTES - 1).
85020 +        */
85021 +#ifdef FAST_UNALIGNED_ACCESS
85022 +       u8 * const end = dst + length;
85024 +       if (bufend - end >= (ptrdiff_t)(WORDBYTES - 1)) {
85026 +               if (offset >= WORDBYTES) {
85027 +                       /* The source and destination words don't overlap.  */
85029 +                       /* To improve branch prediction, one iteration of this
85030 +                        * loop is unrolled.  Most matches are short and will
85031 +                        * fail the first check.  But if that check passes, then
85032 +                        * it becomes increasingly likely that the match is long
85033 +                        * and we'll need to continue copying.
85034 +                        */
85036 +                       copy_unaligned_word(src, dst);
85037 +                       src += WORDBYTES;
85038 +                       dst += WORDBYTES;
85040 +                       if (dst < end) {
85041 +                               do {
85042 +                                       copy_unaligned_word(src, dst);
85043 +                                       src += WORDBYTES;
85044 +                                       dst += WORDBYTES;
85045 +                               } while (dst < end);
85046 +                       }
85047 +                       return end;
85048 +               } else if (offset == 1) {
85050 +                       /* Offset 1 matches are equivalent to run-length
85051 +                        * encoding of the previous byte.  This case is common
85052 +                        * if the data contains many repeated bytes.
85053 +                        */
85054 +                       size_t v = repeat_byte(*(dst - 1));
85056 +                       do {
85057 +                               put_unaligned(v, (size_t *)dst);
85058 +                               src += WORDBYTES;
85059 +                               dst += WORDBYTES;
85060 +                       } while (dst < end);
85061 +                       return end;
85062 +               }
85063 +               /*
85064 +                * We don't bother with special cases for other 'offset <
85065 +                * WORDBYTES', which are usually rarer than 'offset == 1'.  Extra
85066 +                * checks will just slow things down.  Actually, it's possible
85067 +                * to handle all the 'offset < WORDBYTES' cases using the same
85068 +                * code, but it still becomes more complicated doesn't seem any
85069 +                * faster overall; it definitely slows down the more common
85070 +                * 'offset == 1' case.
85071 +                */
85072 +       }
85073 +#endif /* FAST_UNALIGNED_ACCESS */
85075 +       /* Fall back to a bytewise copy.  */
85077 +       if (min_length >= 2) {
85078 +               *dst++ = *src++;
85079 +               length--;
85080 +       }
85081 +       if (min_length >= 3) {
85082 +               *dst++ = *src++;
85083 +               length--;
85084 +       }
85085 +       do {
85086 +               *dst++ = *src++;
85087 +       } while (--length);
85089 +       return dst;
85091 diff --git a/fs/ntfs3/lib/lib.h b/fs/ntfs3/lib/lib.h
85092 new file mode 100644
85093 index 000000000000..f508fbad2e71
85094 --- /dev/null
85095 +++ b/fs/ntfs3/lib/lib.h
85096 @@ -0,0 +1,26 @@
85097 +/* SPDX-License-Identifier: GPL-2.0-or-later */
85099 + * Adapted for linux kernel by Alexander Mamaev:
85100 + * - remove implementations of get_unaligned_
85101 + * - assume GCC is always defined
85102 + * - ISO C90
85103 + * - linux kernel code style
85104 + */
85107 +/* globals from xpress_decompress.c */
85108 +struct xpress_decompressor *xpress_allocate_decompressor(void);
85109 +void xpress_free_decompressor(struct xpress_decompressor *d);
85110 +int xpress_decompress(struct xpress_decompressor *__restrict d,
85111 +                     const void *__restrict compressed_data,
85112 +                     size_t compressed_size,
85113 +                     void *__restrict uncompressed_data,
85114 +                     size_t uncompressed_size);
85116 +/* globals from lzx_decompress.c */
85117 +struct lzx_decompressor *lzx_allocate_decompressor(void);
85118 +void lzx_free_decompressor(struct lzx_decompressor *d);
85119 +int lzx_decompress(struct lzx_decompressor *__restrict d,
85120 +                  const void *__restrict compressed_data,
85121 +                  size_t compressed_size, void *__restrict uncompressed_data,
85122 +                  size_t uncompressed_size);
85123 diff --git a/fs/ntfs3/lib/lzx_decompress.c b/fs/ntfs3/lib/lzx_decompress.c
85124 new file mode 100644
85125 index 000000000000..77a381a693d1
85126 --- /dev/null
85127 +++ b/fs/ntfs3/lib/lzx_decompress.c
85128 @@ -0,0 +1,683 @@
85129 +// SPDX-License-Identifier: GPL-2.0-or-later
85131 + * lzx_decompress.c - A decompressor for the LZX compression format, which can
85132 + * be used in "System Compressed" files.  This is based on the code from wimlib.
85133 + * This code only supports a window size (dictionary size) of 32768 bytes, since
85134 + * this is the only size used in System Compression.
85135 + *
85136 + * Copyright (C) 2015 Eric Biggers
85137 + *
85138 + * This program is free software: you can redistribute it and/or modify it under
85139 + * the terms of the GNU General Public License as published by the Free Software
85140 + * Foundation, either version 2 of the License, or (at your option) any later
85141 + * version.
85142 + *
85143 + * This program is distributed in the hope that it will be useful, but WITHOUT
85144 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
85145 + * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
85146 + * details.
85147 + *
85148 + * You should have received a copy of the GNU General Public License along with
85149 + * this program.  If not, see <http://www.gnu.org/licenses/>.
85150 + */
85152 +#include "decompress_common.h"
85153 +#include "lib.h"
85155 +/* Number of literal byte values  */
85156 +#define LZX_NUM_CHARS                  256
85158 +/* The smallest and largest allowed match lengths  */
85159 +#define LZX_MIN_MATCH_LEN              2
85160 +#define LZX_MAX_MATCH_LEN              257
85162 +/* Number of distinct match lengths that can be represented  */
85163 +#define LZX_NUM_LENS                   (LZX_MAX_MATCH_LEN - LZX_MIN_MATCH_LEN + 1)
85165 +/* Number of match lengths for which no length symbol is required  */
85166 +#define LZX_NUM_PRIMARY_LENS           7
85167 +#define LZX_NUM_LEN_HEADERS            (LZX_NUM_PRIMARY_LENS + 1)
85169 +/* Valid values of the 3-bit block type field  */
85170 +#define LZX_BLOCKTYPE_VERBATIM         1
85171 +#define LZX_BLOCKTYPE_ALIGNED          2
85172 +#define LZX_BLOCKTYPE_UNCOMPRESSED     3
85174 +/* Number of offset slots for a window size of 32768  */
85175 +#define LZX_NUM_OFFSET_SLOTS           30
85177 +/* Number of symbols in the main code for a window size of 32768  */
85178 +#define LZX_MAINCODE_NUM_SYMBOLS       \
85179 +       (LZX_NUM_CHARS + (LZX_NUM_OFFSET_SLOTS * LZX_NUM_LEN_HEADERS))
85181 +/* Number of symbols in the length code  */
85182 +#define LZX_LENCODE_NUM_SYMBOLS                (LZX_NUM_LENS - LZX_NUM_PRIMARY_LENS)
85184 +/* Number of symbols in the precode  */
85185 +#define LZX_PRECODE_NUM_SYMBOLS                20
85187 +/* Number of bits in which each precode codeword length is represented  */
85188 +#define LZX_PRECODE_ELEMENT_SIZE       4
85190 +/* Number of low-order bits of each match offset that are entropy-encoded in
85191 + * aligned offset blocks
85192 + */
85193 +#define LZX_NUM_ALIGNED_OFFSET_BITS    3
85195 +/* Number of symbols in the aligned offset code  */
85196 +#define LZX_ALIGNEDCODE_NUM_SYMBOLS    (1 << LZX_NUM_ALIGNED_OFFSET_BITS)
85198 +/* Mask for the match offset bits that are entropy-encoded in aligned offset
85199 + * blocks
85200 + */
85201 +#define LZX_ALIGNED_OFFSET_BITMASK     ((1 << LZX_NUM_ALIGNED_OFFSET_BITS) - 1)
85203 +/* Number of bits in which each aligned offset codeword length is represented  */
85204 +#define LZX_ALIGNEDCODE_ELEMENT_SIZE   3
85206 +/* Maximum lengths (in bits) of the codewords in each Huffman code  */
85207 +#define LZX_MAX_MAIN_CODEWORD_LEN      16
85208 +#define LZX_MAX_LEN_CODEWORD_LEN       16
85209 +#define LZX_MAX_PRE_CODEWORD_LEN       ((1 << LZX_PRECODE_ELEMENT_SIZE) - 1)
85210 +#define LZX_MAX_ALIGNED_CODEWORD_LEN   ((1 << LZX_ALIGNEDCODE_ELEMENT_SIZE) - 1)
85212 +/* The default "filesize" value used in pre/post-processing.  In the LZX format
85213 + * used in cabinet files this value must be given to the decompressor, whereas
85214 + * in the LZX format used in WIM files and system-compressed files this value is
85215 + * fixed at 12000000.
85216 + */
85217 +#define LZX_DEFAULT_FILESIZE           12000000
85219 +/* Assumed block size when the encoded block size begins with a 0 bit.  */
85220 +#define LZX_DEFAULT_BLOCK_SIZE         32768
85222 +/* Number of offsets in the recent (or "repeat") offsets queue.  */
85223 +#define LZX_NUM_RECENT_OFFSETS         3
85225 +/* These values are chosen for fast decompression.  */
85226 +#define LZX_MAINCODE_TABLEBITS         11
85227 +#define LZX_LENCODE_TABLEBITS          10
85228 +#define LZX_PRECODE_TABLEBITS          6
85229 +#define LZX_ALIGNEDCODE_TABLEBITS      7
85231 +#define LZX_READ_LENS_MAX_OVERRUN      50
85233 +/* Mapping: offset slot => first match offset that uses that offset slot.
85234 + */
85235 +static const u32 lzx_offset_slot_base[LZX_NUM_OFFSET_SLOTS + 1] = {
85236 +       0,      1,      2,      3,      4,      /* 0  --- 4  */
85237 +       6,      8,      12,     16,     24,     /* 5  --- 9  */
85238 +       32,     48,     64,     96,     128,    /* 10 --- 14 */
85239 +       192,    256,    384,    512,    768,    /* 15 --- 19 */
85240 +       1024,   1536,   2048,   3072,   4096,   /* 20 --- 24 */
85241 +       6144,   8192,   12288,  16384,  24576,  /* 25 --- 29 */
85242 +       32768,                                  /* extra     */
85245 +/* Mapping: offset slot => how many extra bits must be read and added to the
85246 + * corresponding offset slot base to decode the match offset.
85247 + */
85248 +static const u8 lzx_extra_offset_bits[LZX_NUM_OFFSET_SLOTS] = {
85249 +       0,      0,      0,      0,      1,
85250 +       1,      2,      2,      3,      3,
85251 +       4,      4,      5,      5,      6,
85252 +       6,      7,      7,      8,      8,
85253 +       9,      9,      10,     10,     11,
85254 +       11,     12,     12,     13,     13,
85257 +/* Reusable heap-allocated memory for LZX decompression  */
85258 +struct lzx_decompressor {
85260 +       /* Huffman decoding tables, and arrays that map symbols to codeword
85261 +        * lengths
85262 +        */
85264 +       u16 maincode_decode_table[(1 << LZX_MAINCODE_TABLEBITS) +
85265 +                                       (LZX_MAINCODE_NUM_SYMBOLS * 2)];
85266 +       u8 maincode_lens[LZX_MAINCODE_NUM_SYMBOLS + LZX_READ_LENS_MAX_OVERRUN];
85269 +       u16 lencode_decode_table[(1 << LZX_LENCODE_TABLEBITS) +
85270 +                                       (LZX_LENCODE_NUM_SYMBOLS * 2)];
85271 +       u8 lencode_lens[LZX_LENCODE_NUM_SYMBOLS + LZX_READ_LENS_MAX_OVERRUN];
85274 +       u16 alignedcode_decode_table[(1 << LZX_ALIGNEDCODE_TABLEBITS) +
85275 +                                       (LZX_ALIGNEDCODE_NUM_SYMBOLS * 2)];
85276 +       u8 alignedcode_lens[LZX_ALIGNEDCODE_NUM_SYMBOLS];
85278 +       u16 precode_decode_table[(1 << LZX_PRECODE_TABLEBITS) +
85279 +                                (LZX_PRECODE_NUM_SYMBOLS * 2)];
85280 +       u8 precode_lens[LZX_PRECODE_NUM_SYMBOLS];
85282 +       /* Temporary space for make_huffman_decode_table()  */
85283 +       u16 working_space[2 * (1 + LZX_MAX_MAIN_CODEWORD_LEN) +
85284 +                         LZX_MAINCODE_NUM_SYMBOLS];
85287 +static void undo_e8_translation(void *target, s32 input_pos)
85289 +       s32 abs_offset, rel_offset;
85291 +       abs_offset = get_unaligned_le32(target);
85292 +       if (abs_offset >= 0) {
85293 +               if (abs_offset < LZX_DEFAULT_FILESIZE) {
85294 +                       /* "good translation" */
85295 +                       rel_offset = abs_offset - input_pos;
85296 +                       put_unaligned_le32(rel_offset, target);
85297 +               }
85298 +       } else {
85299 +               if (abs_offset >= -input_pos) {
85300 +                       /* "compensating translation" */
85301 +                       rel_offset = abs_offset + LZX_DEFAULT_FILESIZE;
85302 +                       put_unaligned_le32(rel_offset, target);
85303 +               }
85304 +       }
85308 + * Undo the 'E8' preprocessing used in LZX.  Before compression, the
85309 + * uncompressed data was preprocessed by changing the targets of suspected x86
85310 + * CALL instructions from relative offsets to absolute offsets.  After
85311 + * match/literal decoding, the decompressor must undo the translation.
85312 + */
85313 +static void lzx_postprocess(u8 *data, u32 size)
85315 +       /*
85316 +        * A worthwhile optimization is to push the end-of-buffer check into the
85317 +        * relatively rare E8 case.  This is possible if we replace the last six
85318 +        * bytes of data with E8 bytes; then we are guaranteed to hit an E8 byte
85319 +        * before reaching end-of-buffer.  In addition, this scheme guarantees
85320 +        * that no translation can begin following an E8 byte in the last 10
85321 +        * bytes because a 4-byte offset containing E8 as its high byte is a
85322 +        * large negative number that is not valid for translation.  That is
85323 +        * exactly what we need.
85324 +        */
85325 +       u8 *tail;
85326 +       u8 saved_bytes[6];
85327 +       u8 *p;
85329 +       if (size <= 10)
85330 +               return;
85332 +       tail = &data[size - 6];
85333 +       memcpy(saved_bytes, tail, 6);
85334 +       memset(tail, 0xE8, 6);
85335 +       p = data;
85336 +       for (;;) {
85337 +               while (*p != 0xE8)
85338 +                       p++;
85339 +               if (p >= tail)
85340 +                       break;
85341 +               undo_e8_translation(p + 1, p - data);
85342 +               p += 5;
85343 +       }
85344 +       memcpy(tail, saved_bytes, 6);
85347 +/* Read a Huffman-encoded symbol using the precode.  */
85348 +static forceinline u32 read_presym(const struct lzx_decompressor *d,
85349 +                                       struct input_bitstream *is)
85351 +       return read_huffsym(is, d->precode_decode_table,
85352 +                           LZX_PRECODE_TABLEBITS, LZX_MAX_PRE_CODEWORD_LEN);
85355 +/* Read a Huffman-encoded symbol using the main code.  */
85356 +static forceinline u32 read_mainsym(const struct lzx_decompressor *d,
85357 +                                        struct input_bitstream *is)
85359 +       return read_huffsym(is, d->maincode_decode_table,
85360 +                           LZX_MAINCODE_TABLEBITS, LZX_MAX_MAIN_CODEWORD_LEN);
85363 +/* Read a Huffman-encoded symbol using the length code.  */
85364 +static forceinline u32 read_lensym(const struct lzx_decompressor *d,
85365 +                                       struct input_bitstream *is)
85367 +       return read_huffsym(is, d->lencode_decode_table,
85368 +                           LZX_LENCODE_TABLEBITS, LZX_MAX_LEN_CODEWORD_LEN);
85371 +/* Read a Huffman-encoded symbol using the aligned offset code.  */
85372 +static forceinline u32 read_alignedsym(const struct lzx_decompressor *d,
85373 +                                           struct input_bitstream *is)
85375 +       return read_huffsym(is, d->alignedcode_decode_table,
85376 +                           LZX_ALIGNEDCODE_TABLEBITS,
85377 +                           LZX_MAX_ALIGNED_CODEWORD_LEN);
85381 + * Read the precode from the compressed input bitstream, then use it to decode
85382 + * @num_lens codeword length values.
85383 + *
85384 + * @is:                The input bitstream.
85385 + *
85386 + * @lens:      An array that contains the length values from the previous time
85387 + *             the codeword lengths for this Huffman code were read, or all 0's
85388 + *             if this is the first time.  This array must have at least
85389 + *             (@num_lens + LZX_READ_LENS_MAX_OVERRUN) entries.
85390 + *
85391 + * @num_lens:  Number of length values to decode.
85392 + *
85393 + * Returns 0 on success, or -1 if the data was invalid.
85394 + */
85395 +static int lzx_read_codeword_lens(struct lzx_decompressor *d,
85396 +                                 struct input_bitstream *is,
85397 +                                 u8 *lens, u32 num_lens)
85399 +       u8 *len_ptr = lens;
85400 +       u8 *lens_end = lens + num_lens;
85401 +       int i;
85403 +       /* Read the lengths of the precode codewords.  These are given
85404 +        * explicitly.
85405 +        */
85406 +       for (i = 0; i < LZX_PRECODE_NUM_SYMBOLS; i++) {
85407 +               d->precode_lens[i] =
85408 +                       bitstream_read_bits(is, LZX_PRECODE_ELEMENT_SIZE);
85409 +       }
85411 +       /* Make the decoding table for the precode.  */
85412 +       if (make_huffman_decode_table(d->precode_decode_table,
85413 +                                     LZX_PRECODE_NUM_SYMBOLS,
85414 +                                     LZX_PRECODE_TABLEBITS,
85415 +                                     d->precode_lens,
85416 +                                     LZX_MAX_PRE_CODEWORD_LEN,
85417 +                                     d->working_space))
85418 +               return -1;
85420 +       /* Decode the codeword lengths.  */
85421 +       do {
85422 +               u32 presym;
85423 +               u8 len;
85425 +               /* Read the next precode symbol.  */
85426 +               presym = read_presym(d, is);
85427 +               if (presym < 17) {
85428 +                       /* Difference from old length  */
85429 +                       len = *len_ptr - presym;
85430 +                       if ((s8)len < 0)
85431 +                               len += 17;
85432 +                       *len_ptr++ = len;
85433 +               } else {
85434 +                       /* Special RLE values  */
85436 +                       u32 run_len;
85438 +                       if (presym == 17) {
85439 +                               /* Run of 0's  */
85440 +                               run_len = 4 + bitstream_read_bits(is, 4);
85441 +                               len = 0;
85442 +                       } else if (presym == 18) {
85443 +                               /* Longer run of 0's  */
85444 +                               run_len = 20 + bitstream_read_bits(is, 5);
85445 +                               len = 0;
85446 +                       } else {
85447 +                               /* Run of identical lengths  */
85448 +                               run_len = 4 + bitstream_read_bits(is, 1);
85449 +                               presym = read_presym(d, is);
85450 +                               if (presym > 17)
85451 +                                       return -1;
85452 +                               len = *len_ptr - presym;
85453 +                               if ((s8)len < 0)
85454 +                                       len += 17;
85455 +                       }
85457 +                       do {
85458 +                               *len_ptr++ = len;
85459 +                       } while (--run_len);
85460 +                       /* Worst case overrun is when presym == 18,
85461 +                        * run_len == 20 + 31, and only 1 length was remaining.
85462 +                        * So LZX_READ_LENS_MAX_OVERRUN == 50.
85463 +                        *
85464 +                        * Overrun while reading the first half of maincode_lens
85465 +                        * can corrupt the previous values in the second half.
85466 +                        * This doesn't really matter because the resulting
85467 +                        * lengths will still be in range, and data that
85468 +                        * generates overruns is invalid anyway.
85469 +                        */
85470 +               }
85471 +       } while (len_ptr < lens_end);
85473 +       return 0;
85477 + * Read the header of an LZX block and save the block type and (uncompressed)
85478 + * size in *block_type_ret and *block_size_ret, respectively.
85479 + *
85480 + * If the block is compressed, also update the Huffman decode @tables with the
85481 + * new Huffman codes.  If the block is uncompressed, also update the match
85482 + * offset @queue with the new match offsets.
85483 + *
85484 + * Return 0 on success, or -1 if the data was invalid.
85485 + */
85486 +static int lzx_read_block_header(struct lzx_decompressor *d,
85487 +                                struct input_bitstream *is,
85488 +                                int *block_type_ret,
85489 +                                u32 *block_size_ret,
85490 +                                u32 recent_offsets[])
85492 +       int block_type;
85493 +       u32 block_size;
85494 +       int i;
85496 +       bitstream_ensure_bits(is, 4);
85498 +       /* The first three bits tell us what kind of block it is, and should be
85499 +        * one of the LZX_BLOCKTYPE_* values.
85500 +        */
85501 +       block_type = bitstream_pop_bits(is, 3);
85503 +       /* Read the block size.  */
85504 +       if (bitstream_pop_bits(is, 1)) {
85505 +               block_size = LZX_DEFAULT_BLOCK_SIZE;
85506 +       } else {
85507 +               block_size = 0;
85508 +               block_size |= bitstream_read_bits(is, 8);
85509 +               block_size <<= 8;
85510 +               block_size |= bitstream_read_bits(is, 8);
85511 +       }
85513 +       switch (block_type) {
85515 +       case LZX_BLOCKTYPE_ALIGNED:
85517 +               /* Read the aligned offset code and prepare its decode table.
85518 +                */
85520 +               for (i = 0; i < LZX_ALIGNEDCODE_NUM_SYMBOLS; i++) {
85521 +                       d->alignedcode_lens[i] =
85522 +                               bitstream_read_bits(is,
85523 +                                                   LZX_ALIGNEDCODE_ELEMENT_SIZE);
85524 +               }
85526 +               if (make_huffman_decode_table(d->alignedcode_decode_table,
85527 +                                             LZX_ALIGNEDCODE_NUM_SYMBOLS,
85528 +                                             LZX_ALIGNEDCODE_TABLEBITS,
85529 +                                             d->alignedcode_lens,
85530 +                                             LZX_MAX_ALIGNED_CODEWORD_LEN,
85531 +                                             d->working_space))
85532 +                       return -1;
85534 +               /* Fall though, since the rest of the header for aligned offset
85535 +                * blocks is the same as that for verbatim blocks.
85536 +                */
85537 +               fallthrough;
85539 +       case LZX_BLOCKTYPE_VERBATIM:
85541 +               /* Read the main code and prepare its decode table.
85542 +                *
85543 +                * Note that the codeword lengths in the main code are encoded
85544 +                * in two parts: one part for literal symbols, and one part for
85545 +                * match symbols.
85546 +                */
85548 +               if (lzx_read_codeword_lens(d, is, d->maincode_lens,
85549 +                                          LZX_NUM_CHARS))
85550 +                       return -1;
85552 +               if (lzx_read_codeword_lens(d, is,
85553 +                                          d->maincode_lens + LZX_NUM_CHARS,
85554 +                                          LZX_MAINCODE_NUM_SYMBOLS - LZX_NUM_CHARS))
85555 +                       return -1;
85557 +               if (make_huffman_decode_table(d->maincode_decode_table,
85558 +                                             LZX_MAINCODE_NUM_SYMBOLS,
85559 +                                             LZX_MAINCODE_TABLEBITS,
85560 +                                             d->maincode_lens,
85561 +                                             LZX_MAX_MAIN_CODEWORD_LEN,
85562 +                                             d->working_space))
85563 +                       return -1;
85565 +               /* Read the length code and prepare its decode table.  */
85567 +               if (lzx_read_codeword_lens(d, is, d->lencode_lens,
85568 +                                          LZX_LENCODE_NUM_SYMBOLS))
85569 +                       return -1;
85571 +               if (make_huffman_decode_table(d->lencode_decode_table,
85572 +                                             LZX_LENCODE_NUM_SYMBOLS,
85573 +                                             LZX_LENCODE_TABLEBITS,
85574 +                                             d->lencode_lens,
85575 +                                             LZX_MAX_LEN_CODEWORD_LEN,
85576 +                                             d->working_space))
85577 +                       return -1;
85579 +               break;
85581 +       case LZX_BLOCKTYPE_UNCOMPRESSED:
85583 +               /* Before reading the three recent offsets from the uncompressed
85584 +                * block header, the stream must be aligned on a 16-bit
85585 +                * boundary.  But if the stream is *already* aligned, then the
85586 +                * next 16 bits must be discarded.
85587 +                */
85588 +               bitstream_ensure_bits(is, 1);
85589 +               bitstream_align(is);
85591 +               recent_offsets[0] = bitstream_read_u32(is);
85592 +               recent_offsets[1] = bitstream_read_u32(is);
85593 +               recent_offsets[2] = bitstream_read_u32(is);
85595 +               /* Offsets of 0 are invalid.  */
85596 +               if (recent_offsets[0] == 0 || recent_offsets[1] == 0 ||
85597 +                   recent_offsets[2] == 0)
85598 +                       return -1;
85599 +               break;
85601 +       default:
85602 +               /* Unrecognized block type.  */
85603 +               return -1;
85604 +       }
85606 +       *block_type_ret = block_type;
85607 +       *block_size_ret = block_size;
85608 +       return 0;
85611 +/* Decompress a block of LZX-compressed data.  */
85612 +static int lzx_decompress_block(const struct lzx_decompressor *d,
85613 +                               struct input_bitstream *is,
85614 +                               int block_type, u32 block_size,
85615 +                               u8 * const out_begin, u8 *out_next,
85616 +                               u32 recent_offsets[])
85618 +       u8 * const block_end = out_next + block_size;
85619 +       u32 ones_if_aligned = 0U - (block_type == LZX_BLOCKTYPE_ALIGNED);
85621 +       do {
85622 +               u32 mainsym;
85623 +               u32 match_len;
85624 +               u32 match_offset;
85625 +               u32 offset_slot;
85626 +               u32 num_extra_bits;
85628 +               mainsym = read_mainsym(d, is);
85629 +               if (mainsym < LZX_NUM_CHARS) {
85630 +                       /* Literal  */
85631 +                       *out_next++ = mainsym;
85632 +                       continue;
85633 +               }
85635 +               /* Match  */
85637 +               /* Decode the length header and offset slot.  */
85638 +               mainsym -= LZX_NUM_CHARS;
85639 +               match_len = mainsym % LZX_NUM_LEN_HEADERS;
85640 +               offset_slot = mainsym / LZX_NUM_LEN_HEADERS;
85642 +               /* If needed, read a length symbol to decode the full length. */
85643 +               if (match_len == LZX_NUM_PRIMARY_LENS)
85644 +                       match_len += read_lensym(d, is);
85645 +               match_len += LZX_MIN_MATCH_LEN;
85647 +               if (offset_slot < LZX_NUM_RECENT_OFFSETS) {
85648 +                       /* Repeat offset  */
85650 +                       /* Note: This isn't a real LRU queue, since using the R2
85651 +                        * offset doesn't bump the R1 offset down to R2.  This
85652 +                        * quirk allows all 3 recent offsets to be handled by
85653 +                        * the same code.  (For R0, the swap is a no-op.)
85654 +                        */
85655 +                       match_offset = recent_offsets[offset_slot];
85656 +                       recent_offsets[offset_slot] = recent_offsets[0];
85657 +                       recent_offsets[0] = match_offset;
85658 +               } else {
85659 +                       /* Explicit offset  */
85661 +                       /* Look up the number of extra bits that need to be read
85662 +                        * to decode offsets with this offset slot.
85663 +                        */
85664 +                       num_extra_bits = lzx_extra_offset_bits[offset_slot];
85666 +                       /* Start with the offset slot base value.  */
85667 +                       match_offset = lzx_offset_slot_base[offset_slot];
85669 +                       /* In aligned offset blocks, the low-order 3 bits of
85670 +                        * each offset are encoded using the aligned offset
85671 +                        * code.  Otherwise, all the extra bits are literal.
85672 +                        */
85674 +                       if ((num_extra_bits & ones_if_aligned) >= LZX_NUM_ALIGNED_OFFSET_BITS) {
85675 +                               match_offset +=
85676 +                                       bitstream_read_bits(is, num_extra_bits -
85677 +                                                               LZX_NUM_ALIGNED_OFFSET_BITS)
85678 +                                                       << LZX_NUM_ALIGNED_OFFSET_BITS;
85679 +                               match_offset += read_alignedsym(d, is);
85680 +                       } else {
85681 +                               match_offset += bitstream_read_bits(is, num_extra_bits);
85682 +                       }
85684 +                       /* Adjust the offset.  */
85685 +                       match_offset -= (LZX_NUM_RECENT_OFFSETS - 1);
85687 +                       /* Update the recent offsets.  */
85688 +                       recent_offsets[2] = recent_offsets[1];
85689 +                       recent_offsets[1] = recent_offsets[0];
85690 +                       recent_offsets[0] = match_offset;
85691 +               }
85693 +               /* Validate the match, then copy it to the current position.  */
85695 +               if (match_len > (size_t)(block_end - out_next))
85696 +                       return -1;
85698 +               if (match_offset > (size_t)(out_next - out_begin))
85699 +                       return -1;
85701 +               out_next = lz_copy(out_next, match_len, match_offset,
85702 +                                  block_end, LZX_MIN_MATCH_LEN);
85704 +       } while (out_next != block_end);
85706 +       return 0;
85710 + * lzx_allocate_decompressor - Allocate an LZX decompressor
85711 + *
85712 + * Return the pointer to the decompressor on success, or return NULL and set
85713 + * errno on failure.
85714 + */
85715 +struct lzx_decompressor *lzx_allocate_decompressor(void)
85717 +       return kmalloc(sizeof(struct lzx_decompressor), GFP_NOFS);
85721 + * lzx_decompress - Decompress a buffer of LZX-compressed data
85722 + *
85723 + * @decompressor:      A decompressor allocated with lzx_allocate_decompressor()
85724 + * @compressed_data:   The buffer of data to decompress
85725 + * @compressed_size:   Number of bytes of compressed data
85726 + * @uncompressed_data: The buffer in which to store the decompressed data
85727 + * @uncompressed_size: The number of bytes the data decompresses into
85728 + *
85729 + * Return 0 on success, or return -1 and set errno on failure.
85730 + */
85731 +int lzx_decompress(struct lzx_decompressor *decompressor,
85732 +                  const void *compressed_data, size_t compressed_size,
85733 +                  void *uncompressed_data, size_t uncompressed_size)
85735 +       struct lzx_decompressor *d = decompressor;
85736 +       u8 * const out_begin = uncompressed_data;
85737 +       u8 *out_next = out_begin;
85738 +       u8 * const out_end = out_begin + uncompressed_size;
85739 +       struct input_bitstream is;
85740 +       u32 recent_offsets[LZX_NUM_RECENT_OFFSETS] = {1, 1, 1};
85741 +       int e8_status = 0;
85743 +       init_input_bitstream(&is, compressed_data, compressed_size);
85745 +       /* Codeword lengths begin as all 0's for delta encoding purposes.  */
85746 +       memset(d->maincode_lens, 0, LZX_MAINCODE_NUM_SYMBOLS);
85747 +       memset(d->lencode_lens, 0, LZX_LENCODE_NUM_SYMBOLS);
85749 +       /* Decompress blocks until we have all the uncompressed data.  */
85751 +       while (out_next != out_end) {
85752 +               int block_type;
85753 +               u32 block_size;
85755 +               if (lzx_read_block_header(d, &is, &block_type, &block_size,
85756 +                                         recent_offsets))
85757 +                       goto invalid;
85759 +               if (block_size < 1 || block_size > (size_t)(out_end - out_next))
85760 +                       goto invalid;
85762 +               if (block_type != LZX_BLOCKTYPE_UNCOMPRESSED) {
85764 +                       /* Compressed block  */
85766 +                       if (lzx_decompress_block(d,
85767 +                                                &is,
85768 +                                                block_type,
85769 +                                                block_size,
85770 +                                                out_begin,
85771 +                                                out_next,
85772 +                                                recent_offsets))
85773 +                               goto invalid;
85775 +                       e8_status |= d->maincode_lens[0xe8];
85776 +                       out_next += block_size;
85777 +               } else {
85778 +                       /* Uncompressed block  */
85780 +                       out_next = bitstream_read_bytes(&is, out_next,
85781 +                                                       block_size);
85782 +                       if (!out_next)
85783 +                               goto invalid;
85785 +                       if (block_size & 1)
85786 +                               bitstream_read_byte(&is);
85788 +                       e8_status = 1;
85789 +               }
85790 +       }
85792 +       /* Postprocess the data unless it cannot possibly contain 0xe8 bytes. */
85793 +       if (e8_status)
85794 +               lzx_postprocess(uncompressed_data, uncompressed_size);
85796 +       return 0;
85798 +invalid:
85799 +       return -1;
85803 + * lzx_free_decompressor - Free an LZX decompressor
85804 + *
85805 + * @decompressor:       A decompressor that was allocated with
85806 + *                     lzx_allocate_decompressor(), or NULL.
85807 + */
85808 +void lzx_free_decompressor(struct lzx_decompressor *decompressor)
85810 +       kfree(decompressor);
85812 diff --git a/fs/ntfs3/lib/xpress_decompress.c b/fs/ntfs3/lib/xpress_decompress.c
85813 new file mode 100644
85814 index 000000000000..3d98f36a981e
85815 --- /dev/null
85816 +++ b/fs/ntfs3/lib/xpress_decompress.c
85817 @@ -0,0 +1,155 @@
85818 +// SPDX-License-Identifier: GPL-2.0-or-later
85820 + * xpress_decompress.c - A decompressor for the XPRESS compression format
85821 + * (Huffman variant), which can be used in "System Compressed" files.  This is
85822 + * based on the code from wimlib.
85823 + *
85824 + * Copyright (C) 2015 Eric Biggers
85825 + *
85826 + * This program is free software: you can redistribute it and/or modify it under
85827 + * the terms of the GNU General Public License as published by the Free Software
85828 + * Foundation, either version 2 of the License, or (at your option) any later
85829 + * version.
85830 + *
85831 + * This program is distributed in the hope that it will be useful, but WITHOUT
85832 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
85833 + * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
85834 + * details.
85835 + *
85836 + * You should have received a copy of the GNU General Public License along with
85837 + * this program.  If not, see <http://www.gnu.org/licenses/>.
85838 + */
85840 +#include "decompress_common.h"
85841 +#include "lib.h"
85843 +#define XPRESS_NUM_SYMBOLS     512
85844 +#define XPRESS_MAX_CODEWORD_LEN        15
85845 +#define XPRESS_MIN_MATCH_LEN   3
85847 +/* This value is chosen for fast decompression.  */
85848 +#define XPRESS_TABLEBITS 12
85850 +/* Reusable heap-allocated memory for XPRESS decompression  */
85851 +struct xpress_decompressor {
85853 +       /* The Huffman decoding table  */
85854 +       u16 decode_table[(1 << XPRESS_TABLEBITS) + 2 * XPRESS_NUM_SYMBOLS];
85856 +       /* An array that maps symbols to codeword lengths  */
85857 +       u8 lens[XPRESS_NUM_SYMBOLS];
85859 +       /* Temporary space for make_huffman_decode_table()  */
85860 +       u16 working_space[2 * (1 + XPRESS_MAX_CODEWORD_LEN) +
85861 +                         XPRESS_NUM_SYMBOLS];
85865 + * xpress_allocate_decompressor - Allocate an XPRESS decompressor
85866 + *
85867 + * Return the pointer to the decompressor on success, or return NULL and set
85868 + * errno on failure.
85869 + */
85870 +struct xpress_decompressor *xpress_allocate_decompressor(void)
85872 +       return kmalloc(sizeof(struct xpress_decompressor), GFP_NOFS);
85876 + * xpress_decompress - Decompress a buffer of XPRESS-compressed data
85877 + *
85878 + * @decompressor:       A decompressor that was allocated with
85879 + *                     xpress_allocate_decompressor()
85880 + * @compressed_data:   The buffer of data to decompress
85881 + * @compressed_size:   Number of bytes of compressed data
85882 + * @uncompressed_data: The buffer in which to store the decompressed data
85883 + * @uncompressed_size: The number of bytes the data decompresses into
85884 + *
85885 + * Return 0 on success, or return -1 and set errno on failure.
85886 + */
85887 +int xpress_decompress(struct xpress_decompressor *decompressor,
85888 +                     const void *compressed_data, size_t compressed_size,
85889 +                     void *uncompressed_data, size_t uncompressed_size)
85891 +       struct xpress_decompressor *d = decompressor;
85892 +       const u8 * const in_begin = compressed_data;
85893 +       u8 * const out_begin = uncompressed_data;
85894 +       u8 *out_next = out_begin;
85895 +       u8 * const out_end = out_begin + uncompressed_size;
85896 +       struct input_bitstream is;
85897 +       u32 i;
85899 +       /* Read the Huffman codeword lengths.  */
85900 +       if (compressed_size < XPRESS_NUM_SYMBOLS / 2)
85901 +               goto invalid;
85902 +       for (i = 0; i < XPRESS_NUM_SYMBOLS / 2; i++) {
85903 +               d->lens[i*2 + 0] = in_begin[i] & 0xF;
85904 +               d->lens[i*2 + 1] = in_begin[i] >> 4;
85905 +       }
85907 +       /* Build a decoding table for the Huffman code.  */
85908 +       if (make_huffman_decode_table(d->decode_table, XPRESS_NUM_SYMBOLS,
85909 +                                     XPRESS_TABLEBITS, d->lens,
85910 +                                     XPRESS_MAX_CODEWORD_LEN,
85911 +                                     d->working_space))
85912 +               goto invalid;
85914 +       /* Decode the matches and literals.  */
85916 +       init_input_bitstream(&is, in_begin + XPRESS_NUM_SYMBOLS / 2,
85917 +                            compressed_size - XPRESS_NUM_SYMBOLS / 2);
85919 +       while (out_next != out_end) {
85920 +               u32 sym;
85921 +               u32 log2_offset;
85922 +               u32 length;
85923 +               u32 offset;
85925 +               sym = read_huffsym(&is, d->decode_table,
85926 +                                  XPRESS_TABLEBITS, XPRESS_MAX_CODEWORD_LEN);
85927 +               if (sym < 256) {
85928 +                       /* Literal  */
85929 +                       *out_next++ = sym;
85930 +               } else {
85931 +                       /* Match  */
85932 +                       length = sym & 0xf;
85933 +                       log2_offset = (sym >> 4) & 0xf;
85935 +                       bitstream_ensure_bits(&is, 16);
85937 +                       offset = ((u32)1 << log2_offset) |
85938 +                                bitstream_pop_bits(&is, log2_offset);
85940 +                       if (length == 0xf) {
85941 +                               length += bitstream_read_byte(&is);
85942 +                               if (length == 0xf + 0xff)
85943 +                                       length = bitstream_read_u16(&is);
85944 +                       }
85945 +                       length += XPRESS_MIN_MATCH_LEN;
85947 +                       if (offset > (size_t)(out_next - out_begin))
85948 +                               goto invalid;
85950 +                       if (length > (size_t)(out_end - out_next))
85951 +                               goto invalid;
85953 +                       out_next = lz_copy(out_next, length, offset, out_end,
85954 +                                          XPRESS_MIN_MATCH_LEN);
85955 +               }
85956 +       }
85957 +       return 0;
85959 +invalid:
85960 +       return -1;
85964 + * xpress_free_decompressor - Free an XPRESS decompressor
85965 + *
85966 + * @decompressor:       A decompressor that was allocated with
85967 + *                     xpress_allocate_decompressor(), or NULL.
85968 + */
85969 +void xpress_free_decompressor(struct xpress_decompressor *decompressor)
85971 +       kfree(decompressor);
85973 diff --git a/fs/ntfs3/lznt.c b/fs/ntfs3/lznt.c
85974 new file mode 100644
85975 index 000000000000..ead9ab7d69b3
85976 --- /dev/null
85977 +++ b/fs/ntfs3/lznt.c
85978 @@ -0,0 +1,452 @@
85979 +// SPDX-License-Identifier: GPL-2.0
85981 + *
85982 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
85983 + *
85984 + */
85985 +#include <linux/blkdev.h>
85986 +#include <linux/buffer_head.h>
85987 +#include <linux/fs.h>
85988 +#include <linux/nls.h>
85990 +#include "debug.h"
85991 +#include "ntfs.h"
85992 +#include "ntfs_fs.h"
85994 +// clang-format off
85995 +/* src buffer is zero */
85996 +#define LZNT_ERROR_ALL_ZEROS   1
85997 +#define LZNT_CHUNK_SIZE                0x1000
85998 +// clang-format on
86000 +struct lznt_hash {
86001 +       const u8 *p1;
86002 +       const u8 *p2;
86005 +struct lznt {
86006 +       const u8 *unc;
86007 +       const u8 *unc_end;
86008 +       const u8 *best_match;
86009 +       size_t max_len;
86010 +       bool std;
86012 +       struct lznt_hash hash[LZNT_CHUNK_SIZE];
86015 +static inline size_t get_match_len(const u8 *ptr, const u8 *end, const u8 *prev,
86016 +                                  size_t max_len)
86018 +       size_t len = 0;
86020 +       while (ptr + len < end && ptr[len] == prev[len] && ++len < max_len)
86021 +               ;
86022 +       return len;
86025 +static size_t longest_match_std(const u8 *src, struct lznt *ctx)
86027 +       size_t hash_index;
86028 +       size_t len1 = 0, len2 = 0;
86029 +       const u8 **hash;
86031 +       hash_index =
86032 +               ((40543U * ((((src[0] << 4) ^ src[1]) << 4) ^ src[2])) >> 4) &
86033 +               (LZNT_CHUNK_SIZE - 1);
86035 +       hash = &(ctx->hash[hash_index].p1);
86037 +       if (hash[0] >= ctx->unc && hash[0] < src && hash[0][0] == src[0] &&
86038 +           hash[0][1] == src[1] && hash[0][2] == src[2]) {
86039 +               len1 = 3;
86040 +               if (ctx->max_len > 3)
86041 +                       len1 += get_match_len(src + 3, ctx->unc_end,
86042 +                                             hash[0] + 3, ctx->max_len - 3);
86043 +       }
86045 +       if (hash[1] >= ctx->unc && hash[1] < src && hash[1][0] == src[0] &&
86046 +           hash[1][1] == src[1] && hash[1][2] == src[2]) {
86047 +               len2 = 3;
86048 +               if (ctx->max_len > 3)
86049 +                       len2 += get_match_len(src + 3, ctx->unc_end,
86050 +                                             hash[1] + 3, ctx->max_len - 3);
86051 +       }
86053 +       /* Compare two matches and select the best one */
86054 +       if (len1 < len2) {
86055 +               ctx->best_match = hash[1];
86056 +               len1 = len2;
86057 +       } else {
86058 +               ctx->best_match = hash[0];
86059 +       }
86061 +       hash[1] = hash[0];
86062 +       hash[0] = src;
86063 +       return len1;
86066 +static size_t longest_match_best(const u8 *src, struct lznt *ctx)
86068 +       size_t max_len;
86069 +       const u8 *ptr;
86071 +       if (ctx->unc >= src || !ctx->max_len)
86072 +               return 0;
86074 +       max_len = 0;
86075 +       for (ptr = ctx->unc; ptr < src; ++ptr) {
86076 +               size_t len =
86077 +                       get_match_len(src, ctx->unc_end, ptr, ctx->max_len);
86078 +               if (len >= max_len) {
86079 +                       max_len = len;
86080 +                       ctx->best_match = ptr;
86081 +               }
86082 +       }
86084 +       return max_len >= 3 ? max_len : 0;
86087 +static const size_t s_max_len[] = {
86088 +       0x1002, 0x802, 0x402, 0x202, 0x102, 0x82, 0x42, 0x22, 0x12,
86091 +static const size_t s_max_off[] = {
86092 +       0x10, 0x20, 0x40, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
86095 +static inline u16 make_pair(size_t offset, size_t len, size_t index)
86097 +       return ((offset - 1) << (12 - index)) |
86098 +              ((len - 3) & (((1 << (12 - index)) - 1)));
86101 +static inline size_t parse_pair(u16 pair, size_t *offset, size_t index)
86103 +       *offset = 1 + (pair >> (12 - index));
86104 +       return 3 + (pair & ((1 << (12 - index)) - 1));
86108 + * compress_chunk
86109 + *
86110 + * returns one of the three values:
86111 + * 0 - ok, 'cmpr' contains 'cmpr_chunk_size' bytes of compressed data
86112 + * 1 - input buffer is full zero
86113 + * -2 - the compressed buffer is too small to hold the compressed data
86114 + */
86115 +static inline int compress_chunk(size_t (*match)(const u8 *, struct lznt *),
86116 +                                const u8 *unc, const u8 *unc_end, u8 *cmpr,
86117 +                                u8 *cmpr_end, size_t *cmpr_chunk_size,
86118 +                                struct lznt *ctx)
86120 +       size_t cnt = 0;
86121 +       size_t idx = 0;
86122 +       const u8 *up = unc;
86123 +       u8 *cp = cmpr + 3;
86124 +       u8 *cp2 = cmpr + 2;
86125 +       u8 not_zero = 0;
86126 +       /* Control byte of 8-bit values: ( 0 - means byte as is, 1 - short pair ) */
86127 +       u8 ohdr = 0;
86128 +       u8 *last;
86129 +       u16 t16;
86131 +       if (unc + LZNT_CHUNK_SIZE < unc_end)
86132 +               unc_end = unc + LZNT_CHUNK_SIZE;
86134 +       last = min(cmpr + LZNT_CHUNK_SIZE + sizeof(short), cmpr_end);
86136 +       ctx->unc = unc;
86137 +       ctx->unc_end = unc_end;
86138 +       ctx->max_len = s_max_len[0];
86140 +       while (up < unc_end) {
86141 +               size_t max_len;
86143 +               while (unc + s_max_off[idx] < up)
86144 +                       ctx->max_len = s_max_len[++idx];
86146 +               // Find match
86147 +               max_len = up + 3 <= unc_end ? (*match)(up, ctx) : 0;
86149 +               if (!max_len) {
86150 +                       if (cp >= last)
86151 +                               goto NotCompressed;
86152 +                       not_zero |= *cp++ = *up++;
86153 +               } else if (cp + 1 >= last) {
86154 +                       goto NotCompressed;
86155 +               } else {
86156 +                       t16 = make_pair(up - ctx->best_match, max_len, idx);
86157 +                       *cp++ = t16;
86158 +                       *cp++ = t16 >> 8;
86160 +                       ohdr |= 1 << cnt;
86161 +                       up += max_len;
86162 +               }
86164 +               cnt = (cnt + 1) & 7;
86165 +               if (!cnt) {
86166 +                       *cp2 = ohdr;
86167 +                       ohdr = 0;
86168 +                       cp2 = cp;
86169 +                       cp += 1;
86170 +               }
86171 +       }
86173 +       if (cp2 < last)
86174 +               *cp2 = ohdr;
86175 +       else
86176 +               cp -= 1;
86178 +       *cmpr_chunk_size = cp - cmpr;
86180 +       t16 = (*cmpr_chunk_size - 3) | 0xB000;
86181 +       cmpr[0] = t16;
86182 +       cmpr[1] = t16 >> 8;
86184 +       return not_zero ? 0 : LZNT_ERROR_ALL_ZEROS;
86186 +NotCompressed:
86188 +       if ((cmpr + LZNT_CHUNK_SIZE + sizeof(short)) > last)
86189 +               return -2;
86191 +       /*
86192 +        * Copy non cmpr data
86193 +        * 0x3FFF == ((LZNT_CHUNK_SIZE + 2 - 3) | 0x3000)
86194 +        */
86195 +       cmpr[0] = 0xff;
86196 +       cmpr[1] = 0x3f;
86198 +       memcpy(cmpr + sizeof(short), unc, LZNT_CHUNK_SIZE);
86199 +       *cmpr_chunk_size = LZNT_CHUNK_SIZE + sizeof(short);
86201 +       return 0;
86204 +static inline ssize_t decompress_chunk(u8 *unc, u8 *unc_end, const u8 *cmpr,
86205 +                                      const u8 *cmpr_end)
86207 +       u8 *up = unc;
86208 +       u8 ch = *cmpr++;
86209 +       size_t bit = 0;
86210 +       size_t index = 0;
86211 +       u16 pair;
86212 +       size_t offset, length;
86214 +       /* Do decompression until pointers are inside range */
86215 +       while (up < unc_end && cmpr < cmpr_end) {
86216 +               /* Correct index */
86217 +               while (unc + s_max_off[index] < up)
86218 +                       index += 1;
86220 +               /* Check the current flag for zero */
86221 +               if (!(ch & (1 << bit))) {
86222 +                       /* Just copy byte */
86223 +                       *up++ = *cmpr++;
86224 +                       goto next;
86225 +               }
86227 +               /* Check for boundary */
86228 +               if (cmpr + 1 >= cmpr_end)
86229 +                       return -EINVAL;
86231 +               /* Read a short from little endian stream */
86232 +               pair = cmpr[1];
86233 +               pair <<= 8;
86234 +               pair |= cmpr[0];
86236 +               cmpr += 2;
86238 +               /* Translate packed information into offset and length */
86239 +               length = parse_pair(pair, &offset, index);
86241 +               /* Check offset for boundary */
86242 +               if (unc + offset > up)
86243 +                       return -EINVAL;
86245 +               /* Truncate the length if necessary */
86246 +               if (up + length >= unc_end)
86247 +                       length = unc_end - up;
86249 +               /* Now we copy bytes. This is the heart of LZ algorithm. */
86250 +               for (; length > 0; length--, up++)
86251 +                       *up = *(up - offset);
86253 +next:
86254 +               /* Advance flag bit value */
86255 +               bit = (bit + 1) & 7;
86257 +               if (!bit) {
86258 +                       if (cmpr >= cmpr_end)
86259 +                               break;
86261 +                       ch = *cmpr++;
86262 +               }
86263 +       }
86265 +       /* return the size of uncompressed data */
86266 +       return up - unc;
86270 + * 0 - standard compression
86271 + * !0 - best compression, requires a lot of cpu
86272 + */
86273 +struct lznt *get_lznt_ctx(int level)
86275 +       struct lznt *r = ntfs_zalloc(level ? offsetof(struct lznt, hash)
86276 +                                          : sizeof(struct lznt));
86278 +       if (r)
86279 +               r->std = !level;
86280 +       return r;
86284 + * compress_lznt
86285 + *
86286 + * Compresses "unc" into "cmpr"
86287 + * +x - ok, 'cmpr' contains 'final_compressed_size' bytes of compressed data
86288 + * 0 - input buffer is full zero
86289 + */
86290 +size_t compress_lznt(const void *unc, size_t unc_size, void *cmpr,
86291 +                    size_t cmpr_size, struct lznt *ctx)
86293 +       int err;
86294 +       size_t (*match)(const u8 *src, struct lznt *ctx);
86295 +       u8 *p = cmpr;
86296 +       u8 *end = p + cmpr_size;
86297 +       const u8 *unc_chunk = unc;
86298 +       const u8 *unc_end = unc_chunk + unc_size;
86299 +       bool is_zero = true;
86301 +       if (ctx->std) {
86302 +               match = &longest_match_std;
86303 +               memset(ctx->hash, 0, sizeof(ctx->hash));
86304 +       } else {
86305 +               match = &longest_match_best;
86306 +       }
86308 +       /* compression cycle */
86309 +       for (; unc_chunk < unc_end; unc_chunk += LZNT_CHUNK_SIZE) {
86310 +               cmpr_size = 0;
86311 +               err = compress_chunk(match, unc_chunk, unc_end, p, end,
86312 +                                    &cmpr_size, ctx);
86313 +               if (err < 0)
86314 +                       return unc_size;
86316 +               if (is_zero && err != LZNT_ERROR_ALL_ZEROS)
86317 +                       is_zero = false;
86319 +               p += cmpr_size;
86320 +       }
86322 +       if (p <= end - 2)
86323 +               p[0] = p[1] = 0;
86325 +       return is_zero ? 0 : PtrOffset(cmpr, p);
86329 + * decompress_lznt
86330 + *
86331 + * decompresses "cmpr" into "unc"
86332 + */
86333 +ssize_t decompress_lznt(const void *cmpr, size_t cmpr_size, void *unc,
86334 +                       size_t unc_size)
86336 +       const u8 *cmpr_chunk = cmpr;
86337 +       const u8 *cmpr_end = cmpr_chunk + cmpr_size;
86338 +       u8 *unc_chunk = unc;
86339 +       u8 *unc_end = unc_chunk + unc_size;
86340 +       u16 chunk_hdr;
86342 +       if (cmpr_size < sizeof(short))
86343 +               return -EINVAL;
86345 +       /* read chunk header */
86346 +       chunk_hdr = cmpr_chunk[1];
86347 +       chunk_hdr <<= 8;
86348 +       chunk_hdr |= cmpr_chunk[0];
86350 +       /* loop through decompressing chunks */
86351 +       for (;;) {
86352 +               size_t chunk_size_saved;
86353 +               size_t unc_use;
86354 +               size_t cmpr_use = 3 + (chunk_hdr & (LZNT_CHUNK_SIZE - 1));
86356 +               /* Check that the chunk actually fits the supplied buffer */
86357 +               if (cmpr_chunk + cmpr_use > cmpr_end)
86358 +                       return -EINVAL;
86360 +               /* First make sure the chunk contains compressed data */
86361 +               if (chunk_hdr & 0x8000) {
86362 +                       /* Decompress a chunk and return if we get an error */
86363 +                       ssize_t err =
86364 +                               decompress_chunk(unc_chunk, unc_end,
86365 +                                                cmpr_chunk + sizeof(chunk_hdr),
86366 +                                                cmpr_chunk + cmpr_use);
86367 +                       if (err < 0)
86368 +                               return err;
86369 +                       unc_use = err;
86370 +               } else {
86371 +                       /* This chunk does not contain compressed data */
86372 +                       unc_use = unc_chunk + LZNT_CHUNK_SIZE > unc_end
86373 +                                         ? unc_end - unc_chunk
86374 +                                         : LZNT_CHUNK_SIZE;
86376 +                       if (cmpr_chunk + sizeof(chunk_hdr) + unc_use >
86377 +                           cmpr_end) {
86378 +                               return -EINVAL;
86379 +                       }
86381 +                       memcpy(unc_chunk, cmpr_chunk + sizeof(chunk_hdr),
86382 +                              unc_use);
86383 +               }
86385 +               /* Advance pointers */
86386 +               cmpr_chunk += cmpr_use;
86387 +               unc_chunk += unc_use;
86389 +               /* Check for the end of unc buffer */
86390 +               if (unc_chunk >= unc_end)
86391 +                       break;
86393 +               /* Proceed the next chunk */
86394 +               if (cmpr_chunk > cmpr_end - 2)
86395 +                       break;
86397 +               chunk_size_saved = LZNT_CHUNK_SIZE;
86399 +               /* read chunk header */
86400 +               chunk_hdr = cmpr_chunk[1];
86401 +               chunk_hdr <<= 8;
86402 +               chunk_hdr |= cmpr_chunk[0];
86404 +               if (!chunk_hdr)
86405 +                       break;
86407 +               /* Check the size of unc buffer */
86408 +               if (unc_use < chunk_size_saved) {
86409 +                       size_t t1 = chunk_size_saved - unc_use;
86410 +                       u8 *t2 = unc_chunk + t1;
86412 +                       /* 'Zero' memory */
86413 +                       if (t2 >= unc_end)
86414 +                               break;
86416 +                       memset(unc_chunk, 0, t1);
86417 +                       unc_chunk = t2;
86418 +               }
86419 +       }
86421 +       /* Check compression boundary */
86422 +       if (cmpr_chunk > cmpr_end)
86423 +               return -EINVAL;
86425 +       /*
86426 +        * The unc size is just a difference between current
86427 +        * pointer and original one
86428 +        */
86429 +       return PtrOffset(unc, unc_chunk);
86431 diff --git a/fs/ntfs3/namei.c b/fs/ntfs3/namei.c
86432 new file mode 100644
86433 index 000000000000..f5db12cd3b20
86434 --- /dev/null
86435 +++ b/fs/ntfs3/namei.c
86436 @@ -0,0 +1,578 @@
86437 +// SPDX-License-Identifier: GPL-2.0
86439 + *
86440 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
86441 + *
86442 + */
86444 +#include <linux/blkdev.h>
86445 +#include <linux/buffer_head.h>
86446 +#include <linux/fs.h>
86447 +#include <linux/iversion.h>
86448 +#include <linux/namei.h>
86449 +#include <linux/nls.h>
86451 +#include "debug.h"
86452 +#include "ntfs.h"
86453 +#include "ntfs_fs.h"
86456 + * fill_name_de
86457 + *
86458 + * formats NTFS_DE in 'buf'
86459 + */
86460 +int fill_name_de(struct ntfs_sb_info *sbi, void *buf, const struct qstr *name,
86461 +                const struct cpu_str *uni)
86463 +       int err;
86464 +       struct NTFS_DE *e = buf;
86465 +       u16 data_size;
86466 +       struct ATTR_FILE_NAME *fname = (struct ATTR_FILE_NAME *)(e + 1);
86468 +#ifndef CONFIG_NTFS3_64BIT_CLUSTER
86469 +       e->ref.high = fname->home.high = 0;
86470 +#endif
86471 +       if (uni) {
86472 +#ifdef __BIG_ENDIAN
86473 +               int ulen = uni->len;
86474 +               __le16 *uname = fname->name;
86475 +               const u16 *name_cpu = uni->name;
86477 +               while (ulen--)
86478 +                       *uname++ = cpu_to_le16(*name_cpu++);
86479 +#else
86480 +               memcpy(fname->name, uni->name, uni->len * sizeof(u16));
86481 +#endif
86482 +               fname->name_len = uni->len;
86484 +       } else {
86485 +               /* Convert input string to unicode */
86486 +               err = ntfs_nls_to_utf16(sbi, name->name, name->len,
86487 +                                       (struct cpu_str *)&fname->name_len,
86488 +                                       NTFS_NAME_LEN, UTF16_LITTLE_ENDIAN);
86489 +               if (err < 0)
86490 +                       return err;
86491 +       }
86493 +       fname->type = FILE_NAME_POSIX;
86494 +       data_size = fname_full_size(fname);
86496 +       e->size = cpu_to_le16(QuadAlign(data_size) + sizeof(struct NTFS_DE));
86497 +       e->key_size = cpu_to_le16(data_size);
86498 +       e->flags = 0;
86499 +       e->res = 0;
86501 +       return 0;
86505 + * ntfs_lookup
86506 + *
86507 + * inode_operations::lookup
86508 + */
86509 +static struct dentry *ntfs_lookup(struct inode *dir, struct dentry *dentry,
86510 +                                 u32 flags)
86512 +       struct ntfs_inode *ni = ntfs_i(dir);
86513 +       struct cpu_str *uni = __getname();
86514 +       struct inode *inode;
86515 +       int err;
86517 +       if (!uni)
86518 +               inode = ERR_PTR(-ENOMEM);
86519 +       else {
86520 +               err = ntfs_nls_to_utf16(ni->mi.sbi, dentry->d_name.name,
86521 +                                       dentry->d_name.len, uni, NTFS_NAME_LEN,
86522 +                                       UTF16_HOST_ENDIAN);
86523 +               if (err < 0)
86524 +                       inode = ERR_PTR(err);
86525 +               else {
86526 +                       ni_lock(ni);
86527 +                       inode = dir_search_u(dir, uni, NULL);
86528 +                       ni_unlock(ni);
86529 +               }
86530 +               __putname(uni);
86531 +       }
86533 +       return d_splice_alias(inode, dentry);
86537 + * ntfs_create
86538 + *
86539 + * inode_operations::create
86540 + */
86541 +static int ntfs_create(struct user_namespace *mnt_userns, struct inode *dir,
86542 +                      struct dentry *dentry, umode_t mode, bool excl)
86544 +       struct ntfs_inode *ni = ntfs_i(dir);
86545 +       struct inode *inode;
86547 +       ni_lock_dir(ni);
86549 +       inode = ntfs_create_inode(mnt_userns, dir, dentry, NULL, S_IFREG | mode,
86550 +                                 0, NULL, 0, excl, NULL);
86552 +       ni_unlock(ni);
86554 +       return IS_ERR(inode) ? PTR_ERR(inode) : 0;
86558 + * ntfs_link
86559 + *
86560 + * inode_operations::link
86561 + */
86562 +static int ntfs_link(struct dentry *ode, struct inode *dir, struct dentry *de)
86564 +       int err;
86565 +       struct inode *inode = d_inode(ode);
86566 +       struct ntfs_inode *ni = ntfs_i(inode);
86568 +       if (S_ISDIR(inode->i_mode))
86569 +               return -EPERM;
86571 +       if (inode->i_nlink >= NTFS_LINK_MAX)
86572 +               return -EMLINK;
86574 +       ni_lock_dir(ntfs_i(dir));
86575 +       if (inode != dir)
86576 +               ni_lock(ni);
86578 +       dir->i_ctime = dir->i_mtime = inode->i_ctime = current_time(inode);
86579 +       inc_nlink(inode);
86580 +       ihold(inode);
86582 +       err = ntfs_link_inode(inode, de);
86583 +       if (!err) {
86584 +               mark_inode_dirty(inode);
86585 +               mark_inode_dirty(dir);
86586 +               d_instantiate(de, inode);
86587 +       } else {
86588 +               drop_nlink(inode);
86589 +               iput(inode);
86590 +       }
86592 +       if (inode != dir)
86593 +               ni_unlock(ni);
86594 +       ni_unlock(ntfs_i(dir));
86596 +       return err;
86600 + * ntfs_unlink
86601 + *
86602 + * inode_operations::unlink
86603 + */
86604 +static int ntfs_unlink(struct inode *dir, struct dentry *dentry)
86606 +       struct ntfs_inode *ni = ntfs_i(dir);
86607 +       int err;
86609 +       ni_lock_dir(ni);
86611 +       err = ntfs_unlink_inode(dir, dentry);
86613 +       ni_unlock(ni);
86615 +       return err;
86619 + * ntfs_symlink
86620 + *
86621 + * inode_operations::symlink
86622 + */
86623 +static int ntfs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
86624 +                       struct dentry *dentry, const char *symname)
86626 +       u32 size = strlen(symname);
86627 +       struct inode *inode;
86628 +       struct ntfs_inode *ni = ntfs_i(dir);
86630 +       ni_lock_dir(ni);
86632 +       inode = ntfs_create_inode(mnt_userns, dir, dentry, NULL, S_IFLNK | 0777,
86633 +                                 0, symname, size, 0, NULL);
86635 +       ni_unlock(ni);
86637 +       return IS_ERR(inode) ? PTR_ERR(inode) : 0;
86641 + * ntfs_mkdir
86642 + *
86643 + * inode_operations::mkdir
86644 + */
86645 +static int ntfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
86646 +                     struct dentry *dentry, umode_t mode)
86648 +       struct inode *inode;
86649 +       struct ntfs_inode *ni = ntfs_i(dir);
86651 +       ni_lock_dir(ni);
86653 +       inode = ntfs_create_inode(mnt_userns, dir, dentry, NULL, S_IFDIR | mode,
86654 +                                 0, NULL, -1, 0, NULL);
86656 +       ni_unlock(ni);
86658 +       return IS_ERR(inode) ? PTR_ERR(inode) : 0;
86662 + * ntfs_rmdir
86663 + *
86664 + * inode_operations::rm_dir
86665 + */
86666 +static int ntfs_rmdir(struct inode *dir, struct dentry *dentry)
86668 +       struct ntfs_inode *ni = ntfs_i(dir);
86669 +       int err;
86671 +       ni_lock_dir(ni);
86673 +       err = ntfs_unlink_inode(dir, dentry);
86675 +       ni_unlock(ni);
86677 +       return err;
86681 + * ntfs_rename
86682 + *
86683 + * inode_operations::rename
86684 + */
86685 +static int ntfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
86686 +                      struct dentry *old_dentry, struct inode *new_dir,
86687 +                      struct dentry *new_dentry, u32 flags)
86689 +       int err;
86690 +       struct super_block *sb = old_dir->i_sb;
86691 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
86692 +       struct ntfs_inode *old_dir_ni = ntfs_i(old_dir);
86693 +       struct ntfs_inode *new_dir_ni = ntfs_i(new_dir);
86694 +       struct ntfs_inode *old_ni;
86695 +       struct ATTR_FILE_NAME *old_name, *new_name, *fname;
86696 +       u8 name_type;
86697 +       bool is_same;
86698 +       struct inode *old_inode, *new_inode;
86699 +       struct NTFS_DE *old_de, *new_de;
86700 +       struct ATTRIB *attr;
86701 +       struct ATTR_LIST_ENTRY *le;
86702 +       u16 new_de_key_size;
86704 +       static_assert(SIZEOF_ATTRIBUTE_FILENAME_MAX + SIZEOF_RESIDENT < 1024);
86705 +       static_assert(SIZEOF_ATTRIBUTE_FILENAME_MAX + sizeof(struct NTFS_DE) <
86706 +                     1024);
86707 +       static_assert(PATH_MAX >= 4 * 1024);
86709 +       if (flags & ~RENAME_NOREPLACE)
86710 +               return -EINVAL;
86712 +       old_inode = d_inode(old_dentry);
86713 +       new_inode = d_inode(new_dentry);
86715 +       old_ni = ntfs_i(old_inode);
86717 +       is_same = old_dentry->d_name.len == new_dentry->d_name.len &&
86718 +                 !memcmp(old_dentry->d_name.name, new_dentry->d_name.name,
86719 +                         old_dentry->d_name.len);
86721 +       if (is_same && old_dir == new_dir) {
86722 +               /* Nothing to do */
86723 +               err = 0;
86724 +               goto out;
86725 +       }
86727 +       if (ntfs_is_meta_file(sbi, old_inode->i_ino)) {
86728 +               err = -EINVAL;
86729 +               goto out;
86730 +       }
86732 +       if (new_inode) {
86733 +               /*target name exists. unlink it*/
86734 +               dget(new_dentry);
86735 +               ni_lock_dir(new_dir_ni);
86736 +               err = ntfs_unlink_inode(new_dir, new_dentry);
86737 +               ni_unlock(new_dir_ni);
86738 +               dput(new_dentry);
86739 +               if (err)
86740 +                       goto out;
86741 +       }
86743 +       /* allocate PATH_MAX bytes */
86744 +       old_de = __getname();
86745 +       if (!old_de) {
86746 +               err = -ENOMEM;
86747 +               goto out;
86748 +       }
86750 +       err = fill_name_de(sbi, old_de, &old_dentry->d_name, NULL);
86751 +       if (err < 0)
86752 +               goto out1;
86754 +       old_name = (struct ATTR_FILE_NAME *)(old_de + 1);
86756 +       if (is_same) {
86757 +               new_de = old_de;
86758 +       } else {
86759 +               new_de = Add2Ptr(old_de, 1024);
86760 +               err = fill_name_de(sbi, new_de, &new_dentry->d_name, NULL);
86761 +               if (err < 0)
86762 +                       goto out1;
86763 +       }
86765 +       ni_lock_dir(old_dir_ni);
86766 +       ni_lock(old_ni);
86768 +       mi_get_ref(&old_dir_ni->mi, &old_name->home);
86770 +       /*get pointer to file_name in mft*/
86771 +       fname = ni_fname_name(old_ni, (struct cpu_str *)&old_name->name_len,
86772 +                             &old_name->home, &le);
86773 +       if (!fname) {
86774 +               err = -EINVAL;
86775 +               goto out2;
86776 +       }
86778 +       /* Copy fname info from record into new fname */
86779 +       new_name = (struct ATTR_FILE_NAME *)(new_de + 1);
86780 +       memcpy(&new_name->dup, &fname->dup, sizeof(fname->dup));
86782 +       name_type = paired_name(fname->type);
86784 +       /* remove first name from directory */
86785 +       err = indx_delete_entry(&old_dir_ni->dir, old_dir_ni, old_de + 1,
86786 +                               le16_to_cpu(old_de->key_size), sbi);
86787 +       if (err)
86788 +               goto out3;
86790 +       /* remove first name from mft */
86791 +       err = ni_remove_attr_le(old_ni, attr_from_name(fname), le);
86792 +       if (err)
86793 +               goto out4;
86795 +       le16_add_cpu(&old_ni->mi.mrec->hard_links, -1);
86796 +       old_ni->mi.dirty = true;
86798 +       if (name_type != FILE_NAME_POSIX) {
86799 +               /* get paired name */
86800 +               fname = ni_fname_type(old_ni, name_type, &le);
86801 +               if (fname) {
86802 +                       /* remove second name from directory */
86803 +                       err = indx_delete_entry(&old_dir_ni->dir, old_dir_ni,
86804 +                                               fname, fname_full_size(fname),
86805 +                                               sbi);
86806 +                       if (err)
86807 +                               goto out5;
86809 +                       /* remove second name from mft */
86810 +                       err = ni_remove_attr_le(old_ni, attr_from_name(fname),
86811 +                                               le);
86812 +                       if (err)
86813 +                               goto out6;
86815 +                       le16_add_cpu(&old_ni->mi.mrec->hard_links, -1);
86816 +                       old_ni->mi.dirty = true;
86817 +               }
86818 +       }
86820 +       /* Add new name */
86821 +       mi_get_ref(&old_ni->mi, &new_de->ref);
86822 +       mi_get_ref(&ntfs_i(new_dir)->mi, &new_name->home);
86824 +       new_de_key_size = le16_to_cpu(new_de->key_size);
86826 +       /* insert new name in mft */
86827 +       err = ni_insert_resident(old_ni, new_de_key_size, ATTR_NAME, NULL, 0,
86828 +                                &attr, NULL);
86829 +       if (err)
86830 +               goto out7;
86832 +       attr->res.flags = RESIDENT_FLAG_INDEXED;
86834 +       memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), new_name, new_de_key_size);
86836 +       le16_add_cpu(&old_ni->mi.mrec->hard_links, 1);
86837 +       old_ni->mi.dirty = true;
86839 +       /* insert new name in directory */
86840 +       err = indx_insert_entry(&new_dir_ni->dir, new_dir_ni, new_de, sbi,
86841 +                               NULL);
86842 +       if (err)
86843 +               goto out8;
86845 +       if (IS_DIRSYNC(new_dir))
86846 +               err = ntfs_sync_inode(old_inode);
86847 +       else
86848 +               mark_inode_dirty(old_inode);
86850 +       old_dir->i_ctime = old_dir->i_mtime = current_time(old_dir);
86851 +       if (IS_DIRSYNC(old_dir))
86852 +               (void)ntfs_sync_inode(old_dir);
86853 +       else
86854 +               mark_inode_dirty(old_dir);
86856 +       if (old_dir != new_dir) {
86857 +               new_dir->i_mtime = new_dir->i_ctime = old_dir->i_ctime;
86858 +               mark_inode_dirty(new_dir);
86859 +       }
86861 +       if (old_inode) {
86862 +               old_inode->i_ctime = old_dir->i_ctime;
86863 +               mark_inode_dirty(old_inode);
86864 +       }
86866 +       err = 0;
86867 +       /* normal way */
86868 +       goto out2;
86870 +out8:
86871 +       /* undo
86872 +        * ni_insert_resident(old_ni, new_de_key_size, ATTR_NAME, NULL, 0,
86873 +        *                       &attr, NULL);
86874 +        */
86875 +       mi_remove_attr(&old_ni->mi, attr);
86876 +out7:
86877 +       /* undo
86878 +        * ni_remove_attr_le(old_ni, attr_from_name(fname), le);
86879 +        */
86880 +out6:
86881 +       /* undo
86882 +        * indx_delete_entry(&old_dir_ni->dir, old_dir_ni,
86883 +        *                                      fname, fname_full_size(fname),
86884 +        *                                      sbi);
86885 +        */
86886 +out5:
86887 +       /* undo
86888 +        * ni_remove_attr_le(old_ni, attr_from_name(fname), le);
86889 +        */
86890 +out4:
86891 +       /* undo:
86892 +        * indx_delete_entry(&old_dir_ni->dir, old_dir_ni, old_de + 1,
86893 +        *                      old_de->key_size, NULL);
86894 +        */
86895 +out3:
86896 +out2:
86897 +       ni_unlock(old_ni);
86898 +       ni_unlock(old_dir_ni);
86899 +out1:
86900 +       __putname(old_de);
86901 +out:
86902 +       return err;
86906 + * ntfs_atomic_open
86907 + *
86908 + * inode_operations::atomic_open
86909 + */
86910 +static int ntfs_atomic_open(struct inode *dir, struct dentry *dentry,
86911 +                           struct file *file, u32 flags, umode_t mode)
86913 +       int err;
86914 +       bool excl = !!(flags & O_EXCL);
86915 +       struct inode *inode;
86916 +       struct ntfs_fnd *fnd = NULL;
86917 +       struct ntfs_inode *ni = ntfs_i(dir);
86918 +       struct dentry *d = NULL;
86919 +       struct cpu_str *uni = __getname();
86921 +       if (!uni)
86922 +               return -ENOMEM;
86924 +       err = ntfs_nls_to_utf16(ni->mi.sbi, dentry->d_name.name,
86925 +                               dentry->d_name.len, uni, NTFS_NAME_LEN,
86926 +                               UTF16_HOST_ENDIAN);
86927 +       if (err < 0)
86928 +               goto out;
86930 +       ni_lock_dir(ni);
86932 +       if (d_in_lookup(dentry)) {
86933 +               fnd = fnd_get();
86934 +               if (!fnd) {
86935 +                       err = -ENOMEM;
86936 +                       goto out1;
86937 +               }
86939 +               d = d_splice_alias(dir_search_u(dir, uni, fnd), dentry);
86940 +               if (IS_ERR(d)) {
86941 +                       err = PTR_ERR(d);
86942 +                       d = NULL;
86943 +                       goto out2;
86944 +               }
86946 +               if (d)
86947 +                       dentry = d;
86948 +       }
86950 +       if (!(flags & O_CREAT) || d_really_is_positive(dentry)) {
86951 +               err = finish_no_open(file, d);
86952 +               goto out2;
86953 +       }
86955 +       file->f_mode |= FMODE_CREATED;
86957 +       /*fnd contains tree's path to insert to*/
86958 +       /* TODO: init_user_ns? */
86959 +       inode = ntfs_create_inode(&init_user_ns, dir, dentry, uni, mode, 0,
86960 +                                 NULL, 0, excl, fnd);
86961 +       err = IS_ERR(inode) ? PTR_ERR(inode)
86962 +                           : finish_open(file, dentry, ntfs_file_open);
86963 +       dput(d);
86965 +out2:
86966 +       fnd_put(fnd);
86967 +out1:
86968 +       ni_unlock(ni);
86969 +out:
86970 +       __putname(uni);
86972 +       return err;
86975 +struct dentry *ntfs3_get_parent(struct dentry *child)
86977 +       struct inode *inode = d_inode(child);
86978 +       struct ntfs_inode *ni = ntfs_i(inode);
86980 +       struct ATTR_LIST_ENTRY *le = NULL;
86981 +       struct ATTRIB *attr = NULL;
86982 +       struct ATTR_FILE_NAME *fname;
86984 +       while ((attr = ni_find_attr(ni, attr, &le, ATTR_NAME, NULL, 0, NULL,
86985 +                                   NULL))) {
86986 +               fname = resident_data_ex(attr, SIZEOF_ATTRIBUTE_FILENAME);
86987 +               if (!fname)
86988 +                       continue;
86990 +               return d_obtain_alias(
86991 +                       ntfs_iget5(inode->i_sb, &fname->home, NULL));
86992 +       }
86994 +       return ERR_PTR(-ENOENT);
86997 +const struct inode_operations ntfs_dir_inode_operations = {
86998 +       .lookup = ntfs_lookup,
86999 +       .create = ntfs_create,
87000 +       .link = ntfs_link,
87001 +       .unlink = ntfs_unlink,
87002 +       .symlink = ntfs_symlink,
87003 +       .mkdir = ntfs_mkdir,
87004 +       .rmdir = ntfs_rmdir,
87005 +       .rename = ntfs_rename,
87006 +       .permission = ntfs_permission,
87007 +       .get_acl = ntfs_get_acl,
87008 +       .set_acl = ntfs_set_acl,
87009 +       .setattr = ntfs3_setattr,
87010 +       .getattr = ntfs_getattr,
87011 +       .listxattr = ntfs_listxattr,
87012 +       .atomic_open = ntfs_atomic_open,
87013 +       .fiemap = ntfs_fiemap,
87015 diff --git a/fs/ntfs3/ntfs.h b/fs/ntfs3/ntfs.h
87016 new file mode 100644
87017 index 000000000000..40398e6c39c9
87018 --- /dev/null
87019 +++ b/fs/ntfs3/ntfs.h
87020 @@ -0,0 +1,1238 @@
87021 +/* SPDX-License-Identifier: GPL-2.0 */
87023 + *
87024 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
87025 + *
87026 + * on-disk ntfs structs
87027 + */
87029 +// clang-format off
87031 +/* TODO:
87032 + * - Check 4K mft record and 512 bytes cluster
87033 + */
87036 + * Activate this define to use binary search in indexes
87037 + */
87038 +#define NTFS3_INDEX_BINARY_SEARCH
87041 + * Check each run for marked clusters
87042 + */
87043 +#define NTFS3_CHECK_FREE_CLST
87045 +#define NTFS_NAME_LEN 255
87048 + * ntfs.sys used 500 maximum links
87049 + * on-disk struct allows up to 0xffff
87050 + */
87051 +#define NTFS_LINK_MAX 0x400
87052 +//#define NTFS_LINK_MAX 0xffff
87055 + * Activate to use 64 bit clusters instead of 32 bits in ntfs.sys
87056 + * Logical and virtual cluster number
87057 + * If needed, may be redefined to use 64 bit value
87058 + */
87059 +//#define CONFIG_NTFS3_64BIT_CLUSTER
87061 +#define NTFS_LZNT_MAX_CLUSTER  4096
87062 +#define NTFS_LZNT_CUNIT                4
87063 +#define NTFS_LZNT_CLUSTERS     (1u<<NTFS_LZNT_CUNIT)
87065 +struct GUID {
87066 +       __le32 Data1;
87067 +       __le16 Data2;
87068 +       __le16 Data3;
87069 +       u8 Data4[8];
87073 + * this struct repeats layout of ATTR_FILE_NAME
87074 + * at offset 0x40
87075 + * it used to store global constants NAME_MFT/NAME_MIRROR...
87076 + * most constant names are shorter than 10
87077 + */
87078 +struct cpu_str {
87079 +       u8 len;
87080 +       u8 unused;
87081 +       u16 name[10];
87084 +struct le_str {
87085 +       u8 len;
87086 +       u8 unused;
87087 +       __le16 name[];
87090 +static_assert(SECTOR_SHIFT == 9);
87092 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
87093 +typedef u64 CLST;
87094 +static_assert(sizeof(size_t) == 8);
87095 +#else
87096 +typedef u32 CLST;
87097 +#endif
87099 +#define SPARSE_LCN64   ((u64)-1)
87100 +#define SPARSE_LCN     ((CLST)-1)
87101 +#define RESIDENT_LCN   ((CLST)-2)
87102 +#define COMPRESSED_LCN ((CLST)-3)
87104 +#define COMPRESSION_UNIT     4
87105 +#define COMPRESS_MAX_CLUSTER 0x1000
87106 +#define MFT_INCREASE_CHUNK   1024
87108 +enum RECORD_NUM {
87109 +       MFT_REC_MFT             = 0,
87110 +       MFT_REC_MIRR            = 1,
87111 +       MFT_REC_LOG             = 2,
87112 +       MFT_REC_VOL             = 3,
87113 +       MFT_REC_ATTR            = 4,
87114 +       MFT_REC_ROOT            = 5,
87115 +       MFT_REC_BITMAP          = 6,
87116 +       MFT_REC_BOOT            = 7,
87117 +       MFT_REC_BADCLUST        = 8,
87118 +       //MFT_REC_QUOTA         = 9,
87119 +       MFT_REC_SECURE          = 9, // NTFS 3.0
87120 +       MFT_REC_UPCASE          = 10,
87121 +       MFT_REC_EXTEND          = 11, // NTFS 3.0
87122 +       MFT_REC_RESERVED        = 11,
87123 +       MFT_REC_FREE            = 16,
87124 +       MFT_REC_USER            = 24,
87127 +enum ATTR_TYPE {
87128 +       ATTR_ZERO               = cpu_to_le32(0x00),
87129 +       ATTR_STD                = cpu_to_le32(0x10),
87130 +       ATTR_LIST               = cpu_to_le32(0x20),
87131 +       ATTR_NAME               = cpu_to_le32(0x30),
87132 +       // ATTR_VOLUME_VERSION on Nt4
87133 +       ATTR_ID                 = cpu_to_le32(0x40),
87134 +       ATTR_SECURE             = cpu_to_le32(0x50),
87135 +       ATTR_LABEL              = cpu_to_le32(0x60),
87136 +       ATTR_VOL_INFO           = cpu_to_le32(0x70),
87137 +       ATTR_DATA               = cpu_to_le32(0x80),
87138 +       ATTR_ROOT               = cpu_to_le32(0x90),
87139 +       ATTR_ALLOC              = cpu_to_le32(0xA0),
87140 +       ATTR_BITMAP             = cpu_to_le32(0xB0),
87141 +       // ATTR_SYMLINK on Nt4
87142 +       ATTR_REPARSE            = cpu_to_le32(0xC0),
87143 +       ATTR_EA_INFO            = cpu_to_le32(0xD0),
87144 +       ATTR_EA                 = cpu_to_le32(0xE0),
87145 +       ATTR_PROPERTYSET        = cpu_to_le32(0xF0),
87146 +       ATTR_LOGGED_UTILITY_STREAM = cpu_to_le32(0x100),
87147 +       ATTR_END                = cpu_to_le32(0xFFFFFFFF)
87150 +static_assert(sizeof(enum ATTR_TYPE) == 4);
87152 +enum FILE_ATTRIBUTE {
87153 +       FILE_ATTRIBUTE_READONLY         = cpu_to_le32(0x00000001),
87154 +       FILE_ATTRIBUTE_HIDDEN           = cpu_to_le32(0x00000002),
87155 +       FILE_ATTRIBUTE_SYSTEM           = cpu_to_le32(0x00000004),
87156 +       FILE_ATTRIBUTE_ARCHIVE          = cpu_to_le32(0x00000020),
87157 +       FILE_ATTRIBUTE_DEVICE           = cpu_to_le32(0x00000040),
87158 +       FILE_ATTRIBUTE_TEMPORARY        = cpu_to_le32(0x00000100),
87159 +       FILE_ATTRIBUTE_SPARSE_FILE      = cpu_to_le32(0x00000200),
87160 +       FILE_ATTRIBUTE_REPARSE_POINT    = cpu_to_le32(0x00000400),
87161 +       FILE_ATTRIBUTE_COMPRESSED       = cpu_to_le32(0x00000800),
87162 +       FILE_ATTRIBUTE_OFFLINE          = cpu_to_le32(0x00001000),
87163 +       FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = cpu_to_le32(0x00002000),
87164 +       FILE_ATTRIBUTE_ENCRYPTED        = cpu_to_le32(0x00004000),
87165 +       FILE_ATTRIBUTE_VALID_FLAGS      = cpu_to_le32(0x00007fb7),
87166 +       FILE_ATTRIBUTE_DIRECTORY        = cpu_to_le32(0x10000000),
87169 +static_assert(sizeof(enum FILE_ATTRIBUTE) == 4);
87171 +extern const struct cpu_str NAME_MFT;
87172 +extern const struct cpu_str NAME_MIRROR;
87173 +extern const struct cpu_str NAME_LOGFILE;
87174 +extern const struct cpu_str NAME_VOLUME;
87175 +extern const struct cpu_str NAME_ATTRDEF;
87176 +extern const struct cpu_str NAME_ROOT;
87177 +extern const struct cpu_str NAME_BITMAP;
87178 +extern const struct cpu_str NAME_BOOT;
87179 +extern const struct cpu_str NAME_BADCLUS;
87180 +extern const struct cpu_str NAME_QUOTA;
87181 +extern const struct cpu_str NAME_SECURE;
87182 +extern const struct cpu_str NAME_UPCASE;
87183 +extern const struct cpu_str NAME_EXTEND;
87184 +extern const struct cpu_str NAME_OBJID;
87185 +extern const struct cpu_str NAME_REPARSE;
87186 +extern const struct cpu_str NAME_USNJRNL;
87188 +extern const __le16 I30_NAME[4];
87189 +extern const __le16 SII_NAME[4];
87190 +extern const __le16 SDH_NAME[4];
87191 +extern const __le16 SO_NAME[2];
87192 +extern const __le16 SQ_NAME[2];
87193 +extern const __le16 SR_NAME[2];
87195 +extern const __le16 BAD_NAME[4];
87196 +extern const __le16 SDS_NAME[4];
87197 +extern const __le16 WOF_NAME[17];      /* WofCompressedData */
87199 +/* MFT record number structure */
87200 +struct MFT_REF {
87201 +       __le32 low;     // The low part of the number
87202 +       __le16 high;    // The high part of the number
87203 +       __le16 seq;     // The sequence number of MFT record
87206 +static_assert(sizeof(__le64) == sizeof(struct MFT_REF));
87208 +static inline CLST ino_get(const struct MFT_REF *ref)
87210 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
87211 +       return le32_to_cpu(ref->low) | ((u64)le16_to_cpu(ref->high) << 32);
87212 +#else
87213 +       return le32_to_cpu(ref->low);
87214 +#endif
87217 +struct NTFS_BOOT {
87218 +       u8 jump_code[3];        // 0x00: Jump to boot code
87219 +       u8 system_id[8];        // 0x03: System ID, equals "NTFS    "
87221 +       // NOTE: this member is not aligned(!)
87222 +       // bytes_per_sector[0] must be 0
87223 +       // bytes_per_sector[1] must be multiplied by 256
87224 +       u8 bytes_per_sector[2]; // 0x0B: Bytes per sector
87226 +       u8 sectors_per_clusters;// 0x0D: Sectors per cluster
87227 +       u8 unused1[7];
87228 +       u8 media_type;          // 0x15: Media type (0xF8 - harddisk)
87229 +       u8 unused2[2];
87230 +       __le16 sct_per_track;   // 0x18: number of sectors per track
87231 +       __le16 heads;           // 0x1A: number of heads per cylinder
87232 +       __le32 hidden_sectors;  // 0x1C: number of 'hidden' sectors
87233 +       u8 unused3[4];
87234 +       u8 bios_drive_num;      // 0x24: BIOS drive number =0x80
87235 +       u8 unused4;
87236 +       u8 signature_ex;        // 0x26: Extended BOOT signature =0x80
87237 +       u8 unused5;
87238 +       __le64 sectors_per_volume;// 0x28: size of volume in sectors
87239 +       __le64 mft_clst;        // 0x30: first cluster of $MFT
87240 +       __le64 mft2_clst;       // 0x38: first cluster of $MFTMirr
87241 +       s8 record_size;         // 0x40: size of MFT record in clusters(sectors)
87242 +       u8 unused6[3];
87243 +       s8 index_size;          // 0x44: size of INDX record in clusters(sectors)
87244 +       u8 unused7[3];
87245 +       __le64 serial_num;      // 0x48: Volume serial number
87246 +       __le32 check_sum;       // 0x50: Simple additive checksum of all
87247 +                               // of the u32's which precede the 'check_sum'
87249 +       u8 boot_code[0x200 - 0x50 - 2 - 4]; // 0x54:
87250 +       u8 boot_magic[2];       // 0x1FE: Boot signature =0x55 + 0xAA
87253 +static_assert(sizeof(struct NTFS_BOOT) == 0x200);
87255 +enum NTFS_SIGNATURE {
87256 +       NTFS_FILE_SIGNATURE = cpu_to_le32(0x454C4946), // 'FILE'
87257 +       NTFS_INDX_SIGNATURE = cpu_to_le32(0x58444E49), // 'INDX'
87258 +       NTFS_CHKD_SIGNATURE = cpu_to_le32(0x444B4843), // 'CHKD'
87259 +       NTFS_RSTR_SIGNATURE = cpu_to_le32(0x52545352), // 'RSTR'
87260 +       NTFS_RCRD_SIGNATURE = cpu_to_le32(0x44524352), // 'RCRD'
87261 +       NTFS_BAAD_SIGNATURE = cpu_to_le32(0x44414142), // 'BAAD'
87262 +       NTFS_HOLE_SIGNATURE = cpu_to_le32(0x454C4F48), // 'HOLE'
87263 +       NTFS_FFFF_SIGNATURE = cpu_to_le32(0xffffffff),
87266 +static_assert(sizeof(enum NTFS_SIGNATURE) == 4);
87268 +/* MFT Record header structure */
87269 +struct NTFS_RECORD_HEADER {
87270 +       /* Record magic number, equals 'FILE'/'INDX'/'RSTR'/'RCRD' */
87271 +       enum NTFS_SIGNATURE sign; // 0x00:
87272 +       __le16 fix_off;         // 0x04:
87273 +       __le16 fix_num;         // 0x06:
87274 +       __le64 lsn;             // 0x08: Log file sequence number
87277 +static_assert(sizeof(struct NTFS_RECORD_HEADER) == 0x10);
87279 +static inline int is_baad(const struct NTFS_RECORD_HEADER *hdr)
87281 +       return hdr->sign == NTFS_BAAD_SIGNATURE;
87284 +/* Possible bits in struct MFT_REC.flags */
87285 +enum RECORD_FLAG {
87286 +       RECORD_FLAG_IN_USE      = cpu_to_le16(0x0001),
87287 +       RECORD_FLAG_DIR         = cpu_to_le16(0x0002),
87288 +       RECORD_FLAG_SYSTEM      = cpu_to_le16(0x0004),
87289 +       RECORD_FLAG_UNKNOWN     = cpu_to_le16(0x0008),
87292 +/* MFT Record structure */
87293 +struct MFT_REC {
87294 +       struct NTFS_RECORD_HEADER rhdr; // 'FILE'
87296 +       __le16 seq;             // 0x10: Sequence number for this record
87297 +       __le16 hard_links;      // 0x12: The number of hard links to record
87298 +       __le16 attr_off;        // 0x14: Offset to attributes
87299 +       __le16 flags;           // 0x16: See RECORD_FLAG
87300 +       __le32 used;            // 0x18: The size of used part
87301 +       __le32 total;           // 0x1C: Total record size
87303 +       struct MFT_REF parent_ref; // 0x20: Parent MFT record
87304 +       __le16 next_attr_id;    // 0x28: The next attribute Id
87306 +       __le16 res;             // 0x2A: High part of mft record?
87307 +       __le32 mft_record;      // 0x2C: Current mft record number
87308 +       __le16 fixups[];        // 0x30:
87311 +#define MFTRECORD_FIXUP_OFFSET_1 offsetof(struct MFT_REC, res)
87312 +#define MFTRECORD_FIXUP_OFFSET_3 offsetof(struct MFT_REC, fixups)
87314 +static_assert(MFTRECORD_FIXUP_OFFSET_1 == 0x2A);
87315 +static_assert(MFTRECORD_FIXUP_OFFSET_3 == 0x30);
87317 +static inline bool is_rec_base(const struct MFT_REC *rec)
87319 +       const struct MFT_REF *r = &rec->parent_ref;
87321 +       return !r->low && !r->high && !r->seq;
87324 +static inline bool is_mft_rec5(const struct MFT_REC *rec)
87326 +       return le16_to_cpu(rec->rhdr.fix_off) >=
87327 +              offsetof(struct MFT_REC, fixups);
87330 +static inline bool is_rec_inuse(const struct MFT_REC *rec)
87332 +       return rec->flags & RECORD_FLAG_IN_USE;
87335 +static inline bool clear_rec_inuse(struct MFT_REC *rec)
87337 +       return rec->flags &= ~RECORD_FLAG_IN_USE;
87340 +/* Possible values of ATTR_RESIDENT.flags */
87341 +#define RESIDENT_FLAG_INDEXED 0x01
87343 +struct ATTR_RESIDENT {
87344 +       __le32 data_size;       // 0x10: The size of data
87345 +       __le16 data_off;        // 0x14: Offset to data
87346 +       u8 flags;               // 0x16: resident flags ( 1 - indexed )
87347 +       u8 res;                 // 0x17:
87348 +}; // sizeof() = 0x18
87350 +struct ATTR_NONRESIDENT {
87351 +       __le64 svcn;            // 0x10: Starting VCN of this segment
87352 +       __le64 evcn;            // 0x18: End VCN of this segment
87353 +       __le16 run_off;         // 0x20: Offset to packed runs
87354 +       //  Unit of Compression size for this stream, expressed
87355 +       //  as a log of the cluster size.
87356 +       //
87357 +       //      0 means file is not compressed
87358 +       //      1, 2, 3, and 4 are potentially legal values if the
87359 +       //          stream is compressed, however the implementation
87360 +       //          may only choose to use 4, or possibly 3.  Note
87361 +       //          that 4 means cluster size time 16.  If convenient
87362 +       //          the implementation may wish to accept a
87363 +       //          reasonable range of legal values here (1-5?),
87364 +       //          even if the implementation only generates
87365 +       //          a smaller set of values itself.
87366 +       u8 c_unit;              // 0x22
87367 +       u8 res1[5];             // 0x23:
87368 +       __le64 alloc_size;      // 0x28: The allocated size of attribute in bytes
87369 +                               // (multiple of cluster size)
87370 +       __le64 data_size;       // 0x30: The size of attribute  in bytes <= alloc_size
87371 +       __le64 valid_size;      // 0x38: The size of valid part in bytes <= data_size
87372 +       __le64 total_size;      // 0x40: The sum of the allocated clusters for a file
87373 +                               // (present only for the first segment (0 == vcn)
87374 +                               // of compressed attribute)
87376 +}; // sizeof()=0x40 or 0x48 (if compressed)
87378 +/* Possible values of ATTRIB.flags: */
87379 +#define ATTR_FLAG_COMPRESSED     cpu_to_le16(0x0001)
87380 +#define ATTR_FLAG_COMPRESSED_MASK cpu_to_le16(0x00FF)
87381 +#define ATTR_FLAG_ENCRYPTED      cpu_to_le16(0x4000)
87382 +#define ATTR_FLAG_SPARSED        cpu_to_le16(0x8000)
87384 +struct ATTRIB {
87385 +       enum ATTR_TYPE type;    // 0x00: The type of this attribute
87386 +       __le32 size;            // 0x04: The size of this attribute
87387 +       u8 non_res;             // 0x08: Is this attribute non-resident ?
87388 +       u8 name_len;            // 0x09: This attribute name length
87389 +       __le16 name_off;        // 0x0A: Offset to the attribute name
87390 +       __le16 flags;           // 0x0C: See ATTR_FLAG_XXX
87391 +       __le16 id;              // 0x0E: unique id (per record)
87393 +       union {
87394 +               struct ATTR_RESIDENT res;     // 0x10
87395 +               struct ATTR_NONRESIDENT nres; // 0x10
87396 +       };
87399 +/* Define attribute sizes */
87400 +#define SIZEOF_RESIDENT                        0x18
87401 +#define SIZEOF_NONRESIDENT_EX          0x48
87402 +#define SIZEOF_NONRESIDENT             0x40
87404 +#define SIZEOF_RESIDENT_LE             cpu_to_le16(0x18)
87405 +#define SIZEOF_NONRESIDENT_EX_LE       cpu_to_le16(0x48)
87406 +#define SIZEOF_NONRESIDENT_LE          cpu_to_le16(0x40)
87408 +static inline u64 attr_ondisk_size(const struct ATTRIB *attr)
87410 +       return attr->non_res ? ((attr->flags &
87411 +                                (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED)) ?
87412 +                                       le64_to_cpu(attr->nres.total_size) :
87413 +                                       le64_to_cpu(attr->nres.alloc_size)) :
87414 +                              QuadAlign(le32_to_cpu(attr->res.data_size));
87417 +static inline u64 attr_size(const struct ATTRIB *attr)
87419 +       return attr->non_res ? le64_to_cpu(attr->nres.data_size) :
87420 +                              le32_to_cpu(attr->res.data_size);
87423 +static inline bool is_attr_encrypted(const struct ATTRIB *attr)
87425 +       return attr->flags & ATTR_FLAG_ENCRYPTED;
87428 +static inline bool is_attr_sparsed(const struct ATTRIB *attr)
87430 +       return attr->flags & ATTR_FLAG_SPARSED;
87433 +static inline bool is_attr_compressed(const struct ATTRIB *attr)
87435 +       return attr->flags & ATTR_FLAG_COMPRESSED;
87438 +static inline bool is_attr_ext(const struct ATTRIB *attr)
87440 +       return attr->flags & (ATTR_FLAG_SPARSED | ATTR_FLAG_COMPRESSED);
87443 +static inline bool is_attr_indexed(const struct ATTRIB *attr)
87445 +       return !attr->non_res && (attr->res.flags & RESIDENT_FLAG_INDEXED);
87448 +static inline __le16 const *attr_name(const struct ATTRIB *attr)
87450 +       return Add2Ptr(attr, le16_to_cpu(attr->name_off));
87453 +static inline u64 attr_svcn(const struct ATTRIB *attr)
87455 +       return attr->non_res ? le64_to_cpu(attr->nres.svcn) : 0;
87458 +/* the size of resident attribute by its resident size */
87459 +#define BYTES_PER_RESIDENT(b) (0x18 + (b))
87461 +static_assert(sizeof(struct ATTRIB) == 0x48);
87462 +static_assert(sizeof(((struct ATTRIB *)NULL)->res) == 0x08);
87463 +static_assert(sizeof(((struct ATTRIB *)NULL)->nres) == 0x38);
87465 +static inline void *resident_data_ex(const struct ATTRIB *attr, u32 datasize)
87467 +       u32 asize, rsize;
87468 +       u16 off;
87470 +       if (attr->non_res)
87471 +               return NULL;
87473 +       asize = le32_to_cpu(attr->size);
87474 +       off = le16_to_cpu(attr->res.data_off);
87476 +       if (asize < datasize + off)
87477 +               return NULL;
87479 +       rsize = le32_to_cpu(attr->res.data_size);
87480 +       if (rsize < datasize)
87481 +               return NULL;
87483 +       return Add2Ptr(attr, off);
87486 +static inline void *resident_data(const struct ATTRIB *attr)
87488 +       return Add2Ptr(attr, le16_to_cpu(attr->res.data_off));
87491 +static inline void *attr_run(const struct ATTRIB *attr)
87493 +       return Add2Ptr(attr, le16_to_cpu(attr->nres.run_off));
87496 +/* Standard information attribute (0x10) */
87497 +struct ATTR_STD_INFO {
87498 +       __le64 cr_time;         // 0x00: File creation file
87499 +       __le64 m_time;          // 0x08: File modification time
87500 +       __le64 c_time;          // 0x10: Last time any attribute was modified
87501 +       __le64 a_time;          // 0x18: File last access time
87502 +       enum FILE_ATTRIBUTE fa; // 0x20: Standard DOS attributes & more
87503 +       __le32 max_ver_num;     // 0x24: Maximum Number of Versions
87504 +       __le32 ver_num;         // 0x28: Version Number
87505 +       __le32 class_id;        // 0x2C: Class Id from bidirectional Class Id index
87508 +static_assert(sizeof(struct ATTR_STD_INFO) == 0x30);
87510 +#define SECURITY_ID_INVALID 0x00000000
87511 +#define SECURITY_ID_FIRST 0x00000100
87513 +struct ATTR_STD_INFO5 {
87514 +       __le64 cr_time;         // 0x00: File creation file
87515 +       __le64 m_time;          // 0x08: File modification time
87516 +       __le64 c_time;          // 0x10: Last time any attribute was modified
87517 +       __le64 a_time;          // 0x18: File last access time
87518 +       enum FILE_ATTRIBUTE fa; // 0x20: Standard DOS attributes & more
87519 +       __le32 max_ver_num;     // 0x24: Maximum Number of Versions
87520 +       __le32 ver_num;         // 0x28: Version Number
87521 +       __le32 class_id;        // 0x2C: Class Id from bidirectional Class Id index
87523 +       __le32 owner_id;        // 0x30: Owner Id of the user owning the file.
87524 +       __le32 security_id;     // 0x34: The Security Id is a key in the $SII Index and $SDS
87525 +       __le64 quota_charge;    // 0x38:
87526 +       __le64 usn;             // 0x40: Last Update Sequence Number of the file. This is a direct
87527 +                               // index into the file $UsnJrnl. If zero, the USN Journal is
87528 +                               // disabled.
87531 +static_assert(sizeof(struct ATTR_STD_INFO5) == 0x48);
/* Attribute list entry structure (0x20 bytes). */
struct ATTR_LIST_ENTRY {
	enum ATTR_TYPE type;	// 0x00: The type of attribute
	__le16 size;		// 0x04: The size of this record
	u8 name_len;		// 0x06: The length of attribute name
	u8 name_off;		// 0x07: The offset to attribute name
	__le64 vcn;		// 0x08: Starting VCN of this attribute
	struct MFT_REF ref;	// 0x10: MFT record number with attribute
	__le16 id;		// 0x18: struct ATTRIB ID
	__le16 name[3];		// 0x1A: Alignment only; use name_off/name_len
				// to locate the real (possibly longer) name

}; // sizeof(0x20)

static_assert(sizeof(struct ATTR_LIST_ENTRY) == 0x20);
87548 +static inline u32 le_size(u8 name_len)
87550 +       return QuadAlign(offsetof(struct ATTR_LIST_ENTRY, name) +
87551 +                        name_len * sizeof(short));
87554 +/* returns 0 if 'attr' has the same type and name */
87555 +static inline int le_cmp(const struct ATTR_LIST_ENTRY *le,
87556 +                        const struct ATTRIB *attr)
87558 +       return le->type != attr->type || le->name_len != attr->name_len ||
87559 +              (!le->name_len &&
87560 +               memcmp(Add2Ptr(le, le->name_off),
87561 +                      Add2Ptr(attr, le16_to_cpu(attr->name_off)),
87562 +                      le->name_len * sizeof(short)));
87565 +static inline __le16 const *le_name(const struct ATTR_LIST_ENTRY *le)
87567 +       return Add2Ptr(le, le->name_off);
87570 +/* File name types (the field type in struct ATTR_FILE_NAME ) */
87571 +#define FILE_NAME_POSIX   0
87572 +#define FILE_NAME_UNICODE 1
87573 +#define FILE_NAME_DOS    2
87574 +#define FILE_NAME_UNICODE_AND_DOS (FILE_NAME_DOS | FILE_NAME_UNICODE)
/* Duplicated file information carried inside the filename attribute (0x30). */
struct NTFS_DUP_INFO {
	__le64 cr_time;		// 0x00: File creation time
	__le64 m_time;		// 0x08: File modification time
	__le64 c_time;		// 0x10: Last time any attribute was modified
	__le64 a_time;		// 0x18: File last access time
	__le64 alloc_size;	// 0x20: Data attribute allocated size, multiple of cluster size
	__le64 data_size;	// 0x28: Data attribute size <= alloc_size
	enum FILE_ATTRIBUTE fa;	// 0x30: Standard DOS attributes & more
	__le16 ea_size;		// 0x34: Packed EAs
	__le16 reparse;		// 0x36: Used by Reparse

}; // 0x38
/* Filename attribute structure; variable-length name follows the header. */
struct ATTR_FILE_NAME {
	struct MFT_REF home;	// 0x00: MFT record for directory
	struct NTFS_DUP_INFO dup;// 0x08: duplicated times/sizes/attributes
	u8 name_len;		// 0x40: File name length in words (UTF-16 units)
	u8 type;		// 0x41: File name type (FILE_NAME_XXX)
	__le16 name[];		// 0x42: File name
};

static_assert(sizeof(((struct ATTR_FILE_NAME *)NULL)->dup) == 0x38);
static_assert(offsetof(struct ATTR_FILE_NAME, name) == 0x42);
#define SIZEOF_ATTRIBUTE_FILENAME	0x44
#define SIZEOF_ATTRIBUTE_FILENAME_MAX	(0x42 + 255 * 2)
87603 +static inline struct ATTRIB *attr_from_name(struct ATTR_FILE_NAME *fname)
87605 +       return (struct ATTRIB *)((char *)fname - SIZEOF_RESIDENT);
87608 +static inline u16 fname_full_size(const struct ATTR_FILE_NAME *fname)
87610 +       // don't return struct_size(fname, name, fname->name_len);
87611 +       return offsetof(struct ATTR_FILE_NAME, name) +
87612 +              fname->name_len * sizeof(short);
87615 +static inline u8 paired_name(u8 type)
87617 +       if (type == FILE_NAME_UNICODE)
87618 +               return FILE_NAME_DOS;
87619 +       if (type == FILE_NAME_DOS)
87620 +               return FILE_NAME_UNICODE;
87621 +       return FILE_NAME_POSIX;
/* Index entry flags (the field 'flags' in struct NTFS_DE) */
#define NTFS_IE_HAS_SUBNODES	cpu_to_le16(1)
#define NTFS_IE_LAST		cpu_to_le16(2)

/* Directory (index) entry structure */
struct NTFS_DE {
	union {
		struct MFT_REF ref; // 0x00: MFT record number with this file
		struct {
			__le16 data_off;  // 0x00:
			__le16 data_size; // 0x02:
			__le32 res;	  // 0x04: must be 0
		} view;
	};
	__le16 size;		// 0x08: The size of this entry
	__le16 key_size;	// 0x0A: The size in bytes of the key that
				// follows (e.g. the file name attribute)
	__le16 flags;		// 0x0C: Entry flags: NTFS_IE_XXX
	__le16 res;		// 0x0E:

	// Here any indexed attribute can be placed.
	// One of them is:
	// struct ATTR_FILE_NAME AttrFileName;
	//

	// The last 8 bytes of this structure contain
	// the VBN of the subnode.
	// !!! Note !!!
	// This field is present only if (flags & NTFS_IE_HAS_SUBNODES)
	// __le64 vbn;
};

static_assert(sizeof(struct NTFS_DE) == 0x10);
87657 +static inline void de_set_vbn_le(struct NTFS_DE *e, __le64 vcn)
87659 +       __le64 *v = Add2Ptr(e, le16_to_cpu(e->size) - sizeof(__le64));
87661 +       *v = vcn;
87664 +static inline void de_set_vbn(struct NTFS_DE *e, CLST vcn)
87666 +       __le64 *v = Add2Ptr(e, le16_to_cpu(e->size) - sizeof(__le64));
87668 +       *v = cpu_to_le64(vcn);
87671 +static inline __le64 de_get_vbn_le(const struct NTFS_DE *e)
87673 +       return *(__le64 *)Add2Ptr(e, le16_to_cpu(e->size) - sizeof(__le64));
87676 +static inline CLST de_get_vbn(const struct NTFS_DE *e)
87678 +       __le64 *v = Add2Ptr(e, le16_to_cpu(e->size) - sizeof(__le64));
87680 +       return le64_to_cpu(*v);
87683 +static inline struct NTFS_DE *de_get_next(const struct NTFS_DE *e)
87685 +       return Add2Ptr(e, le16_to_cpu(e->size));
87688 +static inline struct ATTR_FILE_NAME *de_get_fname(const struct NTFS_DE *e)
87690 +       return le16_to_cpu(e->key_size) >= SIZEOF_ATTRIBUTE_FILENAME ?
87691 +                      Add2Ptr(e, sizeof(struct NTFS_DE)) :
87692 +                      NULL;
87695 +static inline bool de_is_last(const struct NTFS_DE *e)
87697 +       return e->flags & NTFS_IE_LAST;
87700 +static inline bool de_has_vcn(const struct NTFS_DE *e)
87702 +       return e->flags & NTFS_IE_HAS_SUBNODES;
87705 +static inline bool de_has_vcn_ex(const struct NTFS_DE *e)
87707 +       return (e->flags & NTFS_IE_HAS_SUBNODES) &&
87708 +              (u64)(-1) != *((u64 *)Add2Ptr(e, le16_to_cpu(e->size) -
87709 +                                                       sizeof(__le64)));
87712 +#define MAX_BYTES_PER_NAME_ENTRY                                              \
87713 +       QuadAlign(sizeof(struct NTFS_DE) +                                     \
87714 +                 offsetof(struct ATTR_FILE_NAME, name) +                      \
87715 +                 NTFS_NAME_LEN * sizeof(short))
/* Common header of an index node (root or buffer). */
struct INDEX_HDR {
	__le32 de_off;	// 0x00: The offset from the start of this structure
			// to the first NTFS_DE
	__le32 used;	// 0x04: The size of this structure plus all
			// entries (quad-word aligned)
	__le32 total;	// 0x08: The allocated size for this structure plus all entries
	u8 flags;	// 0x0C: 0x00 = Small directory, 0x01 = Large directory
	u8 res[3];

	//
	// Invariant: de_off + used <= total
	//
};

static_assert(sizeof(struct INDEX_HDR) == 0x10);
87733 +static inline struct NTFS_DE *hdr_first_de(const struct INDEX_HDR *hdr)
87735 +       u32 de_off = le32_to_cpu(hdr->de_off);
87736 +       u32 used = le32_to_cpu(hdr->used);
87737 +       struct NTFS_DE *e = Add2Ptr(hdr, de_off);
87738 +       u16 esize;
87740 +       if (de_off >= used || de_off >= le32_to_cpu(hdr->total))
87741 +               return NULL;
87743 +       esize = le16_to_cpu(e->size);
87744 +       if (esize < sizeof(struct NTFS_DE) || de_off + esize > used)
87745 +               return NULL;
87747 +       return e;
87750 +static inline struct NTFS_DE *hdr_next_de(const struct INDEX_HDR *hdr,
87751 +                                         const struct NTFS_DE *e)
87753 +       size_t off = PtrOffset(hdr, e);
87754 +       u32 used = le32_to_cpu(hdr->used);
87755 +       u16 esize;
87757 +       if (off >= used)
87758 +               return NULL;
87760 +       esize = le16_to_cpu(e->size);
87762 +       if (esize < sizeof(struct NTFS_DE) ||
87763 +           off + esize + sizeof(struct NTFS_DE) > used)
87764 +               return NULL;
87766 +       return Add2Ptr(e, esize);
87769 +static inline bool hdr_has_subnode(const struct INDEX_HDR *hdr)
87771 +       return hdr->flags & 1;
/* On-disk index allocation block ('INDX' record). */
struct INDEX_BUFFER {
	struct NTFS_RECORD_HEADER rhdr; // 'INDX'
	__le64 vbn; // 0x10: vcn if index block >= cluster, or vsn if index block < cluster
	struct INDEX_HDR ihdr; // 0x18:
};

static_assert(sizeof(struct INDEX_BUFFER) == 0x28);
87782 +static inline bool ib_is_empty(const struct INDEX_BUFFER *ib)
87784 +       const struct NTFS_DE *first = hdr_first_de(&ib->ihdr);
87786 +       return !first || de_is_last(first);
87789 +static inline bool ib_is_leaf(const struct INDEX_BUFFER *ib)
87791 +       return !(ib->ihdr.flags & 1);
87794 +/* Index root structure ( 0x90 ) */
87795 +enum COLLATION_RULE {
87796 +       NTFS_COLLATION_TYPE_BINARY      = cpu_to_le32(0),
87797 +       // $I30
87798 +       NTFS_COLLATION_TYPE_FILENAME    = cpu_to_le32(0x01),
87799 +       // $SII of $Secure and $Q of Quota
87800 +       NTFS_COLLATION_TYPE_UINT        = cpu_to_le32(0x10),
87801 +       // $O of Quota
87802 +       NTFS_COLLATION_TYPE_SID         = cpu_to_le32(0x11),
87803 +       // $SDH of $Secure
87804 +       NTFS_COLLATION_TYPE_SECURITY_HASH = cpu_to_le32(0x12),
87805 +       // $O of ObjId and "$R" for Reparse
87806 +       NTFS_COLLATION_TYPE_UINTS       = cpu_to_le32(0x13)
87809 +static_assert(sizeof(enum COLLATION_RULE) == 4);
87812 +struct INDEX_ROOT {
87813 +       enum ATTR_TYPE type;    // 0x00: The type of attribute to index on
87814 +       enum COLLATION_RULE rule; // 0x04: The rule
87815 +       __le32 index_block_size;// 0x08: The size of index record
87816 +       u8 index_block_clst;    // 0x0C: The number of clusters or sectors per index
87817 +       u8 res[3];
87818 +       struct INDEX_HDR ihdr;  // 0x10:
87821 +static_assert(sizeof(struct INDEX_ROOT) == 0x20);
87822 +static_assert(offsetof(struct INDEX_ROOT, ihdr) == 0x10);
87824 +#define VOLUME_FLAG_DIRTY          cpu_to_le16(0x0001)
87825 +#define VOLUME_FLAG_RESIZE_LOG_FILE cpu_to_le16(0x0002)
87827 +struct VOLUME_INFO {
87828 +       __le64 res1;    // 0x00
87829 +       u8 major_ver;   // 0x08: NTFS major version number (before .)
87830 +       u8 minor_ver;   // 0x09: NTFS minor version number (after .)
87831 +       __le16 flags;   // 0x0A: Volume flags, see VOLUME_FLAG_XXX
87833 +}; // sizeof=0xC
87835 +#define SIZEOF_ATTRIBUTE_VOLUME_INFO 0xc
87837 +#define NTFS_LABEL_MAX_LENGTH          (0x100 / sizeof(short))
87838 +#define NTFS_ATTR_INDEXABLE            cpu_to_le32(0x00000002)
87839 +#define NTFS_ATTR_DUPALLOWED           cpu_to_le32(0x00000004)
87840 +#define NTFS_ATTR_MUST_BE_INDEXED      cpu_to_le32(0x00000010)
87841 +#define NTFS_ATTR_MUST_BE_NAMED                cpu_to_le32(0x00000020)
87842 +#define NTFS_ATTR_MUST_BE_RESIDENT     cpu_to_le32(0x00000040)
87843 +#define NTFS_ATTR_LOG_ALWAYS           cpu_to_le32(0x00000080)
87845 +/* $AttrDef file entry */
87846 +struct ATTR_DEF_ENTRY {
87847 +       __le16 name[0x40];      // 0x00: Attr name
87848 +       enum ATTR_TYPE type;    // 0x80: struct ATTRIB type
87849 +       __le32 res;             // 0x84:
87850 +       enum COLLATION_RULE rule; // 0x88:
87851 +       __le32 flags;           // 0x8C: NTFS_ATTR_XXX (see above)
87852 +       __le64 min_sz;          // 0x90: Minimum attribute data size
87853 +       __le64 max_sz;          // 0x98: Maximum attribute data size
87856 +static_assert(sizeof(struct ATTR_DEF_ENTRY) == 0xa0);
87858 +/* Object ID (0x40) */
87859 +struct OBJECT_ID {
87860 +       struct GUID ObjId;      // 0x00: Unique Id assigned to file
87861 +       struct GUID BirthVolumeId;// 0x10: Birth Volume Id is the Object Id of the Volume on
87862 +                               // which the Object Id was allocated. It never changes
87863 +       struct GUID BirthObjectId; // 0x20: Birth Object Id is the first Object Id that was
87864 +                               // ever assigned to this MFT Record. I.e. If the Object Id
87865 +                               // is changed for some reason, this field will reflect the
87866 +                               // original value of the Object Id.
87867 +       struct GUID DomainId;   // 0x30: Domain Id is currently unused but it is intended to be
87868 +                               // used in a network environment where the local machine is
87869 +                               // part of a Windows 2000 Domain. This may be used in a Windows
87870 +                               // 2000 Advanced Server managed domain.
87873 +static_assert(sizeof(struct OBJECT_ID) == 0x40);
87875 +/* O Directory entry structure ( rule = 0x13 ) */
87876 +struct NTFS_DE_O {
87877 +       struct NTFS_DE de;
87878 +       struct GUID ObjId;      // 0x10: Unique Id assigned to file
87879 +       struct MFT_REF ref;     // 0x20: MFT record number with this file
87880 +       struct GUID BirthVolumeId; // 0x28: Birth Volume Id is the Object Id of the Volume on
87881 +                               // which the Object Id was allocated. It never changes
87882 +       struct GUID BirthObjectId; // 0x38: Birth Object Id is the first Object Id that was
87883 +                               // ever assigned to this MFT Record. I.e. If the Object Id
87884 +                               // is changed for some reason, this field will reflect the
87885 +                               // original value of the Object Id.
87886 +                               // This field is valid if data_size == 0x48
87887 +       struct GUID BirthDomainId; // 0x48: Domain Id is currently unused but it is intended
87888 +                               // to be used in a network environment where the local
87889 +                               // machine is part of a Windows 2000 Domain. This may be
87890 +                               // used in a Windows 2000 Advanced Server managed domain.
87893 +static_assert(sizeof(struct NTFS_DE_O) == 0x58);
87895 +#define NTFS_OBJECT_ENTRY_DATA_SIZE1                                          \
87896 +       0x38 // struct NTFS_DE_O.BirthDomainId is not used
87897 +#define NTFS_OBJECT_ENTRY_DATA_SIZE2                                          \
87898 +       0x48 // struct NTFS_DE_O.BirthDomainId is used
/* Q Directory entry structure ( rule = 0x11 ) */
// NOTE(review): the member offsets below imply no padding between the
// __le32 fields and the 4-byte-aligned __le64 fields, i.e. the struct
// only matches the on-disk layout if effectively packed — confirm
// against the compiler/ABI used.
struct NTFS_DE_Q {
	struct NTFS_DE de;
	__le32 owner_id;	// 0x10: Unique Id assigned to file
	__le32 Version;		// 0x14: 0x02
	__le32 flags2;		// 0x18: Quota flags, see above
	__le64 BytesUsed;	// 0x1C:
	__le64 ChangeTime;	// 0x24:
	__le64 WarningLimit;	// 0x2C: (0x24 + 8; previous comment said 0x28)
	__le64 HardLimit;	// 0x34:
	__le64 ExceededTime;	// 0x3C:

	// SID is placed here
}; // sizeof() = 0x44

#define SIZEOF_NTFS_DE_Q 0x44
87917 +#define SecurityDescriptorsBlockSize 0x40000 // 256K
87918 +#define SecurityDescriptorMaxSize    0x20000 // 128K
87919 +#define Log2OfSecurityDescriptorsBlockSize 18
87921 +struct SECURITY_KEY {
87922 +       __le32 hash; //  Hash value for descriptor
87923 +       __le32 sec_id; //  Security Id (guaranteed unique)
87926 +/* Security descriptors (the content of $Secure::SDS data stream) */
87927 +struct SECURITY_HDR {
87928 +       struct SECURITY_KEY key;        // 0x00: Security Key
87929 +       __le64 off;                     // 0x08: Offset of this entry in the file
87930 +       __le32 size;                    // 0x10: Size of this entry, 8 byte aligned
87931 +       //
87932 +       // Security descriptor itself is placed here
87933 +       // Total size is 16 byte aligned
87934 +       //
87935 +} __packed;
87937 +#define SIZEOF_SECURITY_HDR 0x14
87939 +/* SII Directory entry structure */
87940 +struct NTFS_DE_SII {
87941 +       struct NTFS_DE de;
87942 +       __le32 sec_id;                  // 0x10: Key: sizeof(security_id) = wKeySize
87943 +       struct SECURITY_HDR sec_hdr;    // 0x14:
87944 +} __packed;
87946 +#define SIZEOF_SII_DIRENTRY 0x28
87948 +/* SDH Directory entry structure */
87949 +struct NTFS_DE_SDH {
87950 +       struct NTFS_DE de;
87951 +       struct SECURITY_KEY key;        // 0x10: Key
87952 +       struct SECURITY_HDR sec_hdr;    // 0x18: Data
87953 +       __le16 magic[2];                // 0x2C: 0x00490049 "I I"
87956 +#define SIZEOF_SDH_DIRENTRY 0x30
87958 +struct REPARSE_KEY {
87959 +       __le32 ReparseTag;              // 0x00: Reparse Tag
87960 +       struct MFT_REF ref;             // 0x04: MFT record number with this file
87961 +}; // sizeof() = 0x0C
87963 +static_assert(offsetof(struct REPARSE_KEY, ref) == 0x04);
87964 +#define SIZEOF_REPARSE_KEY 0x0C
87966 +/* Reparse Directory entry structure */
87967 +struct NTFS_DE_R {
87968 +       struct NTFS_DE de;
87969 +       struct REPARSE_KEY key;         // 0x10: Reparse Key
87970 +       u32 zero;                       // 0x1c
87971 +}; // sizeof() = 0x20
87973 +static_assert(sizeof(struct NTFS_DE_R) == 0x20);
87975 +/* CompressReparseBuffer.WofVersion */
87976 +#define WOF_CURRENT_VERSION            cpu_to_le32(1)
87977 +/* CompressReparseBuffer.WofProvider */
87978 +#define WOF_PROVIDER_WIM               cpu_to_le32(1)
87979 +/* CompressReparseBuffer.WofProvider */
87980 +#define WOF_PROVIDER_SYSTEM            cpu_to_le32(2)
87981 +/* CompressReparseBuffer.ProviderVer */
87982 +#define WOF_PROVIDER_CURRENT_VERSION   cpu_to_le32(1)
87984 +#define WOF_COMPRESSION_XPRESS4K       cpu_to_le32(0) // 4k
87985 +#define WOF_COMPRESSION_LZX32K         cpu_to_le32(1) // 32k
87986 +#define WOF_COMPRESSION_XPRESS8K       cpu_to_le32(2) // 8k
87987 +#define WOF_COMPRESSION_XPRESS16K      cpu_to_le32(3) // 16k
87990 + * ATTR_REPARSE (0xC0)
87991 + *
87992 + * The reparse struct GUID structure is used by all 3rd party layered drivers to
87993 + * store data in a reparse point. For non-Microsoft tags, The struct GUID field
87994 + * cannot be GUID_NULL.
87995 + * The constraints on reparse tags are defined below.
87996 + * Microsoft tags can also be used with this format of the reparse point buffer.
87997 + */
87998 +struct REPARSE_POINT {
87999 +       __le32 ReparseTag;      // 0x00:
88000 +       __le16 ReparseDataLength;// 0x04:
88001 +       __le16 Reserved;
88003 +       struct GUID Guid;       // 0x08:
88005 +       //
88006 +       // Here GenericReparseBuffer is placed
88007 +       //
88010 +static_assert(sizeof(struct REPARSE_POINT) == 0x18);
88013 +// Maximum allowed size of the reparse data.
88015 +#define MAXIMUM_REPARSE_DATA_BUFFER_SIZE       (16 * 1024)
88018 +// The value of the following constant needs to satisfy the following
88019 +// conditions:
88020 +//  (1) Be at least as large as the largest of the reserved tags.
88021 +//  (2) Be strictly smaller than all the tags in use.
88023 +#define IO_REPARSE_TAG_RESERVED_RANGE          1
88026 +// The reparse tags are a ULONG. The 32 bits are laid out as follows:
88028 +//   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
88029 +//   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
88030 +//  +-+-+-+-+-----------------------+-------------------------------+
88031 +//  |M|R|N|R|    Reserved bits     |       Reparse Tag Value       |
88032 +//  +-+-+-+-+-----------------------+-------------------------------+
88034 +// M is the Microsoft bit. When set to 1, it denotes a tag owned by Microsoft.
88035 +//   All ISVs must use a tag with a 0 in this position.
88036 +//   Note: If a Microsoft tag is used by non-Microsoft software, the
88037 +//   behavior is not defined.
88039 +// R is reserved.  Must be zero for non-Microsoft tags.
88041 +// N is name surrogate. When set to 1, the file represents another named
88042 +//   entity in the system.
88044 +// The M and N bits are OR-able.
88045 +// The following macros check for the M and N bit values:
88049 +// Macro to determine whether a reparse point tag corresponds to a tag
88050 +// owned by Microsoft.
88052 +#define IsReparseTagMicrosoft(_tag)    (((_tag)&IO_REPARSE_TAG_MICROSOFT))
88055 +// Macro to determine whether a reparse point tag is a name surrogate
88057 +#define IsReparseTagNameSurrogate(_tag)        (((_tag)&IO_REPARSE_TAG_NAME_SURROGATE))
88060 +// The following constant represents the bits that are valid to use in
88061 +// reparse tags.
88063 +#define IO_REPARSE_TAG_VALID_VALUES    0xF000FFFF
88066 +// Macro to determine whether a reparse tag is a valid tag.
88068 +#define IsReparseTagValid(_tag)                                                       \
88069 +       (!((_tag) & ~IO_REPARSE_TAG_VALID_VALUES) &&                           \
88070 +        ((_tag) > IO_REPARSE_TAG_RESERVED_RANGE))
88073 +// Microsoft tags for reparse points.
88076 +enum IO_REPARSE_TAG {
88077 +       IO_REPARSE_TAG_SYMBOLIC_LINK    = cpu_to_le32(0),
88078 +       IO_REPARSE_TAG_NAME_SURROGATE   = cpu_to_le32(0x20000000),
88079 +       IO_REPARSE_TAG_MICROSOFT        = cpu_to_le32(0x80000000),
88080 +       IO_REPARSE_TAG_MOUNT_POINT      = cpu_to_le32(0xA0000003),
88081 +       IO_REPARSE_TAG_SYMLINK          = cpu_to_le32(0xA000000C),
88082 +       IO_REPARSE_TAG_HSM              = cpu_to_le32(0xC0000004),
88083 +       IO_REPARSE_TAG_SIS              = cpu_to_le32(0x80000007),
88084 +       IO_REPARSE_TAG_DEDUP            = cpu_to_le32(0x80000013),
88085 +       IO_REPARSE_TAG_COMPRESS         = cpu_to_le32(0x80000017),
88087 +       //
88088 +       // The reparse tag 0x80000008 is reserved for Microsoft internal use
88089 +       // (may be published in the future)
88090 +       //
88092 +       //
88093 +       // Microsoft reparse tag reserved for DFS
88094 +       //
88095 +       IO_REPARSE_TAG_DFS              = cpu_to_le32(0x8000000A),
88097 +       //
88098 +       // Microsoft reparse tag reserved for the file system filter manager
88099 +       //
88100 +       IO_REPARSE_TAG_FILTER_MANAGER   = cpu_to_le32(0x8000000B),
88102 +       //
88103 +       // Non-Microsoft tags for reparse points
88104 +       //
88106 +       //
88107 +       // Tag allocated to CONGRUENT, May 2000. Used by IFSTEST
88108 +       //
88109 +       IO_REPARSE_TAG_IFSTEST_CONGRUENT = cpu_to_le32(0x00000009),
88111 +       //
88112 +       // Tag allocated to ARKIVIO
88113 +       //
88114 +       IO_REPARSE_TAG_ARKIVIO          = cpu_to_le32(0x0000000C),
88116 +       //
88117 +       //  Tag allocated to SOLUTIONSOFT
88118 +       //
88119 +       IO_REPARSE_TAG_SOLUTIONSOFT     = cpu_to_le32(0x2000000D),
88121 +       //
88122 +       //  Tag allocated to COMMVAULT
88123 +       //
88124 +       IO_REPARSE_TAG_COMMVAULT        = cpu_to_le32(0x0000000E),
88126 +       // OneDrive??
88127 +       IO_REPARSE_TAG_CLOUD            = cpu_to_le32(0x9000001A),
88128 +       IO_REPARSE_TAG_CLOUD_1          = cpu_to_le32(0x9000101A),
88129 +       IO_REPARSE_TAG_CLOUD_2          = cpu_to_le32(0x9000201A),
88130 +       IO_REPARSE_TAG_CLOUD_3          = cpu_to_le32(0x9000301A),
88131 +       IO_REPARSE_TAG_CLOUD_4          = cpu_to_le32(0x9000401A),
88132 +       IO_REPARSE_TAG_CLOUD_5          = cpu_to_le32(0x9000501A),
88133 +       IO_REPARSE_TAG_CLOUD_6          = cpu_to_le32(0x9000601A),
88134 +       IO_REPARSE_TAG_CLOUD_7          = cpu_to_le32(0x9000701A),
88135 +       IO_REPARSE_TAG_CLOUD_8          = cpu_to_le32(0x9000801A),
88136 +       IO_REPARSE_TAG_CLOUD_9          = cpu_to_le32(0x9000901A),
88137 +       IO_REPARSE_TAG_CLOUD_A          = cpu_to_le32(0x9000A01A),
88138 +       IO_REPARSE_TAG_CLOUD_B          = cpu_to_le32(0x9000B01A),
88139 +       IO_REPARSE_TAG_CLOUD_C          = cpu_to_le32(0x9000C01A),
88140 +       IO_REPARSE_TAG_CLOUD_D          = cpu_to_le32(0x9000D01A),
88141 +       IO_REPARSE_TAG_CLOUD_E          = cpu_to_le32(0x9000E01A),
88142 +       IO_REPARSE_TAG_CLOUD_F          = cpu_to_le32(0x9000F01A),
88146 +#define SYMLINK_FLAG_RELATIVE          1
88148 +/* Microsoft reparse buffer. (see DDK for details) */
88149 +struct REPARSE_DATA_BUFFER {
88150 +       __le32 ReparseTag;              // 0x00:
88151 +       __le16 ReparseDataLength;       // 0x04:
88152 +       __le16 Reserved;
88154 +       union {
88155 +               // If ReparseTag == 0xA0000003 (IO_REPARSE_TAG_MOUNT_POINT)
88156 +               struct {
88157 +                       __le16 SubstituteNameOffset; // 0x08
88158 +                       __le16 SubstituteNameLength; // 0x0A
88159 +                       __le16 PrintNameOffset;      // 0x0C
88160 +                       __le16 PrintNameLength;      // 0x0E
88161 +                       __le16 PathBuffer[];         // 0x10
88162 +               } MountPointReparseBuffer;
88164 +               // If ReparseTag == 0xA000000C (IO_REPARSE_TAG_SYMLINK)
88165 +               // https://msdn.microsoft.com/en-us/library/cc232006.aspx
88166 +               struct {
88167 +                       __le16 SubstituteNameOffset; // 0x08
88168 +                       __le16 SubstituteNameLength; // 0x0A
88169 +                       __le16 PrintNameOffset;      // 0x0C
88170 +                       __le16 PrintNameLength;      // 0x0E
88171 +                       // 0-absolute path 1- relative path, SYMLINK_FLAG_RELATIVE
88172 +                       __le32 Flags;                // 0x10
88173 +                       __le16 PathBuffer[];         // 0x14
88174 +               } SymbolicLinkReparseBuffer;
88176 +               // If ReparseTag == 0x80000017U
88177 +               struct {
88178 +                       __le32 WofVersion;  // 0x08 == 1
88179 +                       /* 1 - WIM backing provider ("WIMBoot"),
88180 +                        * 2 - System compressed file provider
88181 +                        */
88182 +                       __le32 WofProvider; // 0x0C
88183 +                       __le32 ProviderVer; // 0x10: == 1 WOF_FILE_PROVIDER_CURRENT_VERSION == 1
88184 +                       __le32 CompressionFormat; // 0x14: 0, 1, 2, 3. See WOF_COMPRESSION_XXX
88185 +               } CompressReparseBuffer;
88187 +               struct {
88188 +                       u8 DataBuffer[1];   // 0x08
88189 +               } GenericReparseBuffer;
88190 +       };
88193 +/* ATTR_EA_INFO (0xD0) */
88195 +#define FILE_NEED_EA 0x80 // See ntifs.h
88196 +/* FILE_NEED_EA, indicates that the file to which the EA belongs cannot be
88197 + * interpreted without understanding the associated extended attributes.
88198 + */
88199 +struct EA_INFO {
88200 +       __le16 size_pack;       // 0x00: Size of buffer to hold in packed form
88201 +       __le16 count;           // 0x02: Count of EA's with FILE_NEED_EA bit set
88202 +       __le32 size;            // 0x04: Size of buffer to hold in unpacked form
88205 +static_assert(sizeof(struct EA_INFO) == 8);
88207 +/* ATTR_EA (0xE0) */
88208 +struct EA_FULL {
88209 +       __le32 size;            // 0x00: (not in packed)
88210 +       u8 flags;               // 0x04
88211 +       u8 name_len;            // 0x05
88212 +       __le16 elength;         // 0x06
88213 +       u8 name[];              // 0x08
88216 +static_assert(offsetof(struct EA_FULL, name) == 8);
88218 +#define ACL_REVISION   2
88219 +#define ACL_REVISION_DS 4
88221 +#define SE_SELF_RELATIVE cpu_to_le16(0x8000)
88223 +struct SECURITY_DESCRIPTOR_RELATIVE {
88224 +       u8 Revision;
88225 +       u8 Sbz1;
88226 +       __le16 Control;
88227 +       __le32 Owner;
88228 +       __le32 Group;
88229 +       __le32 Sacl;
88230 +       __le32 Dacl;
88232 +static_assert(sizeof(struct SECURITY_DESCRIPTOR_RELATIVE) == 0x14);
88234 +struct ACE_HEADER {
88235 +       u8 AceType;
88236 +       u8 AceFlags;
88237 +       __le16 AceSize;
88239 +static_assert(sizeof(struct ACE_HEADER) == 4);
88241 +struct ACL {
88242 +       u8 AclRevision;
88243 +       u8 Sbz1;
88244 +       __le16 AclSize;
88245 +       __le16 AceCount;
88246 +       __le16 Sbz2;
88248 +static_assert(sizeof(struct ACL) == 8);
88250 +struct SID {
88251 +       u8 Revision;
88252 +       u8 SubAuthorityCount;
88253 +       u8 IdentifierAuthority[6];
88254 +       __le32 SubAuthority[];
88256 +static_assert(offsetof(struct SID, SubAuthority) == 8);
88258 +// clang-format on
88259 diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
88260 new file mode 100644
88261 index 000000000000..5e1dd628d3cc
88262 --- /dev/null
88263 +++ b/fs/ntfs3/ntfs_fs.h
88264 @@ -0,0 +1,1085 @@
88265 +/* SPDX-License-Identifier: GPL-2.0 */
88267 + *
88268 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
88269 + *
88270 + */
88272 +// clang-format off
88273 +#define MINUS_ONE_T                    ((size_t)(-1))
88274 +/* Biggest MFT / smallest cluster */
88275 +#define MAXIMUM_BYTES_PER_MFT          4096
88276 +#define NTFS_BLOCKS_PER_MFT_RECORD     (MAXIMUM_BYTES_PER_MFT / 512)
88278 +#define MAXIMUM_BYTES_PER_INDEX                4096
88279 +#define NTFS_BLOCKS_PER_INODE          (MAXIMUM_BYTES_PER_INDEX / 512)
88281 +/* ntfs specific error code when fixup failed*/
88282 +#define E_NTFS_FIXUP                   555
88283 +/* ntfs specific error code about resident->nonresident*/
88284 +#define E_NTFS_NONRESIDENT             556
88286 +/* sbi->flags */
88287 +#define NTFS_FLAGS_NODISCARD           0x00000001
88288 +/* Set when LogFile is replaying */
88289 +#define NTFS_FLAGS_LOG_REPLAYING       0x00000008
88290 +/* Set when we changed first MFT's which copy must be updated in $MftMirr */
88291 +#define NTFS_FLAGS_MFTMIRR             0x00001000
88292 +#define NTFS_FLAGS_NEED_REPLAY         0x04000000
88295 +/* ni->ni_flags */
88297 + * Data attribute is external compressed (lzx/xpress)
88298 + * 1 - WOF_COMPRESSION_XPRESS4K
88299 + * 2 - WOF_COMPRESSION_XPRESS8K
88300 + * 3 - WOF_COMPRESSION_XPRESS16K
88301 + * 4 - WOF_COMPRESSION_LZX32K
88302 + */
88303 +#define NI_FLAG_COMPRESSED_MASK                0x0000000f
88304 +/* Data attribute is deduplicated */
88305 +#define NI_FLAG_DEDUPLICATED           0x00000010
88306 +#define NI_FLAG_EA                     0x00000020
88307 +#define NI_FLAG_DIR                    0x00000040
88308 +#define NI_FLAG_RESIDENT               0x00000080
88309 +#define NI_FLAG_UPDATE_PARENT          0x00000100
88310 +// clang-format on
/* Options parsed at mount time. */
struct ntfs_mount_options {
	struct nls_table *nls;	// charset table for on-disk name conversion
				// — presumably; confirm against option parser

	kuid_t fs_uid;		// owner uid applied when 'uid' below is set
	kgid_t fs_gid;		// owner gid applied when 'gid' below is set
	u16 fs_fmask_inv;	// inverted fmask — TODO confirm inversion site
	u16 fs_dmask_inv;	// inverted dmask — TODO confirm inversion site

	unsigned uid : 1,	/* uid was set */
		gid : 1,	/* gid was set */
		fmask : 1,	/* fmask was set */
		dmask : 1,	/* dmask was set */
		sys_immutable : 1, /* immutable system files */
		discard : 1,	/* issue discard requests on deletions */
		sparse : 1,	/* create sparse files */
		showmeta : 1,	/* show meta files */
		nohidden : 1,	/* do not show hidden files */
		force : 1,	/* rw mount dirty volume */
		no_acs_rules : 1, /* exclude acs rules */
		prealloc : 1	/* preallocate space when file is growing */
		;
};
88335 +/* special value to unpack and deallocate*/
88336 +#define RUN_DEALLOCATE ((struct runs_tree *)(size_t)1)
88338 +/* TODO: use rb tree instead of array */
88339 +struct runs_tree {
88340 +       struct ntfs_run *runs;
88341 +       size_t count; // Currently used size a ntfs_run storage.
88342 +       size_t allocated; // Currently allocated ntfs_run storage size.
88345 +struct ntfs_buffers {
88346 +       /* Biggest MFT / smallest cluster = 4096 / 512 = 8 */
88347 +       /* Biggest index / smallest cluster = 4096 / 512 = 8 */
88348 +       struct buffer_head *bh[PAGE_SIZE >> SECTOR_SHIFT];
88349 +       u32 bytes;
88350 +       u32 nbufs;
88351 +       u32 off;
88354 +enum ALLOCATE_OPT {
88355 +       ALLOCATE_DEF = 0, // Allocate all clusters
88356 +       ALLOCATE_MFT = 1, // Allocate for MFT
88359 +enum bitmap_mutex_classes {
88360 +       BITMAP_MUTEX_CLUSTERS = 0,
88361 +       BITMAP_MUTEX_MFT = 1,
88364 +struct wnd_bitmap {
88365 +       struct super_block *sb;
88366 +       struct rw_semaphore rw_lock;
88368 +       struct runs_tree run;
88369 +       size_t nbits;
88371 +       size_t total_zeroes; // total number of free bits
88372 +       u16 *free_bits; // free bits in each window
88373 +       size_t nwnd;
88374 +       u32 bits_last; // bits in last window
88376 +       struct rb_root start_tree; // extents, sorted by 'start'
88377 +       struct rb_root count_tree; // extents, sorted by 'count + start'
88378 +       size_t count; // extents count
88380 +       /*
88381 +        * -1 Tree is activated but not updated (too many fragments)
88382 +        * 0 - Tree is not activated
88383 +        * 1 - Tree is activated and updated
88384 +        */
88385 +       int uptodated;
88386 +       size_t extent_min; // Minimal extent used while building
88387 +       size_t extent_max; // Upper estimate of biggest free block
88389 +       /* Zone [bit, end) */
88390 +       size_t zone_bit;
88391 +       size_t zone_end;
88393 +       bool set_tail; // not necessary in driver
88394 +       bool inited;
88397 +typedef int (*NTFS_CMP_FUNC)(const void *key1, size_t len1, const void *key2,
88398 +                            size_t len2, const void *param);
88400 +enum index_mutex_classed {
88401 +       INDEX_MUTEX_I30 = 0,
88402 +       INDEX_MUTEX_SII = 1,
88403 +       INDEX_MUTEX_SDH = 2,
88404 +       INDEX_MUTEX_SO = 3,
88405 +       INDEX_MUTEX_SQ = 4,
88406 +       INDEX_MUTEX_SR = 5,
88407 +       INDEX_MUTEX_TOTAL
88410 +/* ntfs_index - allocation unit inside directory */
88411 +struct ntfs_index {
88412 +       struct runs_tree bitmap_run;
88413 +       struct runs_tree alloc_run;
88414 +       /* read/write access to 'bitmap_run'/'alloc_run' while ntfs_readdir */
88415 +       struct rw_semaphore run_lock;
88417 +       /*TODO: remove 'cmp'*/
88418 +       NTFS_CMP_FUNC cmp;
88420 +       u8 index_bits; // log2(root->index_block_size)
88421 +       u8 idx2vbn_bits; // log2(root->index_block_clst)
88422 +       u8 vbn2vbo_bits; // index_block_size < cluster? 9 : cluster_bits
88423 +       u8 type; // index_mutex_classed
88426 +/* Minimum mft zone */
88427 +#define NTFS_MIN_MFT_ZONE 100
88429 +/* ntfs file system in-core superblock data */
88430 +struct ntfs_sb_info {
88431 +       struct super_block *sb;
88433 +       u32 discard_granularity;
88434 +       u64 discard_granularity_mask_inv; // ~(discard_granularity_mask_inv-1)
88436 +       u32 cluster_size; // bytes per cluster
88437 +       u32 cluster_mask; // == cluster_size - 1
88438 +       u64 cluster_mask_inv; // ~(cluster_size - 1)
88439 +       u32 block_mask; // sb->s_blocksize - 1
88440 +       u32 blocks_per_cluster; // cluster_size / sb->s_blocksize
88442 +       u32 record_size;
88443 +       u32 sector_size;
88444 +       u32 index_size;
88446 +       u8 sector_bits;
88447 +       u8 cluster_bits;
88448 +       u8 record_bits;
88450 +       u64 maxbytes; // Maximum size for normal files
88451 +       u64 maxbytes_sparse; // Maximum size for sparse file
88453 +       u32 flags; // See NTFS_FLAGS_XXX
88455 +       CLST bad_clusters; // The count of marked bad clusters
88457 +       u16 max_bytes_per_attr; // maximum attribute size in record
88458 +       u16 attr_size_tr; // attribute size threshold (320 bytes)
88460 +       /* Records in $Extend */
88461 +       CLST objid_no;
88462 +       CLST quota_no;
88463 +       CLST reparse_no;
88464 +       CLST usn_jrnl_no;
88466 +       struct ATTR_DEF_ENTRY *def_table; // attribute definition table
88467 +       u32 def_entries;
88468 +       u32 ea_max_size;
88470 +       struct MFT_REC *new_rec;
88472 +       u16 *upcase;
88474 +       struct {
88475 +               u64 lbo, lbo2;
88476 +               struct ntfs_inode *ni;
88477 +               struct wnd_bitmap bitmap; // $MFT::Bitmap
88478 +               /*
88479 +                * MFT records [11-24) used to expand MFT itself
88480 +                * They always marked as used in $MFT::Bitmap
88481 +                * 'reserved_bitmap' contains real bitmap of these records
88482 +                */
88483 +               ulong reserved_bitmap; // bitmap of used records [11 - 24)
88484 +               size_t next_free; // The next record to allocate from
88485 +               size_t used; // mft valid size in records
88486 +               u32 recs_mirr; // Number of records in MFTMirr
88487 +               u8 next_reserved;
88488 +               u8 reserved_bitmap_inited;
88489 +       } mft;
88491 +       struct {
88492 +               struct wnd_bitmap bitmap; // $Bitmap::Data
88493 +               CLST next_free_lcn;
88494 +       } used;
88496 +       struct {
88497 +               u64 size; // in bytes
88498 +               u64 blocks; // in blocks
88499 +               u64 ser_num;
88500 +               struct ntfs_inode *ni;
88501 +               __le16 flags; // cached current VOLUME_INFO::flags, VOLUME_FLAG_DIRTY
88502 +               u8 major_ver;
88503 +               u8 minor_ver;
88504 +               char label[65];
88505 +               bool real_dirty; /* real fs state*/
88506 +       } volume;
88508 +       struct {
88509 +               struct ntfs_index index_sii;
88510 +               struct ntfs_index index_sdh;
88511 +               struct ntfs_inode *ni;
88512 +               u32 next_id;
88513 +               u64 next_off;
88515 +               __le32 def_security_id;
88516 +       } security;
88518 +       struct {
88519 +               struct ntfs_index index_r;
88520 +               struct ntfs_inode *ni;
88521 +               u64 max_size; // 16K
88522 +       } reparse;
88524 +       struct {
88525 +               struct ntfs_index index_o;
88526 +               struct ntfs_inode *ni;
88527 +       } objid;
88529 +       struct {
88530 +               struct mutex mtx_lznt;
88531 +               struct lznt *lznt;
88532 +#ifdef CONFIG_NTFS3_LZX_XPRESS
88533 +               struct mutex mtx_xpress;
88534 +               struct xpress_decompressor *xpress;
88535 +               struct mutex mtx_lzx;
88536 +               struct lzx_decompressor *lzx;
88537 +#endif
88538 +       } compress;
88540 +       struct ntfs_mount_options options;
88541 +       struct ratelimit_state msg_ratelimit;
88545 + * one MFT record(usually 1024 bytes), consists of attributes
88546 + */
88547 +struct mft_inode {
88548 +       struct rb_node node;
88549 +       struct ntfs_sb_info *sbi;
88551 +       struct MFT_REC *mrec;
88552 +       struct ntfs_buffers nb;
88554 +       CLST rno;
88555 +       bool dirty;
88558 +/* nested class for ntfs_inode::ni_lock */
88559 +enum ntfs_inode_mutex_lock_class {
88560 +       NTFS_INODE_MUTEX_DIRTY,
88561 +       NTFS_INODE_MUTEX_SECURITY,
88562 +       NTFS_INODE_MUTEX_OBJID,
88563 +       NTFS_INODE_MUTEX_REPARSE,
88564 +       NTFS_INODE_MUTEX_NORMAL,
88565 +       NTFS_INODE_MUTEX_PARENT,
88569 + * ntfs inode - extends linux inode. consists of one or more mft inodes
88570 + */
88571 +struct ntfs_inode {
88572 +       struct mft_inode mi; // base record
88574 +       /*
88575 +        * Valid size: [0 - i_valid) - these range in file contains valid data
88576 +        * Range [i_valid - inode->i_size) - contains 0
88577 +        * Usually i_valid <= inode->i_size
88578 +        */
88579 +       u64 i_valid;
88580 +       struct timespec64 i_crtime;
88582 +       struct mutex ni_lock;
88584 +       /* file attributes from std */
88585 +       enum FILE_ATTRIBUTE std_fa;
88586 +       __le32 std_security_id;
88588 +       /*
88589 +        * tree of mft_inode
88590 +        * not empty when primary MFT record (usually 1024 bytes) can't save all attributes
88591 +        * e.g. file becomes too fragmented or contains a lot of names
88592 +        */
88593 +       struct rb_root mi_tree;
88595 +       /*
88596 +        * This member is used in ntfs_readdir to ensure that all subrecords are loaded
88597 +        */
88598 +       u8 mi_loaded;
88600 +       union {
88601 +               struct ntfs_index dir;
88602 +               struct {
88603 +                       struct rw_semaphore run_lock;
88604 +                       struct runs_tree run;
88605 +#ifdef CONFIG_NTFS3_LZX_XPRESS
88606 +                       struct page *offs_page;
88607 +#endif
88608 +               } file;
88609 +       };
88611 +       struct {
88612 +               struct runs_tree run;
88613 +               struct ATTR_LIST_ENTRY *le; // 1K aligned memory
88614 +               size_t size;
88615 +               bool dirty;
88616 +       } attr_list;
88618 +       size_t ni_flags; // NI_FLAG_XXX
88620 +       struct inode vfs_inode;
88623 +struct indx_node {
88624 +       struct ntfs_buffers nb;
88625 +       struct INDEX_BUFFER *index;
88628 +struct ntfs_fnd {
88629 +       int level;
88630 +       struct indx_node *nodes[20];
88631 +       struct NTFS_DE *de[20];
88632 +       struct NTFS_DE *root_de;
88635 +enum REPARSE_SIGN {
88636 +       REPARSE_NONE = 0,
88637 +       REPARSE_COMPRESSED = 1,
88638 +       REPARSE_DEDUPLICATED = 2,
88639 +       REPARSE_LINK = 3
88642 +/* functions from attrib.c*/
88643 +int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
88644 +                  struct runs_tree *run, const CLST *vcn);
88645 +int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
88646 +                          CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
88647 +                          enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
88648 +                          CLST *new_lcn);
88649 +int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
88650 +                         struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
88651 +                         u64 new_size, struct runs_tree *run,
88652 +                         struct ATTRIB **ins_attr, struct page *page);
88653 +int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
88654 +                 const __le16 *name, u8 name_len, struct runs_tree *run,
88655 +                 u64 new_size, const u64 *new_valid, bool keep_prealloc,
88656 +                 struct ATTRIB **ret);
88657 +int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
88658 +                       CLST *len, bool *new);
88659 +int attr_data_read_resident(struct ntfs_inode *ni, struct page *page);
88660 +int attr_data_write_resident(struct ntfs_inode *ni, struct page *page);
88661 +int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
88662 +                      const __le16 *name, u8 name_len, struct runs_tree *run,
88663 +                      CLST vcn);
88664 +int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
88665 +                        const __le16 *name, u8 name_len, struct runs_tree *run,
88666 +                        u64 from, u64 to);
88667 +int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
88668 +                       struct runs_tree *run, u64 frame, u64 frames,
88669 +                       u8 frame_bits, u32 *ondisk_size, u64 *vbo_data);
88670 +int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
88671 +                            CLST frame, CLST *clst_data);
88672 +int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
88673 +                       u64 new_valid);
88674 +int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes);
88675 +int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes);
88677 +/* functions from attrlist.c*/
88678 +void al_destroy(struct ntfs_inode *ni);
88679 +bool al_verify(struct ntfs_inode *ni);
88680 +int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr);
88681 +struct ATTR_LIST_ENTRY *al_enumerate(struct ntfs_inode *ni,
88682 +                                    struct ATTR_LIST_ENTRY *le);
88683 +struct ATTR_LIST_ENTRY *al_find_le(struct ntfs_inode *ni,
88684 +                                  struct ATTR_LIST_ENTRY *le,
88685 +                                  const struct ATTRIB *attr);
88686 +struct ATTR_LIST_ENTRY *al_find_ex(struct ntfs_inode *ni,
88687 +                                  struct ATTR_LIST_ENTRY *le,
88688 +                                  enum ATTR_TYPE type, const __le16 *name,
88689 +                                  u8 name_len, const CLST *vcn);
88690 +int al_add_le(struct ntfs_inode *ni, enum ATTR_TYPE type, const __le16 *name,
88691 +             u8 name_len, CLST svcn, __le16 id, const struct MFT_REF *ref,
88692 +             struct ATTR_LIST_ENTRY **new_le);
88693 +bool al_remove_le(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le);
88694 +bool al_delete_le(struct ntfs_inode *ni, enum ATTR_TYPE type, CLST vcn,
88695 +                 const __le16 *name, size_t name_len,
88696 +                 const struct MFT_REF *ref);
88697 +int al_update(struct ntfs_inode *ni);
88698 +static inline size_t al_aligned(size_t size)
88700 +       return (size + 1023) & ~(size_t)1023;
88703 +/* globals from bitfunc.c */
88704 +bool are_bits_clear(const ulong *map, size_t bit, size_t nbits);
88705 +bool are_bits_set(const ulong *map, size_t bit, size_t nbits);
88706 +size_t get_set_bits_ex(const ulong *map, size_t bit, size_t nbits);
88708 +/* globals from dir.c */
88709 +int ntfs_utf16_to_nls(struct ntfs_sb_info *sbi, const struct le_str *uni,
88710 +                     u8 *buf, int buf_len);
88711 +int ntfs_nls_to_utf16(struct ntfs_sb_info *sbi, const u8 *name, u32 name_len,
88712 +                     struct cpu_str *uni, u32 max_ulen,
88713 +                     enum utf16_endian endian);
88714 +struct inode *dir_search_u(struct inode *dir, const struct cpu_str *uni,
88715 +                          struct ntfs_fnd *fnd);
88716 +bool dir_is_empty(struct inode *dir);
88717 +extern const struct file_operations ntfs_dir_operations;
88719 +/* globals from file.c*/
88720 +int ntfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
88721 +                struct kstat *stat, u32 request_mask, u32 flags);
88722 +void ntfs_sparse_cluster(struct inode *inode, struct page *page0, CLST vcn,
88723 +                        CLST len);
88724 +int ntfs3_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
88725 +                 struct iattr *attr);
88726 +int ntfs_file_open(struct inode *inode, struct file *file);
88727 +int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
88728 +               __u64 start, __u64 len);
88729 +extern const struct inode_operations ntfs_special_inode_operations;
88730 +extern const struct inode_operations ntfs_file_inode_operations;
88731 +extern const struct file_operations ntfs_file_operations;
88733 +/* globals from frecord.c */
88734 +void ni_remove_mi(struct ntfs_inode *ni, struct mft_inode *mi);
88735 +struct ATTR_STD_INFO *ni_std(struct ntfs_inode *ni);
88736 +struct ATTR_STD_INFO5 *ni_std5(struct ntfs_inode *ni);
88737 +void ni_clear(struct ntfs_inode *ni);
88738 +int ni_load_mi_ex(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi);
88739 +int ni_load_mi(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le,
88740 +              struct mft_inode **mi);
88741 +struct ATTRIB *ni_find_attr(struct ntfs_inode *ni, struct ATTRIB *attr,
88742 +                           struct ATTR_LIST_ENTRY **entry_o,
88743 +                           enum ATTR_TYPE type, const __le16 *name,
88744 +                           u8 name_len, const CLST *vcn,
88745 +                           struct mft_inode **mi);
88746 +struct ATTRIB *ni_enum_attr_ex(struct ntfs_inode *ni, struct ATTRIB *attr,
88747 +                              struct ATTR_LIST_ENTRY **le,
88748 +                              struct mft_inode **mi);
88749 +struct ATTRIB *ni_load_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
88750 +                           const __le16 *name, u8 name_len, CLST vcn,
88751 +                           struct mft_inode **pmi);
88752 +int ni_load_all_mi(struct ntfs_inode *ni);
88753 +bool ni_add_subrecord(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi);
88754 +int ni_remove_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
88755 +                  const __le16 *name, size_t name_len, bool base_only,
88756 +                  const __le16 *id);
88757 +int ni_create_attr_list(struct ntfs_inode *ni);
88758 +int ni_expand_list(struct ntfs_inode *ni);
88759 +int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type,
88760 +                         const __le16 *name, u8 name_len,
88761 +                         const struct runs_tree *run, CLST svcn, CLST len,
88762 +                         __le16 flags, struct ATTRIB **new_attr,
88763 +                         struct mft_inode **mi);
88764 +int ni_insert_resident(struct ntfs_inode *ni, u32 data_size,
88765 +                      enum ATTR_TYPE type, const __le16 *name, u8 name_len,
88766 +                      struct ATTRIB **new_attr, struct mft_inode **mi);
88767 +int ni_remove_attr_le(struct ntfs_inode *ni, struct ATTRIB *attr,
88768 +                     struct ATTR_LIST_ENTRY *le);
88769 +int ni_delete_all(struct ntfs_inode *ni);
88770 +struct ATTR_FILE_NAME *ni_fname_name(struct ntfs_inode *ni,
88771 +                                    const struct cpu_str *uni,
88772 +                                    const struct MFT_REF *home,
88773 +                                    struct ATTR_LIST_ENTRY **entry);
88774 +struct ATTR_FILE_NAME *ni_fname_type(struct ntfs_inode *ni, u8 name_type,
88775 +                                    struct ATTR_LIST_ENTRY **entry);
88776 +int ni_new_attr_flags(struct ntfs_inode *ni, enum FILE_ATTRIBUTE new_fa);
88777 +enum REPARSE_SIGN ni_parse_reparse(struct ntfs_inode *ni, struct ATTRIB *attr,
88778 +                                  void *buffer);
88779 +int ni_write_inode(struct inode *inode, int sync, const char *hint);
88780 +#define _ni_write_inode(i, w) ni_write_inode(i, w, __func__)
88781 +int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
88782 +             __u64 vbo, __u64 len);
88783 +int ni_readpage_cmpr(struct ntfs_inode *ni, struct page *page);
88784 +int ni_decompress_file(struct ntfs_inode *ni);
88785 +int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
88786 +                 u32 pages_per_frame);
88787 +int ni_write_frame(struct ntfs_inode *ni, struct page **pages,
88788 +                  u32 pages_per_frame);
88790 +/* globals from fslog.c */
88791 +int log_replay(struct ntfs_inode *ni, bool *initialized);
88793 +/* globals from fsntfs.c */
88794 +bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes);
88795 +int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
88796 +                      bool simple);
88797 +int ntfs_extend_init(struct ntfs_sb_info *sbi);
88798 +int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi);
88799 +const struct ATTR_DEF_ENTRY *ntfs_query_def(struct ntfs_sb_info *sbi,
88800 +                                           enum ATTR_TYPE Type);
88801 +int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
88802 +                            CLST *new_lcn, CLST *new_len,
88803 +                            enum ALLOCATE_OPT opt);
88804 +int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
88805 +                      struct ntfs_inode *ni, struct mft_inode **mi);
88806 +void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno);
88807 +int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to);
88808 +int ntfs_refresh_zone(struct ntfs_sb_info *sbi);
88809 +int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait);
88810 +enum NTFS_DIRTY_FLAGS {
88811 +       NTFS_DIRTY_CLEAR = 0,
88812 +       NTFS_DIRTY_DIRTY = 1,
88813 +       NTFS_DIRTY_ERROR = 2,
88815 +int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty);
88816 +int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer);
88817 +int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
88818 +                 const void *buffer, int wait);
88819 +int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
88820 +                     u64 vbo, const void *buf, size_t bytes);
88821 +struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
88822 +                                  const struct runs_tree *run, u64 vbo);
88823 +int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
88824 +                    u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb);
88825 +int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
88826 +                struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
88827 +                struct ntfs_buffers *nb);
88828 +int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
88829 +               u32 bytes, struct ntfs_buffers *nb);
88830 +int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
88831 +                 struct ntfs_buffers *nb, int sync);
88832 +int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
88833 +                  struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
88834 +                  u32 op);
88835 +int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run);
88836 +int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
88837 +                   u64 vbo, u64 *lbo, u64 *bytes);
88838 +struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST nRec,
88839 +                                 bool dir);
88840 +extern const u8 s_default_security[0x50];
88841 +bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len);
88842 +int ntfs_security_init(struct ntfs_sb_info *sbi);
88843 +int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
88844 +                           struct SECURITY_DESCRIPTOR_RELATIVE **sd,
88845 +                           size_t *size);
88846 +int ntfs_insert_security(struct ntfs_sb_info *sbi,
88847 +                        const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
88848 +                        u32 size, __le32 *security_id, bool *inserted);
88849 +int ntfs_reparse_init(struct ntfs_sb_info *sbi);
88850 +int ntfs_objid_init(struct ntfs_sb_info *sbi);
88851 +int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid);
88852 +int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
88853 +                       const struct MFT_REF *ref);
88854 +int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
88855 +                       const struct MFT_REF *ref);
88856 +void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim);
88857 +int run_deallocate(struct ntfs_sb_info *sbi, struct runs_tree *run, bool trim);
88859 +/* globals from index.c */
88860 +int indx_used_bit(struct ntfs_index *indx, struct ntfs_inode *ni, size_t *bit);
88861 +void fnd_clear(struct ntfs_fnd *fnd);
88862 +static inline struct ntfs_fnd *fnd_get(void)
88864 +       return ntfs_zalloc(sizeof(struct ntfs_fnd));
88866 +static inline void fnd_put(struct ntfs_fnd *fnd)
88868 +       if (fnd) {
88869 +               fnd_clear(fnd);
88870 +               ntfs_free(fnd);
88871 +       }
88873 +void indx_clear(struct ntfs_index *idx);
88874 +int indx_init(struct ntfs_index *indx, struct ntfs_sb_info *sbi,
88875 +             const struct ATTRIB *attr, enum index_mutex_classed type);
88876 +struct INDEX_ROOT *indx_get_root(struct ntfs_index *indx, struct ntfs_inode *ni,
88877 +                                struct ATTRIB **attr, struct mft_inode **mi);
88878 +int indx_read(struct ntfs_index *idx, struct ntfs_inode *ni, CLST vbn,
88879 +             struct indx_node **node);
88880 +int indx_find(struct ntfs_index *indx, struct ntfs_inode *dir,
88881 +             const struct INDEX_ROOT *root, const void *Key, size_t KeyLen,
88882 +             const void *param, int *diff, struct NTFS_DE **entry,
88883 +             struct ntfs_fnd *fnd);
88884 +int indx_find_sort(struct ntfs_index *indx, struct ntfs_inode *ni,
88885 +                  const struct INDEX_ROOT *root, struct NTFS_DE **entry,
88886 +                  struct ntfs_fnd *fnd);
88887 +int indx_find_raw(struct ntfs_index *indx, struct ntfs_inode *ni,
88888 +                 const struct INDEX_ROOT *root, struct NTFS_DE **entry,
88889 +                 size_t *off, struct ntfs_fnd *fnd);
88890 +int indx_insert_entry(struct ntfs_index *indx, struct ntfs_inode *ni,
88891 +                     const struct NTFS_DE *new_de, const void *param,
88892 +                     struct ntfs_fnd *fnd);
88893 +int indx_delete_entry(struct ntfs_index *indx, struct ntfs_inode *ni,
88894 +                     const void *key, u32 key_len, const void *param);
88895 +int indx_update_dup(struct ntfs_inode *ni, struct ntfs_sb_info *sbi,
88896 +                   const struct ATTR_FILE_NAME *fname,
88897 +                   const struct NTFS_DUP_INFO *dup, int sync);
88899 +/* globals from inode.c */
88900 +struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref,
88901 +                        const struct cpu_str *name);
88902 +int ntfs_set_size(struct inode *inode, u64 new_size);
88903 +int reset_log_file(struct inode *inode);
88904 +int ntfs_get_block(struct inode *inode, sector_t vbn,
88905 +                  struct buffer_head *bh_result, int create);
88906 +int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc);
88907 +int ntfs_sync_inode(struct inode *inode);
88908 +int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
88909 +                     struct inode *i2);
88910 +int inode_write_data(struct inode *inode, const void *data, size_t bytes);
88911 +struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
88912 +                               struct inode *dir, struct dentry *dentry,
88913 +                               const struct cpu_str *uni, umode_t mode,
88914 +                               dev_t dev, const char *symname, u32 size,
88915 +                               int excl, struct ntfs_fnd *fnd);
88916 +int ntfs_link_inode(struct inode *inode, struct dentry *dentry);
88917 +int ntfs_unlink_inode(struct inode *dir, const struct dentry *dentry);
88918 +void ntfs_evict_inode(struct inode *inode);
88919 +extern const struct inode_operations ntfs_link_inode_operations;
88920 +extern const struct address_space_operations ntfs_aops;
88921 +extern const struct address_space_operations ntfs_aops_cmpr;
88923 +/* globals from name_i.c*/
88924 +int fill_name_de(struct ntfs_sb_info *sbi, void *buf, const struct qstr *name,
88925 +                const struct cpu_str *uni);
88926 +struct dentry *ntfs3_get_parent(struct dentry *child);
88928 +extern const struct inode_operations ntfs_dir_inode_operations;
88930 +/* globals from record.c */
88931 +int mi_get(struct ntfs_sb_info *sbi, CLST rno, struct mft_inode **mi);
88932 +void mi_put(struct mft_inode *mi);
88933 +int mi_init(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno);
88934 +int mi_read(struct mft_inode *mi, bool is_mft);
88935 +struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr);
88936 +// TODO: id?
88937 +struct ATTRIB *mi_find_attr(struct mft_inode *mi, struct ATTRIB *attr,
88938 +                           enum ATTR_TYPE type, const __le16 *name,
88939 +                           size_t name_len, const __le16 *id);
88940 +static inline struct ATTRIB *rec_find_attr_le(struct mft_inode *rec,
88941 +                                             struct ATTR_LIST_ENTRY *le)
88943 +       return mi_find_attr(rec, NULL, le->type, le_name(le), le->name_len,
88944 +                           &le->id);
88946 +int mi_write(struct mft_inode *mi, int wait);
88947 +int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
88948 +                 __le16 flags, bool is_mft);
88949 +void mi_mark_free(struct mft_inode *mi);
88950 +struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
88951 +                             const __le16 *name, u8 name_len, u32 asize,
88952 +                             u16 name_off);
88954 +bool mi_remove_attr(struct mft_inode *mi, struct ATTRIB *attr);
88955 +bool mi_resize_attr(struct mft_inode *mi, struct ATTRIB *attr, int bytes);
88956 +int mi_pack_runs(struct mft_inode *mi, struct ATTRIB *attr,
88957 +                struct runs_tree *run, CLST len);
88958 +static inline bool mi_is_ref(const struct mft_inode *mi,
88959 +                            const struct MFT_REF *ref)
88961 +       if (le32_to_cpu(ref->low) != mi->rno)
88962 +               return false;
88963 +       if (ref->seq != mi->mrec->seq)
88964 +               return false;
88966 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
88967 +       return le16_to_cpu(ref->high) == (mi->rno >> 32);
88968 +#else
88969 +       return !ref->high;
88970 +#endif
88973 +static inline void mi_get_ref(const struct mft_inode *mi, struct MFT_REF *ref)
88975 +       ref->low = cpu_to_le32(mi->rno);
88976 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
88977 +       ref->high = cpu_to_le16(mi->rno >> 32);
88978 +#else
88979 +       ref->high = 0;
88980 +#endif
88981 +       ref->seq = mi->mrec->seq;
88984 +/* globals from run.c */
88985 +bool run_lookup_entry(const struct runs_tree *run, CLST vcn, CLST *lcn,
88986 +                     CLST *len, size_t *index);
88987 +void run_truncate(struct runs_tree *run, CLST vcn);
88988 +void run_truncate_head(struct runs_tree *run, CLST vcn);
88989 +void run_truncate_around(struct runs_tree *run, CLST vcn);
88990 +bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *Index);
88991 +bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len,
88992 +                  bool is_mft);
88993 +bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len);
88994 +bool run_get_entry(const struct runs_tree *run, size_t index, CLST *vcn,
88995 +                  CLST *lcn, CLST *len);
88996 +bool run_is_mapped_full(const struct runs_tree *run, CLST svcn, CLST evcn);
88998 +int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf,
88999 +            u32 run_buf_size, CLST *packed_vcns);
89000 +int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
89001 +              CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
89002 +              u32 run_buf_size);
89004 +#ifdef NTFS3_CHECK_FREE_CLST
89005 +int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
89006 +                 CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
89007 +                 u32 run_buf_size);
89008 +#else
89009 +#define run_unpack_ex run_unpack
89010 +#endif
89011 +int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn);
89013 +/* globals from super.c */
89014 +void *ntfs_set_shared(void *ptr, u32 bytes);
89015 +void *ntfs_put_shared(void *ptr);
89016 +void ntfs_unmap_meta(struct super_block *sb, CLST lcn, CLST len);
89017 +int ntfs_discard(struct ntfs_sb_info *sbi, CLST Lcn, CLST Len);
89019 +/* globals from bitmap.c*/
89020 +int __init ntfs3_init_bitmap(void);
89021 +void ntfs3_exit_bitmap(void);
89022 +void wnd_close(struct wnd_bitmap *wnd);
89023 +static inline size_t wnd_zeroes(const struct wnd_bitmap *wnd)
89025 +       return wnd->total_zeroes;
89027 +int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits);
89028 +int wnd_set_free(struct wnd_bitmap *wnd, size_t bit, size_t bits);
89029 +int wnd_set_used(struct wnd_bitmap *wnd, size_t bit, size_t bits);
89030 +bool wnd_is_free(struct wnd_bitmap *wnd, size_t bit, size_t bits);
89031 +bool wnd_is_used(struct wnd_bitmap *wnd, size_t bit, size_t bits);
89033 +/* Possible values for 'flags' 'wnd_find' */
89034 +#define BITMAP_FIND_MARK_AS_USED 0x01
89035 +#define BITMAP_FIND_FULL 0x02
89036 +size_t wnd_find(struct wnd_bitmap *wnd, size_t to_alloc, size_t hint,
89037 +               size_t flags, size_t *allocated);
89038 +int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits);
89039 +void wnd_zone_set(struct wnd_bitmap *wnd, size_t Lcn, size_t Len);
89040 +int ntfs_trim_fs(struct ntfs_sb_info *sbi, struct fstrim_range *range);
89042 +/* globals from upcase.c */
89043 +int ntfs_cmp_names(const __le16 *s1, size_t l1, const __le16 *s2, size_t l2,
89044 +                  const u16 *upcase, bool bothcase);
89045 +int ntfs_cmp_names_cpu(const struct cpu_str *uni1, const struct le_str *uni2,
89046 +                      const u16 *upcase, bool bothcase);
89048 +/* globals from xattr.c */
89049 +#ifdef CONFIG_NTFS3_FS_POSIX_ACL
89050 +struct posix_acl *ntfs_get_acl(struct inode *inode, int type);
89051 +int ntfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
89052 +                struct posix_acl *acl, int type);
89053 +int ntfs_init_acl(struct user_namespace *mnt_userns, struct inode *inode,
89054 +                 struct inode *dir);
89055 +#else
89056 +#define ntfs_get_acl NULL
89057 +#define ntfs_set_acl NULL
89058 +#endif
89060 +int ntfs_acl_chmod(struct user_namespace *mnt_userns, struct inode *inode);
89061 +int ntfs_permission(struct user_namespace *mnt_userns, struct inode *inode,
89062 +                   int mask);
89063 +ssize_t ntfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
89064 +extern const struct xattr_handler *ntfs_xattr_handlers[];
89066 +/* globals from lznt.c */
89067 +struct lznt *get_lznt_ctx(int level);
89068 +size_t compress_lznt(const void *uncompressed, size_t uncompressed_size,
89069 +                    void *compressed, size_t compressed_size,
89070 +                    struct lznt *ctx);
89071 +ssize_t decompress_lznt(const void *compressed, size_t compressed_size,
89072 +                       void *uncompressed, size_t uncompressed_size);
89074 +static inline bool is_ntfs3(struct ntfs_sb_info *sbi)
89076 +       return sbi->volume.major_ver >= 3;
89079 +/*(sb->s_flags & SB_ACTIVE)*/
89080 +static inline bool is_mounted(struct ntfs_sb_info *sbi)
89082 +       return !!sbi->sb->s_root;
89085 +static inline bool ntfs_is_meta_file(struct ntfs_sb_info *sbi, CLST rno)
89087 +       return rno < MFT_REC_FREE || rno == sbi->objid_no ||
89088 +              rno == sbi->quota_no || rno == sbi->reparse_no ||
89089 +              rno == sbi->usn_jrnl_no;
89092 +static inline void ntfs_unmap_page(struct page *page)
89094 +       kunmap(page);
89095 +       put_page(page);
89098 +static inline struct page *ntfs_map_page(struct address_space *mapping,
89099 +                                        unsigned long index)
89101 +       struct page *page = read_mapping_page(mapping, index, NULL);
89103 +       if (!IS_ERR(page)) {
89104 +               kmap(page);
89105 +               if (!PageError(page))
89106 +                       return page;
89107 +               ntfs_unmap_page(page);
89108 +               return ERR_PTR(-EIO);
89109 +       }
89110 +       return page;
89113 +static inline size_t wnd_zone_bit(const struct wnd_bitmap *wnd)
89115 +       return wnd->zone_bit;
89118 +static inline size_t wnd_zone_len(const struct wnd_bitmap *wnd)
89120 +       return wnd->zone_end - wnd->zone_bit;
89123 +static inline void run_init(struct runs_tree *run)
89125 +       run->runs = NULL;
89126 +       run->count = 0;
89127 +       run->allocated = 0;
89130 +static inline struct runs_tree *run_alloc(void)
89132 +       return ntfs_zalloc(sizeof(struct runs_tree));
89135 +static inline void run_close(struct runs_tree *run)
89137 +       ntfs_vfree(run->runs);
89138 +       memset(run, 0, sizeof(*run));
89141 +static inline void run_free(struct runs_tree *run)
89143 +       if (run) {
89144 +               ntfs_vfree(run->runs);
89145 +               ntfs_free(run);
89146 +       }
89149 +static inline bool run_is_empty(struct runs_tree *run)
89151 +       return !run->count;
89154 +/* NTFS uses quad aligned bitmaps */
89155 +static inline size_t bitmap_size(size_t bits)
89157 +       return QuadAlign((bits + 7) >> 3);
89160 +#define _100ns2seconds 10000000
89161 +#define SecondsToStartOf1970 0x00000002B6109100
89163 +#define NTFS_TIME_GRAN 100
89166 + * kernel2nt
89167 + *
89168 + * converts in-memory kernel timestamp into nt time
89169 + */
89170 +static inline __le64 kernel2nt(const struct timespec64 *ts)
89172 +       // 10^7 units of 100 nanoseconds one second
89173 +       return cpu_to_le64(_100ns2seconds *
89174 +                                  (ts->tv_sec + SecondsToStartOf1970) +
89175 +                          ts->tv_nsec / NTFS_TIME_GRAN);
89179 + * nt2kernel
89180 + *
89181 + * converts on-disk nt time into kernel timestamp
89182 + */
89183 +static inline void nt2kernel(const __le64 tm, struct timespec64 *ts)
89185 +       u64 t = le64_to_cpu(tm) - _100ns2seconds * SecondsToStartOf1970;
89187 +       // WARNING: do_div changes its first argument(!)
89188 +       ts->tv_nsec = do_div(t, _100ns2seconds) * 100;
89189 +       ts->tv_sec = t;
89192 +static inline struct ntfs_sb_info *ntfs_sb(struct super_block *sb)
89194 +       return sb->s_fs_info;
89197 +/* Align up on cluster boundary */
89198 +static inline u64 ntfs_up_cluster(const struct ntfs_sb_info *sbi, u64 size)
89200 +       return (size + sbi->cluster_mask) & sbi->cluster_mask_inv;
89203 +/* Align up on cluster boundary */
89204 +static inline u64 ntfs_up_block(const struct super_block *sb, u64 size)
89206 +       return (size + sb->s_blocksize - 1) & ~(u64)(sb->s_blocksize - 1);
89209 +static inline CLST bytes_to_cluster(const struct ntfs_sb_info *sbi, u64 size)
89211 +       return (size + sbi->cluster_mask) >> sbi->cluster_bits;
89214 +static inline u64 bytes_to_block(const struct super_block *sb, u64 size)
89216 +       return (size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
89219 +static inline struct buffer_head *ntfs_bread(struct super_block *sb,
89220 +                                            sector_t block)
89222 +       struct buffer_head *bh = sb_bread(sb, block);
89224 +       if (bh)
89225 +               return bh;
89227 +       ntfs_err(sb, "failed to read volume at offset 0x%llx",
89228 +                (u64)block << sb->s_blocksize_bits);
89229 +       return NULL;
89232 +static inline bool is_power_of2(size_t v)
89234 +       return v && !(v & (v - 1));
89237 +static inline struct ntfs_inode *ntfs_i(struct inode *inode)
89239 +       return container_of(inode, struct ntfs_inode, vfs_inode);
89242 +static inline bool is_compressed(const struct ntfs_inode *ni)
89244 +       return (ni->std_fa & FILE_ATTRIBUTE_COMPRESSED) ||
89245 +              (ni->ni_flags & NI_FLAG_COMPRESSED_MASK);
89248 +static inline int ni_ext_compress_bits(const struct ntfs_inode *ni)
89250 +       return 0xb + (ni->ni_flags & NI_FLAG_COMPRESSED_MASK);
89253 +/* bits - 0xc, 0xd, 0xe, 0xf, 0x10 */
89254 +static inline void ni_set_ext_compress_bits(struct ntfs_inode *ni, u8 bits)
89256 +       ni->ni_flags |= (bits - 0xb) & NI_FLAG_COMPRESSED_MASK;
89259 +static inline bool is_dedup(const struct ntfs_inode *ni)
89261 +       return ni->ni_flags & NI_FLAG_DEDUPLICATED;
89264 +static inline bool is_encrypted(const struct ntfs_inode *ni)
89266 +       return ni->std_fa & FILE_ATTRIBUTE_ENCRYPTED;
89269 +static inline bool is_sparsed(const struct ntfs_inode *ni)
89271 +       return ni->std_fa & FILE_ATTRIBUTE_SPARSE_FILE;
89274 +static inline int is_resident(struct ntfs_inode *ni)
89276 +       return ni->ni_flags & NI_FLAG_RESIDENT;
89279 +static inline void le16_sub_cpu(__le16 *var, u16 val)
89281 +       *var = cpu_to_le16(le16_to_cpu(*var) - val);
89284 +static inline void le32_sub_cpu(__le32 *var, u32 val)
89286 +       *var = cpu_to_le32(le32_to_cpu(*var) - val);
89289 +static inline void nb_put(struct ntfs_buffers *nb)
89291 +       u32 i, nbufs = nb->nbufs;
89293 +       if (!nbufs)
89294 +               return;
89296 +       for (i = 0; i < nbufs; i++)
89297 +               put_bh(nb->bh[i]);
89298 +       nb->nbufs = 0;
89301 +static inline void put_indx_node(struct indx_node *in)
89303 +       if (!in)
89304 +               return;
89306 +       ntfs_free(in->index);
89307 +       nb_put(&in->nb);
89308 +       ntfs_free(in);
89311 +static inline void mi_clear(struct mft_inode *mi)
89313 +       nb_put(&mi->nb);
89314 +       ntfs_free(mi->mrec);
89315 +       mi->mrec = NULL;
89318 +static inline void ni_lock(struct ntfs_inode *ni)
89320 +       mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_NORMAL);
89323 +static inline void ni_lock_dir(struct ntfs_inode *ni)
89325 +       mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_PARENT);
89328 +static inline void ni_unlock(struct ntfs_inode *ni)
89330 +       mutex_unlock(&ni->ni_lock);
89333 +static inline int ni_trylock(struct ntfs_inode *ni)
89335 +       return mutex_trylock(&ni->ni_lock);
89338 +static inline int attr_load_runs_attr(struct ntfs_inode *ni,
89339 +                                     struct ATTRIB *attr,
89340 +                                     struct runs_tree *run, CLST vcn)
89342 +       return attr_load_runs_vcn(ni, attr->type, attr_name(attr),
89343 +                                 attr->name_len, run, vcn);
89346 +static inline void le64_sub_cpu(__le64 *var, u64 val)
89348 +       *var = cpu_to_le64(le64_to_cpu(*var) - val);
89350 diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c
89351 new file mode 100644
89352 index 000000000000..0d4a6251bddc
89353 --- /dev/null
89354 +++ b/fs/ntfs3/record.c
89355 @@ -0,0 +1,609 @@
89356 +// SPDX-License-Identifier: GPL-2.0
89358 + *
89359 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
89360 + *
89361 + */
89363 +#include <linux/blkdev.h>
89364 +#include <linux/buffer_head.h>
89365 +#include <linux/fs.h>
89366 +#include <linux/nls.h>
89368 +#include "debug.h"
89369 +#include "ntfs.h"
89370 +#include "ntfs_fs.h"
89372 +static inline int compare_attr(const struct ATTRIB *left, enum ATTR_TYPE type,
89373 +                              const __le16 *name, u8 name_len,
89374 +                              const u16 *upcase)
89376 +       /* First, compare the type codes: */
89377 +       int diff = le32_to_cpu(left->type) - le32_to_cpu(type);
89379 +       if (diff)
89380 +               return diff;
89382 +       /*
89383 +        * They have the same type code, so we have to compare the names.
89384 +        */
89385 +       return ntfs_cmp_names(attr_name(left), left->name_len, name, name_len,
89386 +                             upcase, true);
89390 + * mi_new_attt_id
89391 + *
89392 + * returns unused attribute id that is less than mrec->next_attr_id
89393 + */
89394 +static __le16 mi_new_attt_id(struct mft_inode *mi)
89396 +       u16 free_id, max_id, t16;
89397 +       struct MFT_REC *rec = mi->mrec;
89398 +       struct ATTRIB *attr;
89399 +       __le16 id;
89401 +       id = rec->next_attr_id;
89402 +       free_id = le16_to_cpu(id);
89403 +       if (free_id < 0x7FFF) {
89404 +               rec->next_attr_id = cpu_to_le16(free_id + 1);
89405 +               return id;
89406 +       }
89408 +       /* One record can store up to 1024/24 ~= 42 attributes */
89409 +       free_id = 0;
89410 +       max_id = 0;
89412 +       attr = NULL;
89414 +       for (;;) {
89415 +               attr = mi_enum_attr(mi, attr);
89416 +               if (!attr) {
89417 +                       rec->next_attr_id = cpu_to_le16(max_id + 1);
89418 +                       mi->dirty = true;
89419 +                       return cpu_to_le16(free_id);
89420 +               }
89422 +               t16 = le16_to_cpu(attr->id);
89423 +               if (t16 == free_id) {
89424 +                       free_id += 1;
89425 +                       attr = NULL;
89426 +               } else if (max_id < t16)
89427 +                       max_id = t16;
89428 +       }
89431 +int mi_get(struct ntfs_sb_info *sbi, CLST rno, struct mft_inode **mi)
89433 +       int err;
89434 +       struct mft_inode *m = ntfs_zalloc(sizeof(struct mft_inode));
89436 +       if (!m)
89437 +               return -ENOMEM;
89439 +       err = mi_init(m, sbi, rno);
89440 +       if (err) {
89441 +               ntfs_free(m);
89442 +               return err;
89443 +       }
89445 +       err = mi_read(m, false);
89446 +       if (err) {
89447 +               mi_put(m);
89448 +               return err;
89449 +       }
89451 +       *mi = m;
89452 +       return 0;
89455 +void mi_put(struct mft_inode *mi)
89457 +       mi_clear(mi);
89458 +       ntfs_free(mi);
89461 +int mi_init(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno)
89463 +       mi->sbi = sbi;
89464 +       mi->rno = rno;
89465 +       mi->mrec = ntfs_malloc(sbi->record_size);
89466 +       if (!mi->mrec)
89467 +               return -ENOMEM;
89469 +       return 0;
89473 + * mi_read
89474 + *
89475 + * reads MFT data
89476 + */
89477 +int mi_read(struct mft_inode *mi, bool is_mft)
89479 +       int err;
89480 +       struct MFT_REC *rec = mi->mrec;
89481 +       struct ntfs_sb_info *sbi = mi->sbi;
89482 +       u32 bpr = sbi->record_size;
89483 +       u64 vbo = (u64)mi->rno << sbi->record_bits;
89484 +       struct ntfs_inode *mft_ni = sbi->mft.ni;
89485 +       struct runs_tree *run = mft_ni ? &mft_ni->file.run : NULL;
89486 +       struct rw_semaphore *rw_lock = NULL;
89488 +       if (is_mounted(sbi)) {
89489 +               if (!is_mft) {
89490 +                       rw_lock = &mft_ni->file.run_lock;
89491 +                       down_read(rw_lock);
89492 +               }
89493 +       }
89495 +       err = ntfs_read_bh(sbi, run, vbo, &rec->rhdr, bpr, &mi->nb);
89496 +       if (rw_lock)
89497 +               up_read(rw_lock);
89498 +       if (!err)
89499 +               goto ok;
89501 +       if (err == -E_NTFS_FIXUP) {
89502 +               mi->dirty = true;
89503 +               goto ok;
89504 +       }
89506 +       if (err != -ENOENT)
89507 +               goto out;
89509 +       if (rw_lock) {
89510 +               ni_lock(mft_ni);
89511 +               down_write(rw_lock);
89512 +       }
89513 +       err = attr_load_runs_vcn(mft_ni, ATTR_DATA, NULL, 0, &mft_ni->file.run,
89514 +                                vbo >> sbi->cluster_bits);
89515 +       if (rw_lock) {
89516 +               up_write(rw_lock);
89517 +               ni_unlock(mft_ni);
89518 +       }
89519 +       if (err)
89520 +               goto out;
89522 +       if (rw_lock)
89523 +               down_read(rw_lock);
89524 +       err = ntfs_read_bh(sbi, run, vbo, &rec->rhdr, bpr, &mi->nb);
89525 +       if (rw_lock)
89526 +               up_read(rw_lock);
89528 +       if (err == -E_NTFS_FIXUP) {
89529 +               mi->dirty = true;
89530 +               goto ok;
89531 +       }
89532 +       if (err)
89533 +               goto out;
89535 +ok:
89536 +       /* check field 'total' only here */
89537 +       if (le32_to_cpu(rec->total) != bpr) {
89538 +               err = -EINVAL;
89539 +               goto out;
89540 +       }
89542 +       return 0;
89544 +out:
89545 +       return err;
89548 +struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
89550 +       const struct MFT_REC *rec = mi->mrec;
89551 +       u32 used = le32_to_cpu(rec->used);
89552 +       u32 t32, off, asize;
89553 +       u16 t16;
89555 +       if (!attr) {
89556 +               u32 total = le32_to_cpu(rec->total);
89558 +               off = le16_to_cpu(rec->attr_off);
89560 +               if (used > total)
89561 +                       return NULL;
89563 +               if (off >= used || off < MFTRECORD_FIXUP_OFFSET_1 ||
89564 +                   !IsDwordAligned(off)) {
89565 +                       return NULL;
89566 +               }
89568 +               /* Skip non-resident records */
89569 +               if (!is_rec_inuse(rec))
89570 +                       return NULL;
89572 +               attr = Add2Ptr(rec, off);
89573 +       } else {
89574 +               /* Check if input attr inside record */
89575 +               off = PtrOffset(rec, attr);
89576 +               if (off >= used)
89577 +                       return NULL;
89579 +               asize = le32_to_cpu(attr->size);
89580 +               if (asize < SIZEOF_RESIDENT) {
89581 +                       /* Impossible 'cause we should not return such attribute */
89582 +                       return NULL;
89583 +               }
89585 +               attr = Add2Ptr(attr, asize);
89586 +               off += asize;
89587 +       }
89589 +       asize = le32_to_cpu(attr->size);
89591 +       /* Can we use the first field (attr->type) */
89592 +       if (off + 8 > used) {
89593 +               static_assert(QuadAlign(sizeof(enum ATTR_TYPE)) == 8);
89594 +               return NULL;
89595 +       }
89597 +       if (attr->type == ATTR_END) {
89598 +               /* end of enumeration */
89599 +               return NULL;
89600 +       }
89602 +       /* 0x100 is last known attribute for now*/
89603 +       t32 = le32_to_cpu(attr->type);
89604 +       if ((t32 & 0xf) || (t32 > 0x100))
89605 +               return NULL;
89607 +       /* Check boundary */
89608 +       if (off + asize > used)
89609 +               return NULL;
89611 +       /* Check size of attribute */
89612 +       if (!attr->non_res) {
89613 +               if (asize < SIZEOF_RESIDENT)
89614 +                       return NULL;
89616 +               t16 = le16_to_cpu(attr->res.data_off);
89618 +               if (t16 > asize)
89619 +                       return NULL;
89621 +               t32 = le32_to_cpu(attr->res.data_size);
89622 +               if (t16 + t32 > asize)
89623 +                       return NULL;
89625 +               return attr;
89626 +       }
89628 +       /* Check some nonresident fields */
89629 +       if (attr->name_len &&
89630 +           le16_to_cpu(attr->name_off) + sizeof(short) * attr->name_len >
89631 +                   le16_to_cpu(attr->nres.run_off)) {
89632 +               return NULL;
89633 +       }
89635 +       if (attr->nres.svcn || !is_attr_ext(attr)) {
89636 +               if (asize + 8 < SIZEOF_NONRESIDENT)
89637 +                       return NULL;
89639 +               if (attr->nres.c_unit)
89640 +                       return NULL;
89641 +       } else if (asize + 8 < SIZEOF_NONRESIDENT_EX)
89642 +               return NULL;
89644 +       return attr;
89648 + * mi_find_attr
89649 + *
89650 + * finds the attribute by type and name and id
89651 + */
89652 +struct ATTRIB *mi_find_attr(struct mft_inode *mi, struct ATTRIB *attr,
89653 +                           enum ATTR_TYPE type, const __le16 *name,
89654 +                           size_t name_len, const __le16 *id)
89656 +       u32 type_in = le32_to_cpu(type);
89657 +       u32 atype;
89659 +next_attr:
89660 +       attr = mi_enum_attr(mi, attr);
89661 +       if (!attr)
89662 +               return NULL;
89664 +       atype = le32_to_cpu(attr->type);
89665 +       if (atype > type_in)
89666 +               return NULL;
89668 +       if (atype < type_in)
89669 +               goto next_attr;
89671 +       if (attr->name_len != name_len)
89672 +               goto next_attr;
89674 +       if (name_len && memcmp(attr_name(attr), name, name_len * sizeof(short)))
89675 +               goto next_attr;
89677 +       if (id && *id != attr->id)
89678 +               goto next_attr;
89680 +       return attr;
89683 +int mi_write(struct mft_inode *mi, int wait)
89685 +       struct MFT_REC *rec;
89686 +       int err;
89687 +       struct ntfs_sb_info *sbi;
89689 +       if (!mi->dirty)
89690 +               return 0;
89692 +       sbi = mi->sbi;
89693 +       rec = mi->mrec;
89695 +       err = ntfs_write_bh(sbi, &rec->rhdr, &mi->nb, wait);
89696 +       if (err)
89697 +               return err;
89699 +       if (mi->rno < sbi->mft.recs_mirr)
89700 +               sbi->flags |= NTFS_FLAGS_MFTMIRR;
89702 +       mi->dirty = false;
89704 +       return 0;
89707 +int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
89708 +                 __le16 flags, bool is_mft)
89710 +       int err;
89711 +       u16 seq = 1;
89712 +       struct MFT_REC *rec;
89713 +       u64 vbo = (u64)rno << sbi->record_bits;
89715 +       err = mi_init(mi, sbi, rno);
89716 +       if (err)
89717 +               return err;
89719 +       rec = mi->mrec;
89721 +       if (rno == MFT_REC_MFT) {
89722 +               ;
89723 +       } else if (rno < MFT_REC_FREE) {
89724 +               seq = rno;
89725 +       } else if (rno >= sbi->mft.used) {
89726 +               ;
89727 +       } else if (mi_read(mi, is_mft)) {
89728 +               ;
89729 +       } else if (rec->rhdr.sign == NTFS_FILE_SIGNATURE) {
89730 +               /* Record is reused. Update its sequence number */
89731 +               seq = le16_to_cpu(rec->seq) + 1;
89732 +               if (!seq)
89733 +                       seq = 1;
89734 +       }
89736 +       memcpy(rec, sbi->new_rec, sbi->record_size);
89738 +       rec->seq = cpu_to_le16(seq);
89739 +       rec->flags = RECORD_FLAG_IN_USE | flags;
89741 +       mi->dirty = true;
89743 +       if (!mi->nb.nbufs) {
89744 +               struct ntfs_inode *ni = sbi->mft.ni;
89745 +               bool lock = false;
89747 +               if (is_mounted(sbi) && !is_mft) {
89748 +                       down_read(&ni->file.run_lock);
89749 +                       lock = true;
89750 +               }
89752 +               err = ntfs_get_bh(sbi, &ni->file.run, vbo, sbi->record_size,
89753 +                                 &mi->nb);
89754 +               if (lock)
89755 +                       up_read(&ni->file.run_lock);
89756 +       }
89758 +       return err;
89762 + * mi_mark_free
89763 + *
89764 + * marks record as unused and marks it as free in bitmap
89765 + */
89766 +void mi_mark_free(struct mft_inode *mi)
89768 +       CLST rno = mi->rno;
89769 +       struct ntfs_sb_info *sbi = mi->sbi;
89771 +       if (rno >= MFT_REC_RESERVED && rno < MFT_REC_FREE) {
89772 +               ntfs_clear_mft_tail(sbi, rno, rno + 1);
89773 +               mi->dirty = false;
89774 +               return;
89775 +       }
89777 +       if (mi->mrec) {
89778 +               clear_rec_inuse(mi->mrec);
89779 +               mi->dirty = true;
89780 +               mi_write(mi, 0);
89781 +       }
89782 +       ntfs_mark_rec_free(sbi, rno);
89786 + * mi_insert_attr
89787 + *
89788 + * reserves space for new attribute
89789 + * returns not full constructed attribute or NULL if not possible to create
89790 + */
89791 +struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
89792 +                             const __le16 *name, u8 name_len, u32 asize,
89793 +                             u16 name_off)
89795 +       size_t tail;
89796 +       struct ATTRIB *attr;
89797 +       __le16 id;
89798 +       struct MFT_REC *rec = mi->mrec;
89799 +       struct ntfs_sb_info *sbi = mi->sbi;
89800 +       u32 used = le32_to_cpu(rec->used);
89801 +       const u16 *upcase = sbi->upcase;
89802 +       int diff;
89804 +       /* Can we insert mi attribute? */
89805 +       if (used + asize > mi->sbi->record_size)
89806 +               return NULL;
89808 +       /*
89809 +        * Scan through the list of attributes to find the point
89810 +        * at which we should insert it.
89811 +        */
89812 +       attr = NULL;
89813 +       while ((attr = mi_enum_attr(mi, attr))) {
89814 +               diff = compare_attr(attr, type, name, name_len, upcase);
89815 +               if (diff > 0)
89816 +                       break;
89817 +               if (diff < 0)
89818 +                       continue;
89820 +               if (!is_attr_indexed(attr))
89821 +                       return NULL;
89822 +               break;
89823 +       }
89825 +       if (!attr) {
89826 +               tail = 8; /* not used, just to suppress warning */
89827 +               attr = Add2Ptr(rec, used - 8);
89828 +       } else {
89829 +               tail = used - PtrOffset(rec, attr);
89830 +       }
89832 +       id = mi_new_attt_id(mi);
89834 +       memmove(Add2Ptr(attr, asize), attr, tail);
89835 +       memset(attr, 0, asize);
89837 +       attr->type = type;
89838 +       attr->size = cpu_to_le32(asize);
89839 +       attr->name_len = name_len;
89840 +       attr->name_off = cpu_to_le16(name_off);
89841 +       attr->id = id;
89843 +       memmove(Add2Ptr(attr, name_off), name, name_len * sizeof(short));
89844 +       rec->used = cpu_to_le32(used + asize);
89846 +       mi->dirty = true;
89848 +       return attr;
89852 + * mi_remove_attr
89853 + *
89854 + * removes the attribute from record
89855 + * NOTE: The source attr will point to next attribute
89856 + */
89857 +bool mi_remove_attr(struct mft_inode *mi, struct ATTRIB *attr)
89859 +       struct MFT_REC *rec = mi->mrec;
89860 +       u32 aoff = PtrOffset(rec, attr);
89861 +       u32 used = le32_to_cpu(rec->used);
89862 +       u32 asize = le32_to_cpu(attr->size);
89864 +       if (aoff + asize > used)
89865 +               return false;
89867 +       used -= asize;
89868 +       memmove(attr, Add2Ptr(attr, asize), used - aoff);
89869 +       rec->used = cpu_to_le32(used);
89870 +       mi->dirty = true;
89872 +       return true;
89875 +/* bytes = "new attribute size" - "old attribute size" */
89876 +bool mi_resize_attr(struct mft_inode *mi, struct ATTRIB *attr, int bytes)
89878 +       struct MFT_REC *rec = mi->mrec;
89879 +       u32 aoff = PtrOffset(rec, attr);
89880 +       u32 total, used = le32_to_cpu(rec->used);
89881 +       u32 nsize, asize = le32_to_cpu(attr->size);
89882 +       u32 rsize = le32_to_cpu(attr->res.data_size);
89883 +       int tail = (int)(used - aoff - asize);
89884 +       int dsize;
89885 +       char *next;
89887 +       if (tail < 0 || aoff >= used)
89888 +               return false;
89890 +       if (!bytes)
89891 +               return true;
89893 +       total = le32_to_cpu(rec->total);
89894 +       next = Add2Ptr(attr, asize);
89896 +       if (bytes > 0) {
89897 +               dsize = QuadAlign(bytes);
89898 +               if (used + dsize > total)
89899 +                       return false;
89900 +               nsize = asize + dsize;
89901 +               // move tail
89902 +               memmove(next + dsize, next, tail);
89903 +               memset(next, 0, dsize);
89904 +               used += dsize;
89905 +               rsize += dsize;
89906 +       } else {
89907 +               dsize = QuadAlign(-bytes);
89908 +               if (dsize > asize)
89909 +                       return false;
89910 +               nsize = asize - dsize;
89911 +               memmove(next - dsize, next, tail);
89912 +               used -= dsize;
89913 +               rsize -= dsize;
89914 +       }
89916 +       rec->used = cpu_to_le32(used);
89917 +       attr->size = cpu_to_le32(nsize);
89918 +       if (!attr->non_res)
89919 +               attr->res.data_size = cpu_to_le32(rsize);
89920 +       mi->dirty = true;
89922 +       return true;
89925 +int mi_pack_runs(struct mft_inode *mi, struct ATTRIB *attr,
89926 +                struct runs_tree *run, CLST len)
89928 +       int err = 0;
89929 +       struct ntfs_sb_info *sbi = mi->sbi;
89930 +       u32 new_run_size;
89931 +       CLST plen;
89932 +       struct MFT_REC *rec = mi->mrec;
89933 +       CLST svcn = le64_to_cpu(attr->nres.svcn);
89934 +       u32 used = le32_to_cpu(rec->used);
89935 +       u32 aoff = PtrOffset(rec, attr);
89936 +       u32 asize = le32_to_cpu(attr->size);
89937 +       char *next = Add2Ptr(attr, asize);
89938 +       u16 run_off = le16_to_cpu(attr->nres.run_off);
89939 +       u32 run_size = asize - run_off;
89940 +       u32 tail = used - aoff - asize;
89941 +       u32 dsize = sbi->record_size - used;
89943 +       /* Make a maximum gap in current record */
89944 +       memmove(next + dsize, next, tail);
89946 +       /* Pack as much as possible */
89947 +       err = run_pack(run, svcn, len, Add2Ptr(attr, run_off), run_size + dsize,
89948 +                      &plen);
89949 +       if (err < 0) {
89950 +               memmove(next, next + dsize, tail);
89951 +               return err;
89952 +       }
89954 +       new_run_size = QuadAlign(err);
89956 +       memmove(next + new_run_size - run_size, next + dsize, tail);
89958 +       attr->size = cpu_to_le32(asize + new_run_size - run_size);
89959 +       attr->nres.evcn = cpu_to_le64(svcn + plen - 1);
89960 +       rec->used = cpu_to_le32(used + new_run_size - run_size);
89961 +       mi->dirty = true;
89963 +       return 0;
89965 diff --git a/fs/ntfs3/run.c b/fs/ntfs3/run.c
89966 new file mode 100644
89967 index 000000000000..5cdf6efe67e0
89968 --- /dev/null
89969 +++ b/fs/ntfs3/run.c
89970 @@ -0,0 +1,1111 @@
89971 +// SPDX-License-Identifier: GPL-2.0
89973 + *
89974 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
89975 + *
89976 + * TODO: try to use extents tree (instead of array)
89977 + */
89979 +#include <linux/blkdev.h>
89980 +#include <linux/buffer_head.h>
89981 +#include <linux/fs.h>
89982 +#include <linux/nls.h>
89984 +#include "debug.h"
89985 +#include "ntfs.h"
89986 +#include "ntfs_fs.h"
89988 +/* runs_tree is a continues memory. Try to avoid big size  */
89989 +#define NTFS3_RUN_MAX_BYTES 0x10000
89991 +struct ntfs_run {
89992 +       CLST vcn; /* virtual cluster number */
89993 +       CLST len; /* length in clusters */
89994 +       CLST lcn; /* logical cluster number */
89998 + * run_lookup
89999 + *
90000 + * Lookup the index of a MCB entry that is first <= vcn.
90001 + * case of success it will return non-zero value and set
90002 + * 'index' parameter to index of entry been found.
90003 + * case of entry missing from list 'index' will be set to
90004 + * point to insertion position for the entry question.
90005 + */
90006 +bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *index)
90008 +       size_t min_idx, max_idx, mid_idx;
90009 +       struct ntfs_run *r;
90011 +       if (!run->count) {
90012 +               *index = 0;
90013 +               return false;
90014 +       }
90016 +       min_idx = 0;
90017 +       max_idx = run->count - 1;
90019 +       /* Check boundary cases specially, 'cause they cover the often requests */
90020 +       r = run->runs;
90021 +       if (vcn < r->vcn) {
90022 +               *index = 0;
90023 +               return false;
90024 +       }
90026 +       if (vcn < r->vcn + r->len) {
90027 +               *index = 0;
90028 +               return true;
90029 +       }
90031 +       r += max_idx;
90032 +       if (vcn >= r->vcn + r->len) {
90033 +               *index = run->count;
90034 +               return false;
90035 +       }
90037 +       if (vcn >= r->vcn) {
90038 +               *index = max_idx;
90039 +               return true;
90040 +       }
90042 +       do {
90043 +               mid_idx = min_idx + ((max_idx - min_idx) >> 1);
90044 +               r = run->runs + mid_idx;
90046 +               if (vcn < r->vcn) {
90047 +                       max_idx = mid_idx - 1;
90048 +                       if (!mid_idx)
90049 +                               break;
90050 +               } else if (vcn >= r->vcn + r->len) {
90051 +                       min_idx = mid_idx + 1;
90052 +               } else {
90053 +                       *index = mid_idx;
90054 +                       return true;
90055 +               }
90056 +       } while (min_idx <= max_idx);
90058 +       *index = max_idx + 1;
90059 +       return false;
90063 + * run_consolidate
90064 + *
90065 + * consolidate runs starting from a given one.
90066 + */
90067 +static void run_consolidate(struct runs_tree *run, size_t index)
90069 +       size_t i;
90070 +       struct ntfs_run *r = run->runs + index;
90072 +       while (index + 1 < run->count) {
90073 +               /*
90074 +                * I should merge current run with next
90075 +                * if start of the next run lies inside one being tested.
90076 +                */
90077 +               struct ntfs_run *n = r + 1;
90078 +               CLST end = r->vcn + r->len;
90079 +               CLST dl;
90081 +               /* Stop if runs are not aligned one to another. */
90082 +               if (n->vcn > end)
90083 +                       break;
90085 +               dl = end - n->vcn;
90087 +               /*
90088 +                * If range at index overlaps with next one
90089 +                * then I will either adjust it's start position
90090 +                * or (if completely matches) dust remove one from the list.
90091 +                */
90092 +               if (dl > 0) {
90093 +                       if (n->len <= dl)
90094 +                               goto remove_next_range;
90096 +                       n->len -= dl;
90097 +                       n->vcn += dl;
90098 +                       if (n->lcn != SPARSE_LCN)
90099 +                               n->lcn += dl;
90100 +                       dl = 0;
90101 +               }
90103 +               /*
90104 +                * Stop if sparse mode does not match
90105 +                * both current and next runs.
90106 +                */
90107 +               if ((n->lcn == SPARSE_LCN) != (r->lcn == SPARSE_LCN)) {
90108 +                       index += 1;
90109 +                       r = n;
90110 +                       continue;
90111 +               }
90113 +               /*
90114 +                * Check if volume block
90115 +                * of a next run lcn does not match
90116 +                * last volume block of the current run.
90117 +                */
90118 +               if (n->lcn != SPARSE_LCN && n->lcn != r->lcn + r->len)
90119 +                       break;
90121 +               /*
90122 +                * Next and current are siblings.
90123 +                * Eat/join.
90124 +                */
90125 +               r->len += n->len - dl;
90127 +remove_next_range:
90128 +               i = run->count - (index + 1);
90129 +               if (i > 1)
90130 +                       memmove(n, n + 1, sizeof(*n) * (i - 1));
90132 +               run->count -= 1;
90133 +       }
90136 +/* returns true if range [svcn - evcn] is mapped*/
90137 +bool run_is_mapped_full(const struct runs_tree *run, CLST svcn, CLST evcn)
90139 +       size_t i;
90140 +       const struct ntfs_run *r, *end;
90141 +       CLST next_vcn;
90143 +       if (!run_lookup(run, svcn, &i))
90144 +               return false;
90146 +       end = run->runs + run->count;
90147 +       r = run->runs + i;
90149 +       for (;;) {
90150 +               next_vcn = r->vcn + r->len;
90151 +               if (next_vcn > evcn)
90152 +                       return true;
90154 +               if (++r >= end)
90155 +                       return false;
90157 +               if (r->vcn != next_vcn)
90158 +                       return false;
90159 +       }
90162 +bool run_lookup_entry(const struct runs_tree *run, CLST vcn, CLST *lcn,
90163 +                     CLST *len, size_t *index)
90165 +       size_t idx;
90166 +       CLST gap;
90167 +       struct ntfs_run *r;
90169 +       /* Fail immediately if nrun was not touched yet. */
90170 +       if (!run->runs)
90171 +               return false;
90173 +       if (!run_lookup(run, vcn, &idx))
90174 +               return false;
90176 +       r = run->runs + idx;
90178 +       if (vcn >= r->vcn + r->len)
90179 +               return false;
90181 +       gap = vcn - r->vcn;
90182 +       if (r->len <= gap)
90183 +               return false;
90185 +       *lcn = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + gap);
90187 +       if (len)
90188 +               *len = r->len - gap;
90189 +       if (index)
90190 +               *index = idx;
90192 +       return true;
90196 + * run_truncate_head
90197 + *
90198 + * decommit the range before vcn
90199 + */
90200 +void run_truncate_head(struct runs_tree *run, CLST vcn)
90202 +       size_t index;
90203 +       struct ntfs_run *r;
90205 +       if (run_lookup(run, vcn, &index)) {
90206 +               r = run->runs + index;
90208 +               if (vcn > r->vcn) {
90209 +                       CLST dlen = vcn - r->vcn;
90211 +                       r->vcn = vcn;
90212 +                       r->len -= dlen;
90213 +                       if (r->lcn != SPARSE_LCN)
90214 +                               r->lcn += dlen;
90215 +               }
90217 +               if (!index)
90218 +                       return;
90219 +       }
90220 +       r = run->runs;
90221 +       memmove(r, r + index, sizeof(*r) * (run->count - index));
90223 +       run->count -= index;
90225 +       if (!run->count) {
90226 +               ntfs_vfree(run->runs);
90227 +               run->runs = NULL;
90228 +               run->allocated = 0;
90229 +       }
90233 + * run_truncate
90234 + *
90235 + * decommit the range after vcn
90236 + */
90237 +void run_truncate(struct runs_tree *run, CLST vcn)
90239 +       size_t index;
90241 +       /*
90242 +        * If I hit the range then
90243 +        * I have to truncate one.
90244 +        * If range to be truncated is becoming empty
90245 +        * then it will entirely be removed.
90246 +        */
90247 +       if (run_lookup(run, vcn, &index)) {
90248 +               struct ntfs_run *r = run->runs + index;
90250 +               r->len = vcn - r->vcn;
90252 +               if (r->len > 0)
90253 +                       index += 1;
90254 +       }
90256 +       /*
90257 +        * At this point 'index' is set to
90258 +        * position that should be thrown away (including index itself)
90259 +        * Simple one - just set the limit.
90260 +        */
90261 +       run->count = index;
90263 +       /* Do not reallocate array 'runs'. Only free if possible */
90264 +       if (!index) {
90265 +               ntfs_vfree(run->runs);
90266 +               run->runs = NULL;
90267 +               run->allocated = 0;
90268 +       }
90271 +/* trim head and tail if necessary*/
90272 +void run_truncate_around(struct runs_tree *run, CLST vcn)
90274 +       run_truncate_head(run, vcn);
90276 +       if (run->count >= NTFS3_RUN_MAX_BYTES / sizeof(struct ntfs_run) / 2)
90277 +               run_truncate(run, (run->runs + (run->count >> 1))->vcn);
90281 + * run_add_entry
90282 + *
90283 + * sets location to known state.
90284 + * run to be added may overlap with existing location.
90285 + * returns false if of memory
90286 + */
90287 +bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len,
90288 +                  bool is_mft)
90290 +       size_t used, index;
90291 +       struct ntfs_run *r;
90292 +       bool inrange;
90293 +       CLST tail_vcn = 0, tail_len = 0, tail_lcn = 0;
90294 +       bool should_add_tail = false;
90296 +       /*
90297 +        * Lookup the insertion point.
90298 +        *
90299 +        * Execute bsearch for the entry containing
90300 +        * start position question.
90301 +        */
90302 +       inrange = run_lookup(run, vcn, &index);
90304 +       /*
90305 +        * Shortcut here would be case of
90306 +        * range not been found but one been added
90307 +        * continues previous run.
90308 +        * this case I can directly make use of
90309 +        * existing range as my start point.
90310 +        */
90311 +       if (!inrange && index > 0) {
90312 +               struct ntfs_run *t = run->runs + index - 1;
90314 +               if (t->vcn + t->len == vcn &&
90315 +                   (t->lcn == SPARSE_LCN) == (lcn == SPARSE_LCN) &&
90316 +                   (lcn == SPARSE_LCN || lcn == t->lcn + t->len)) {
90317 +                       inrange = true;
90318 +                       index -= 1;
90319 +               }
90320 +       }
90322 +       /*
90323 +        * At this point 'index' either points to the range
90324 +        * containing start position or to the insertion position
90325 +        * for a new range.
90326 +        * So first let's check if range I'm probing is here already.
90327 +        */
90328 +       if (!inrange) {
90329 +requires_new_range:
90330 +               /*
90331 +                * Range was not found.
90332 +                * Insert at position 'index'
90333 +                */
90334 +               used = run->count * sizeof(struct ntfs_run);
90336 +               /*
90337 +                * Check allocated space.
90338 +                * If one is not enough to get one more entry
90339 +                * then it will be reallocated
90340 +                */
90341 +               if (run->allocated < used + sizeof(struct ntfs_run)) {
90342 +                       size_t bytes;
90343 +                       struct ntfs_run *new_ptr;
90345 +                       /* Use power of 2 for 'bytes'*/
90346 +                       if (!used) {
90347 +                               bytes = 64;
90348 +                       } else if (used <= 16 * PAGE_SIZE) {
90349 +                               if (is_power_of2(run->allocated))
90350 +                                       bytes = run->allocated << 1;
90351 +                               else
90352 +                                       bytes = (size_t)1
90353 +                                               << (2 + blksize_bits(used));
90354 +                       } else {
90355 +                               bytes = run->allocated + (16 * PAGE_SIZE);
90356 +                       }
90358 +                       WARN_ON(!is_mft && bytes > NTFS3_RUN_MAX_BYTES);
90360 +                       new_ptr = ntfs_vmalloc(bytes);
90362 +                       if (!new_ptr)
90363 +                               return false;
90365 +                       r = new_ptr + index;
90366 +                       memcpy(new_ptr, run->runs,
90367 +                              index * sizeof(struct ntfs_run));
90368 +                       memcpy(r + 1, run->runs + index,
90369 +                              sizeof(struct ntfs_run) * (run->count - index));
90371 +                       ntfs_vfree(run->runs);
90372 +                       run->runs = new_ptr;
90373 +                       run->allocated = bytes;
90375 +               } else {
90376 +                       size_t i = run->count - index;
90378 +                       r = run->runs + index;
90380 +                       /* memmove appears to be a bottle neck here... */
90381 +                       if (i > 0)
90382 +                               memmove(r + 1, r, sizeof(struct ntfs_run) * i);
90383 +               }
90385 +               r->vcn = vcn;
90386 +               r->lcn = lcn;
90387 +               r->len = len;
90388 +               run->count += 1;
90389 +       } else {
90390 +               r = run->runs + index;
90392 +               /*
90393 +                * If one of ranges was not allocated
90394 +                * then I have to split location I just matched.
90395 +                * and insert current one
90396 +                * a common case this requires tail to be reinserted
90397 +                * a recursive call.
90398 +                */
90399 +               if (((lcn == SPARSE_LCN) != (r->lcn == SPARSE_LCN)) ||
90400 +                   (lcn != SPARSE_LCN && lcn != r->lcn + (vcn - r->vcn))) {
90401 +                       CLST to_eat = vcn - r->vcn;
90402 +                       CLST Tovcn = to_eat + len;
90404 +                       should_add_tail = Tovcn < r->len;
90406 +                       if (should_add_tail) {
90407 +                               tail_lcn = r->lcn == SPARSE_LCN
90408 +                                                  ? SPARSE_LCN
90409 +                                                  : (r->lcn + Tovcn);
90410 +                               tail_vcn = r->vcn + Tovcn;
90411 +                               tail_len = r->len - Tovcn;
90412 +                       }
90414 +                       if (to_eat > 0) {
90415 +                               r->len = to_eat;
90416 +                               inrange = false;
90417 +                               index += 1;
90418 +                               goto requires_new_range;
90419 +                       }
90421 +                       /* lcn should match one I'm going to add. */
90422 +                       r->lcn = lcn;
90423 +               }
90425 +               /*
90426 +                * If existing range fits then I'm done.
90427 +                * Otherwise extend found one and fall back to range jocode.
90428 +                */
90429 +               if (r->vcn + r->len < vcn + len)
90430 +                       r->len += len - ((r->vcn + r->len) - vcn);
90431 +       }
90433 +       /*
90434 +        * And normalize it starting from insertion point.
90435 +        * It's possible that no insertion needed case if
90436 +        * start point lies within the range of an entry
90437 +        * that 'index' points to.
90438 +        */
90439 +       if (inrange && index > 0)
90440 +               index -= 1;
90441 +       run_consolidate(run, index);
90442 +       run_consolidate(run, index + 1);
90444 +       /*
90445 +        * a special case
90446 +        * I have to add extra range a tail.
90447 +        */
90448 +       if (should_add_tail &&
90449 +           !run_add_entry(run, tail_vcn, tail_lcn, tail_len, is_mft))
90450 +               return false;
90452 +       return true;
90455 +/*helper for attr_collapse_range, which is helper for fallocate(collapse_range)*/
90456 +bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
90458 +       size_t index, eat;
90459 +       struct ntfs_run *r, *e, *eat_start, *eat_end;
90460 +       CLST end;
90462 +       if (WARN_ON(!run_lookup(run, vcn, &index)))
90463 +               return true; /* should never be here */
90465 +       e = run->runs + run->count;
90466 +       r = run->runs + index;
90467 +       end = vcn + len;
90469 +       if (vcn > r->vcn) {
90470 +               if (r->vcn + r->len <= end) {
90471 +                       /* collapse tail of run */
90472 +                       r->len = vcn - r->vcn;
90473 +               } else if (r->lcn == SPARSE_LCN) {
90474 +                       /* collapse a middle part of sparsed run */
90475 +                       r->len -= len;
90476 +               } else {
90477 +                       /* collapse a middle part of normal run, split */
90478 +                       if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
90479 +                               return false;
90480 +                       return run_collapse_range(run, vcn, len);
90481 +               }
90483 +               r += 1;
90484 +       }
90486 +       eat_start = r;
90487 +       eat_end = r;
90489 +       for (; r < e; r++) {
90490 +               CLST d;
90492 +               if (r->vcn >= end) {
90493 +                       r->vcn -= len;
90494 +                       continue;
90495 +               }
90497 +               if (r->vcn + r->len <= end) {
90498 +                       /* eat this run */
90499 +                       eat_end = r + 1;
90500 +                       continue;
90501 +               }
90503 +               d = end - r->vcn;
90504 +               if (r->lcn != SPARSE_LCN)
90505 +                       r->lcn += d;
90506 +               r->len -= d;
90507 +               r->vcn -= len - d;
90508 +       }
90510 +       eat = eat_end - eat_start;
90511 +       memmove(eat_start, eat_end, (e - eat_end) * sizeof(*r));
90512 +       run->count -= eat;
90514 +       return true;
90518 + * run_get_entry
90519 + *
90520 + * returns index-th mapped region
90521 + */
90522 +bool run_get_entry(const struct runs_tree *run, size_t index, CLST *vcn,
90523 +                  CLST *lcn, CLST *len)
90525 +       const struct ntfs_run *r;
90527 +       if (index >= run->count)
90528 +               return false;
90530 +       r = run->runs + index;
90532 +       if (!r->len)
90533 +               return false;
90535 +       if (vcn)
90536 +               *vcn = r->vcn;
90537 +       if (lcn)
90538 +               *lcn = r->lcn;
90539 +       if (len)
90540 +               *len = r->len;
90541 +       return true;
90545 + * run_packed_size
90546 + *
90547 + * calculates the size of packed int64
90548 + */
90549 +#ifdef __BIG_ENDIAN
90550 +static inline int run_packed_size(const s64 n)
90552 +       const u8 *p = (const u8 *)&n + sizeof(n) - 1;
90554 +       if (n >= 0) {
90555 +               if (p[-7] || p[-6] || p[-5] || p[-4])
90556 +                       p -= 4;
90557 +               if (p[-3] || p[-2])
90558 +                       p -= 2;
90559 +               if (p[-1])
90560 +                       p -= 1;
90561 +               if (p[0] & 0x80)
90562 +                       p -= 1;
90563 +       } else {
90564 +               if (p[-7] != 0xff || p[-6] != 0xff || p[-5] != 0xff ||
90565 +                   p[-4] != 0xff)
90566 +                       p -= 4;
90567 +               if (p[-3] != 0xff || p[-2] != 0xff)
90568 +                       p -= 2;
90569 +               if (p[-1] != 0xff)
90570 +                       p -= 1;
90571 +               if (!(p[0] & 0x80))
90572 +                       p -= 1;
90573 +       }
90574 +       return (const u8 *)&n + sizeof(n) - p;
90577 +/* full trusted function. It does not check 'size' for errors */
90578 +static inline void run_pack_s64(u8 *run_buf, u8 size, s64 v)
90580 +       const u8 *p = (u8 *)&v;
90582 +       switch (size) {
90583 +       case 8:
90584 +               run_buf[7] = p[0];
90585 +               fallthrough;
90586 +       case 7:
90587 +               run_buf[6] = p[1];
90588 +               fallthrough;
90589 +       case 6:
90590 +               run_buf[5] = p[2];
90591 +               fallthrough;
90592 +       case 5:
90593 +               run_buf[4] = p[3];
90594 +               fallthrough;
90595 +       case 4:
90596 +               run_buf[3] = p[4];
90597 +               fallthrough;
90598 +       case 3:
90599 +               run_buf[2] = p[5];
90600 +               fallthrough;
90601 +       case 2:
90602 +               run_buf[1] = p[6];
90603 +               fallthrough;
90604 +       case 1:
90605 +               run_buf[0] = p[7];
90606 +       }
90609 +/* full trusted function. It does not check 'size' for errors */
90610 +static inline s64 run_unpack_s64(const u8 *run_buf, u8 size, s64 v)
90612 +       u8 *p = (u8 *)&v;
90614 +       switch (size) {
90615 +       case 8:
90616 +               p[0] = run_buf[7];
90617 +               fallthrough;
90618 +       case 7:
90619 +               p[1] = run_buf[6];
90620 +               fallthrough;
90621 +       case 6:
90622 +               p[2] = run_buf[5];
90623 +               fallthrough;
90624 +       case 5:
90625 +               p[3] = run_buf[4];
90626 +               fallthrough;
90627 +       case 4:
90628 +               p[4] = run_buf[3];
90629 +               fallthrough;
90630 +       case 3:
90631 +               p[5] = run_buf[2];
90632 +               fallthrough;
90633 +       case 2:
90634 +               p[6] = run_buf[1];
90635 +               fallthrough;
90636 +       case 1:
90637 +               p[7] = run_buf[0];
90638 +       }
90639 +       return v;
90642 +#else
90644 +static inline int run_packed_size(const s64 n)
90646 +       const u8 *p = (const u8 *)&n;
90648 +       if (n >= 0) {
90649 +               if (p[7] || p[6] || p[5] || p[4])
90650 +                       p += 4;
90651 +               if (p[3] || p[2])
90652 +                       p += 2;
90653 +               if (p[1])
90654 +                       p += 1;
90655 +               if (p[0] & 0x80)
90656 +                       p += 1;
90657 +       } else {
90658 +               if (p[7] != 0xff || p[6] != 0xff || p[5] != 0xff ||
90659 +                   p[4] != 0xff)
90660 +                       p += 4;
90661 +               if (p[3] != 0xff || p[2] != 0xff)
90662 +                       p += 2;
90663 +               if (p[1] != 0xff)
90664 +                       p += 1;
90665 +               if (!(p[0] & 0x80))
90666 +                       p += 1;
90667 +       }
90669 +       return 1 + p - (const u8 *)&n;
90672 +/* full trusted function. It does not check 'size' for errors */
90673 +static inline void run_pack_s64(u8 *run_buf, u8 size, s64 v)
90675 +       const u8 *p = (u8 *)&v;
90677 +       /* memcpy( run_buf, &v, size); is it faster? */
90678 +       switch (size) {
90679 +       case 8:
90680 +               run_buf[7] = p[7];
90681 +               fallthrough;
90682 +       case 7:
90683 +               run_buf[6] = p[6];
90684 +               fallthrough;
90685 +       case 6:
90686 +               run_buf[5] = p[5];
90687 +               fallthrough;
90688 +       case 5:
90689 +               run_buf[4] = p[4];
90690 +               fallthrough;
90691 +       case 4:
90692 +               run_buf[3] = p[3];
90693 +               fallthrough;
90694 +       case 3:
90695 +               run_buf[2] = p[2];
90696 +               fallthrough;
90697 +       case 2:
90698 +               run_buf[1] = p[1];
90699 +               fallthrough;
90700 +       case 1:
90701 +               run_buf[0] = p[0];
90702 +       }
90705 +/* full trusted function. It does not check 'size' for errors */
90706 +static inline s64 run_unpack_s64(const u8 *run_buf, u8 size, s64 v)
90708 +       u8 *p = (u8 *)&v;
90710 +       /* memcpy( &v, run_buf, size); is it faster? */
90711 +       switch (size) {
90712 +       case 8:
90713 +               p[7] = run_buf[7];
90714 +               fallthrough;
90715 +       case 7:
90716 +               p[6] = run_buf[6];
90717 +               fallthrough;
90718 +       case 6:
90719 +               p[5] = run_buf[5];
90720 +               fallthrough;
90721 +       case 5:
90722 +               p[4] = run_buf[4];
90723 +               fallthrough;
90724 +       case 4:
90725 +               p[3] = run_buf[3];
90726 +               fallthrough;
90727 +       case 3:
90728 +               p[2] = run_buf[2];
90729 +               fallthrough;
90730 +       case 2:
90731 +               p[1] = run_buf[1];
90732 +               fallthrough;
90733 +       case 1:
90734 +               p[0] = run_buf[0];
90735 +       }
90736 +       return v;
90738 +#endif
90741 + * run_pack
90742 + *
90743 + * packs runs into buffer
90744 + * packed_vcns - how much runs we have packed
90745 + * packed_size - how much bytes we have used run_buf
90746 + */
90747 +int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf,
90748 +            u32 run_buf_size, CLST *packed_vcns)
90750 +       CLST next_vcn, vcn, lcn;
90751 +       CLST prev_lcn = 0;
90752 +       CLST evcn1 = svcn + len;
90753 +       int packed_size = 0;
90754 +       size_t i;
90755 +       bool ok;
90756 +       s64 dlcn;
90757 +       int offset_size, size_size, tmp;
90759 +       next_vcn = vcn = svcn;
90761 +       *packed_vcns = 0;
90763 +       if (!len)
90764 +               goto out;
90766 +       ok = run_lookup_entry(run, vcn, &lcn, &len, &i);
90768 +       if (!ok)
90769 +               goto error;
90771 +       if (next_vcn != vcn)
90772 +               goto error;
90774 +       for (;;) {
90775 +               next_vcn = vcn + len;
90776 +               if (next_vcn > evcn1)
90777 +                       len = evcn1 - vcn;
90779 +               /* how much bytes required to pack len */
90780 +               size_size = run_packed_size(len);
90782 +               /* offset_size - how much bytes is packed dlcn */
90783 +               if (lcn == SPARSE_LCN) {
90784 +                       offset_size = 0;
90785 +                       dlcn = 0;
90786 +               } else {
90787 +                       /* NOTE: lcn can be less than prev_lcn! */
90788 +                       dlcn = (s64)lcn - prev_lcn;
90789 +                       offset_size = run_packed_size(dlcn);
90790 +                       prev_lcn = lcn;
90791 +               }
90793 +               tmp = run_buf_size - packed_size - 2 - offset_size;
90794 +               if (tmp <= 0)
90795 +                       goto out;
90797 +               /* can we store this entire run */
90798 +               if (tmp < size_size)
90799 +                       goto out;
90801 +               if (run_buf) {
90802 +                       /* pack run header */
90803 +                       run_buf[0] = ((u8)(size_size | (offset_size << 4)));
90804 +                       run_buf += 1;
90806 +                       /* Pack the length of run */
90807 +                       run_pack_s64(run_buf, size_size, len);
90809 +                       run_buf += size_size;
90810 +                       /* Pack the offset from previous lcn */
90811 +                       run_pack_s64(run_buf, offset_size, dlcn);
90812 +                       run_buf += offset_size;
90813 +               }
90815 +               packed_size += 1 + offset_size + size_size;
90816 +               *packed_vcns += len;
90818 +               if (packed_size + 1 >= run_buf_size || next_vcn >= evcn1)
90819 +                       goto out;
90821 +               ok = run_get_entry(run, ++i, &vcn, &lcn, &len);
90822 +               if (!ok)
90823 +                       goto error;
90825 +               if (next_vcn != vcn)
90826 +                       goto error;
90827 +       }
90829 +out:
90830 +       /* Store last zero */
90831 +       if (run_buf)
90832 +               run_buf[0] = 0;
90834 +       return packed_size + 1;
90836 +error:
90837 +       return -EOPNOTSUPP;
90841 + * run_unpack
90842 + *
90843 + * unpacks packed runs from "run_buf"
90844 + * returns error, if negative, or real used bytes
90845 + */
90846 +int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
90847 +              CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
90848 +              u32 run_buf_size)
90850 +       u64 prev_lcn, vcn64, lcn, next_vcn;
90851 +       const u8 *run_last, *run_0;
90852 +       bool is_mft = ino == MFT_REC_MFT;
90854 +       /* Check for empty */
90855 +       if (evcn + 1 == svcn)
90856 +               return 0;
90858 +       if (evcn < svcn)
90859 +               return -EINVAL;
90861 +       run_0 = run_buf;
90862 +       run_last = run_buf + run_buf_size;
90863 +       prev_lcn = 0;
90864 +       vcn64 = svcn;
90866 +       /* Read all runs the chain */
90867 +       /* size_size - how much bytes is packed len */
90868 +       while (run_buf < run_last) {
90869 +               /* size_size - how much bytes is packed len */
90870 +               u8 size_size = *run_buf & 0xF;
90871 +               /* offset_size - how much bytes is packed dlcn */
90872 +               u8 offset_size = *run_buf++ >> 4;
90873 +               u64 len;
90875 +               if (!size_size)
90876 +                       break;
90878 +               /*
90879 +                * Unpack runs.
90880 +                * NOTE: runs are stored little endian order
90881 +                * "len" is unsigned value, "dlcn" is signed
90882 +                * Large positive number requires to store 5 bytes
90883 +                * e.g.: 05 FF 7E FF FF 00 00 00
90884 +                */
90885 +               if (size_size > 8)
90886 +                       return -EINVAL;
90888 +               len = run_unpack_s64(run_buf, size_size, 0);
90889 +               /* skip size_size */
90890 +               run_buf += size_size;
90892 +               if (!len)
90893 +                       return -EINVAL;
90895 +               if (!offset_size)
90896 +                       lcn = SPARSE_LCN64;
90897 +               else if (offset_size <= 8) {
90898 +                       s64 dlcn;
90900 +                       /* initial value of dlcn is -1 or 0 */
90901 +                       dlcn = (run_buf[offset_size - 1] & 0x80) ? (s64)-1 : 0;
90902 +                       dlcn = run_unpack_s64(run_buf, offset_size, dlcn);
90903 +                       /* skip offset_size */
90904 +                       run_buf += offset_size;
90906 +                       if (!dlcn)
90907 +                               return -EINVAL;
90908 +                       lcn = prev_lcn + dlcn;
90909 +                       prev_lcn = lcn;
90910 +               } else
90911 +                       return -EINVAL;
90913 +               next_vcn = vcn64 + len;
90914 +               /* check boundary */
90915 +               if (next_vcn > evcn + 1)
90916 +                       return -EINVAL;
90918 +#ifndef CONFIG_NTFS3_64BIT_CLUSTER
90919 +               if (next_vcn > 0x100000000ull || (lcn + len) > 0x100000000ull) {
90920 +                       ntfs_err(
90921 +                               sbi->sb,
90922 +                               "This driver is compiled whitout CONFIG_NTFS3_64BIT_CLUSTER (like windows driver).\n"
90923 +                               "Volume contains 64 bits run: vcn %llx, lcn %llx, len %llx.\n"
90924 +                               "Activate CONFIG_NTFS3_64BIT_CLUSTER to process this case",
90925 +                               vcn64, lcn, len);
90926 +                       return -EOPNOTSUPP;
90927 +               }
90928 +#endif
90929 +               if (lcn != SPARSE_LCN64 && lcn + len > sbi->used.bitmap.nbits) {
90930 +                       /* lcn range is out of volume */
90931 +                       return -EINVAL;
90932 +               }
90934 +               if (!run)
90935 +                       ; /* called from check_attr(fslog.c) to check run */
90936 +               else if (run == RUN_DEALLOCATE) {
90937 +                       /* called from ni_delete_all to free clusters without storing in run */
90938 +                       if (lcn != SPARSE_LCN64)
90939 +                               mark_as_free_ex(sbi, lcn, len, true);
90940 +               } else if (vcn64 >= vcn) {
90941 +                       if (!run_add_entry(run, vcn64, lcn, len, is_mft))
90942 +                               return -ENOMEM;
90943 +               } else if (next_vcn > vcn) {
90944 +                       u64 dlen = vcn - vcn64;
90946 +                       if (!run_add_entry(run, vcn, lcn + dlen, len - dlen,
90947 +                                          is_mft))
90948 +                               return -ENOMEM;
90949 +               }
90951 +               vcn64 = next_vcn;
90952 +       }
90954 +       if (vcn64 != evcn + 1) {
90955 +               /* not expected length of unpacked runs */
90956 +               return -EINVAL;
90957 +       }
90959 +       return run_buf - run_0;
90962 +#ifdef NTFS3_CHECK_FREE_CLST
90964 + * run_unpack_ex
90965 + *
90966 + * unpacks packed runs from "run_buf"
90967 + * checks unpacked runs to be used in bitmap
90968 + * returns error, if negative, or real used bytes
90969 + */
90970 +int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
90971 +                 CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
90972 +                 u32 run_buf_size)
90974 +       int ret, err;
90975 +       CLST next_vcn, lcn, len;
90976 +       size_t index;
90977 +       bool ok;
90978 +       struct wnd_bitmap *wnd;
90980 +       ret = run_unpack(run, sbi, ino, svcn, evcn, vcn, run_buf, run_buf_size);
90981 +       if (ret <= 0)
90982 +               return ret;
90984 +       if (!sbi->used.bitmap.sb || !run || run == RUN_DEALLOCATE)
90985 +               return ret;
90987 +       if (ino == MFT_REC_BADCLUST)
90988 +               return ret;
90990 +       next_vcn = vcn = svcn;
90991 +       wnd = &sbi->used.bitmap;
90993 +       for (ok = run_lookup_entry(run, vcn, &lcn, &len, &index);
90994 +            next_vcn <= evcn;
90995 +            ok = run_get_entry(run, ++index, &vcn, &lcn, &len)) {
90996 +               if (!ok || next_vcn != vcn)
90997 +                       return -EINVAL;
90999 +               next_vcn = vcn + len;
91001 +               if (lcn == SPARSE_LCN)
91002 +                       continue;
91004 +               if (sbi->flags & NTFS_FLAGS_NEED_REPLAY)
91005 +                       continue;
91007 +               down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
91008 +               /* Check for free blocks */
91009 +               ok = wnd_is_used(wnd, lcn, len);
91010 +               up_read(&wnd->rw_lock);
91011 +               if (ok)
91012 +                       continue;
91014 +               /* Looks like volume is corrupted */
91015 +               ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
91017 +               if (down_write_trylock(&wnd->rw_lock)) {
91018 +                       /* mark all zero bits as used in range [lcn, lcn+len) */
91019 +                       CLST i, lcn_f = 0, len_f = 0;
91021 +                       err = 0;
91022 +                       for (i = 0; i < len; i++) {
91023 +                               if (wnd_is_free(wnd, lcn + i, 1)) {
91024 +                                       if (!len_f)
91025 +                                               lcn_f = lcn + i;
91026 +                                       len_f += 1;
91027 +                               } else if (len_f) {
91028 +                                       err = wnd_set_used(wnd, lcn_f, len_f);
91029 +                                       len_f = 0;
91030 +                                       if (err)
91031 +                                               break;
91032 +                               }
91033 +                       }
91035 +                       if (len_f)
91036 +                               err = wnd_set_used(wnd, lcn_f, len_f);
91038 +                       up_write(&wnd->rw_lock);
91039 +                       if (err)
91040 +                               return err;
91041 +               }
91042 +       }
91044 +       return ret;
91046 +#endif
91049 + * run_get_highest_vcn
91050 + *
91051 + * returns the highest vcn from a mapping pairs array
91052 + * it is used while replaying the log file
91053 + */
91054 +int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn)
91056 +       u64 vcn64 = vcn;
91057 +       u8 size_size;
91059 +       while ((size_size = *run_buf & 0xF)) {
91060 +               u8 offset_size = *run_buf++ >> 4;
91061 +               u64 len;
91063 +               if (size_size > 8 || offset_size > 8)
91064 +                       return -EINVAL;
91066 +               len = run_unpack_s64(run_buf, size_size, 0);
91067 +               if (!len)
91068 +                       return -EINVAL;
91070 +               run_buf += size_size + offset_size;
91071 +               vcn64 += len;
91073 +#ifndef CONFIG_NTFS3_64BIT_CLUSTER
91074 +               if (vcn64 > 0x100000000ull)
91075 +                       return -EINVAL;
91076 +#endif
91077 +       }
91079 +       *highest_vcn = vcn64 - 1;
91080 +       return 0;
91082 diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
91083 new file mode 100644
91084 index 000000000000..c563431248bf
91085 --- /dev/null
91086 +++ b/fs/ntfs3/super.c
91087 @@ -0,0 +1,1500 @@
91088 +// SPDX-License-Identifier: GPL-2.0
91090 + *
91091 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
91092 + *
91093 + *
91094 + *                 terminology
91095 + *
91096 + * cluster - allocation unit     - 512,1K,2K,4K,...,2M
91097 + * vcn - virtual cluster number  - offset inside the file in clusters
91098 + * vbo - virtual byte offset     - offset inside the file in bytes
91099 + * lcn - logical cluster number  - 0 based cluster in clusters heap
91100 + * lbo - logical byte offset     - absolute position inside volume
91101 + * run - maps vcn to lcn         - stored in attributes in packed form
91102 + * attr - attribute segment      - std/name/data etc records inside MFT
91103 + * mi  - mft inode               - one MFT record(usually 1024 bytes or 4K), consists of attributes
91104 + * ni  - ntfs inode              - extends linux inode. consists of one or more mft inodes
91105 + * index - unit inside directory - 2K, 4K, <=page size, does not depend on cluster size
91106 + *
91107 + * TODO: Implement
91108 + * https://docs.microsoft.com/en-us/windows/wsl/file-permissions
91109 + */
91111 +#include <linux/backing-dev.h>
91112 +#include <linux/blkdev.h>
91113 +#include <linux/buffer_head.h>
91114 +#include <linux/exportfs.h>
91115 +#include <linux/fs.h>
91116 +#include <linux/iversion.h>
91117 +#include <linux/module.h>
91118 +#include <linux/nls.h>
91119 +#include <linux/parser.h>
91120 +#include <linux/seq_file.h>
91121 +#include <linux/statfs.h>
91123 +#include "debug.h"
91124 +#include "ntfs.h"
91125 +#include "ntfs_fs.h"
91126 +#ifdef CONFIG_NTFS3_LZX_XPRESS
91127 +#include "lib/lib.h"
91128 +#endif
91130 +#ifdef CONFIG_PRINTK
91132 + * Trace warnings/notices/errors
91133 + * Thanks Joe Perches <joe@perches.com> for implementation
91134 + */
91135 +void ntfs_printk(const struct super_block *sb, const char *fmt, ...)
91137 +       struct va_format vaf;
91138 +       va_list args;
91139 +       int level;
91140 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
91142 +       /*should we use different ratelimits for warnings/notices/errors? */
91143 +       if (!___ratelimit(&sbi->msg_ratelimit, "ntfs3"))
91144 +               return;
91146 +       va_start(args, fmt);
91148 +       level = printk_get_level(fmt);
91149 +       vaf.fmt = printk_skip_level(fmt);
91150 +       vaf.va = &args;
91151 +       printk("%c%cntfs3: %s: %pV\n", KERN_SOH_ASCII, level, sb->s_id, &vaf);
91153 +       va_end(args);
91156 +static char s_name_buf[512];
91157 +static atomic_t s_name_buf_cnt = ATOMIC_INIT(1); // 1 means 'free s_name_buf'
91159 +/* print warnings/notices/errors about inode using name or inode number */
91160 +void ntfs_inode_printk(struct inode *inode, const char *fmt, ...)
91162 +       struct super_block *sb = inode->i_sb;
91163 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
91164 +       char *name;
91165 +       va_list args;
91166 +       struct va_format vaf;
91167 +       int level;
91169 +       if (!___ratelimit(&sbi->msg_ratelimit, "ntfs3"))
91170 +               return;
91172 +       /* use static allocated buffer, if possible */
91173 +       name = atomic_dec_and_test(&s_name_buf_cnt)
91174 +                      ? s_name_buf
91175 +                      : kmalloc(sizeof(s_name_buf), GFP_NOFS);
91177 +       if (name) {
91178 +               struct dentry *de = d_find_alias(inode);
91179 +               const u32 name_len = ARRAY_SIZE(s_name_buf) - 1;
91181 +               if (de) {
91182 +                       spin_lock(&de->d_lock);
91183 +                       snprintf(name, name_len, " \"%s\"", de->d_name.name);
91184 +                       spin_unlock(&de->d_lock);
91185 +                       name[name_len] = 0; /* to be sure*/
91186 +               } else {
91187 +                       name[0] = 0;
91188 +               }
91189 +               dput(de); /* cocci warns if placed in branch "if (de)" */
91190 +       }
91192 +       va_start(args, fmt);
91194 +       level = printk_get_level(fmt);
91195 +       vaf.fmt = printk_skip_level(fmt);
91196 +       vaf.va = &args;
91198 +       printk("%c%cntfs3: %s: ino=%lx,%s %pV\n", KERN_SOH_ASCII, level,
91199 +              sb->s_id, inode->i_ino, name ? name : "", &vaf);
91201 +       va_end(args);
91203 +       atomic_inc(&s_name_buf_cnt);
91204 +       if (name != s_name_buf)
91205 +               kfree(name);
91207 +#endif
91210 + * Shared memory struct.
91211 + *
91212 + * on-disk ntfs's upcase table is created by ntfs formatter
91213 + * 'upcase' table is 128K bytes of memory
91214 + * we should read it into memory when mounting
91215 + * Several ntfs volumes likely use the same 'upcase' table
91216 + * It is a good idea to share in-memory 'upcase' table between different volumes
91217 + * Unfortunately winxp/vista/win7 use different upcase tables
91218 + */
91219 +static DEFINE_SPINLOCK(s_shared_lock);
91221 +static struct {
91222 +       void *ptr;
91223 +       u32 len;
91224 +       int cnt;
91225 +} s_shared[8];
91228 + * ntfs_set_shared
91229 + *
91230 + * Returns 'ptr' if pointer was saved in shared memory
91231 + * Returns NULL if pointer was not shared
91232 + */
91233 +void *ntfs_set_shared(void *ptr, u32 bytes)
91235 +       void *ret = NULL;
91236 +       int i, j = -1;
91238 +       spin_lock(&s_shared_lock);
91239 +       for (i = 0; i < ARRAY_SIZE(s_shared); i++) {
91240 +               if (!s_shared[i].cnt) {
91241 +                       j = i;
91242 +               } else if (bytes == s_shared[i].len &&
91243 +                          !memcmp(s_shared[i].ptr, ptr, bytes)) {
91244 +                       s_shared[i].cnt += 1;
91245 +                       ret = s_shared[i].ptr;
91246 +                       break;
91247 +               }
91248 +       }
91250 +       if (!ret && j != -1) {
91251 +               s_shared[j].ptr = ptr;
91252 +               s_shared[j].len = bytes;
91253 +               s_shared[j].cnt = 1;
91254 +               ret = ptr;
91255 +       }
91256 +       spin_unlock(&s_shared_lock);
91258 +       return ret;
91262 + * ntfs_put_shared
91263 + *
91264 + * Returns 'ptr' if pointer is not shared anymore
91265 + * Returns NULL if pointer is still shared
91266 + */
91267 +void *ntfs_put_shared(void *ptr)
91269 +       void *ret = ptr;
91270 +       int i;
91272 +       spin_lock(&s_shared_lock);
91273 +       for (i = 0; i < ARRAY_SIZE(s_shared); i++) {
91274 +               if (s_shared[i].cnt && s_shared[i].ptr == ptr) {
91275 +                       if (--s_shared[i].cnt)
91276 +                               ret = NULL;
91277 +                       break;
91278 +               }
91279 +       }
91280 +       spin_unlock(&s_shared_lock);
91282 +       return ret;
91285 +static inline void clear_mount_options(struct ntfs_mount_options *options)
91287 +       unload_nls(options->nls);
91290 +enum Opt {
91291 +       Opt_uid,
91292 +       Opt_gid,
91293 +       Opt_umask,
91294 +       Opt_dmask,
91295 +       Opt_fmask,
91296 +       Opt_immutable,
91297 +       Opt_discard,
91298 +       Opt_force,
91299 +       Opt_sparse,
91300 +       Opt_nohidden,
91301 +       Opt_showmeta,
91302 +       Opt_acl,
91303 +       Opt_noatime,
91304 +       Opt_nls,
91305 +       Opt_prealloc,
91306 +       Opt_no_acs_rules,
91307 +       Opt_err,
91310 +static const match_table_t ntfs_tokens = {
91311 +       { Opt_uid, "uid=%u" },
91312 +       { Opt_gid, "gid=%u" },
91313 +       { Opt_umask, "umask=%o" },
91314 +       { Opt_dmask, "dmask=%o" },
91315 +       { Opt_fmask, "fmask=%o" },
91316 +       { Opt_immutable, "sys_immutable" },
91317 +       { Opt_discard, "discard" },
91318 +       { Opt_force, "force" },
91319 +       { Opt_sparse, "sparse" },
91320 +       { Opt_nohidden, "nohidden" },
91321 +       { Opt_acl, "acl" },
91322 +       { Opt_noatime, "noatime" },
91323 +       { Opt_showmeta, "showmeta" },
91324 +       { Opt_nls, "nls=%s" },
91325 +       { Opt_prealloc, "prealloc" },
91326 +       { Opt_no_acs_rules, "no_acs_rules" },
91327 +       { Opt_err, NULL },
91330 +static noinline int ntfs_parse_options(struct super_block *sb, char *options,
91331 +                                      int silent,
91332 +                                      struct ntfs_mount_options *opts)
91334 +       char *p;
91335 +       substring_t args[MAX_OPT_ARGS];
91336 +       int option;
91337 +       char nls_name[30];
91338 +       struct nls_table *nls;
91340 +       opts->fs_uid = current_uid();
91341 +       opts->fs_gid = current_gid();
91342 +       opts->fs_fmask_inv = opts->fs_dmask_inv = ~current_umask();
91343 +       nls_name[0] = 0;
91345 +       if (!options)
91346 +               goto out;
91348 +       while ((p = strsep(&options, ","))) {
91349 +               int token;
91351 +               if (!*p)
91352 +                       continue;
91354 +               token = match_token(p, ntfs_tokens, args);
91355 +               switch (token) {
91356 +               case Opt_immutable:
91357 +                       opts->sys_immutable = 1;
91358 +                       break;
91359 +               case Opt_uid:
91360 +                       if (match_int(&args[0], &option))
91361 +                               return -EINVAL;
91362 +                       opts->fs_uid = make_kuid(current_user_ns(), option);
91363 +                       if (!uid_valid(opts->fs_uid))
91364 +                               return -EINVAL;
91365 +                       opts->uid = 1;
91366 +                       break;
91367 +               case Opt_gid:
91368 +                       if (match_int(&args[0], &option))
91369 +                               return -EINVAL;
91370 +                       opts->fs_gid = make_kgid(current_user_ns(), option);
91371 +                       if (!gid_valid(opts->fs_gid))
91372 +                               return -EINVAL;
91373 +                       opts->gid = 1;
91374 +                       break;
91375 +               case Opt_umask:
91376 +                       if (match_octal(&args[0], &option))
91377 +                               return -EINVAL;
91378 +                       opts->fs_fmask_inv = opts->fs_dmask_inv = ~option;
91379 +                       opts->fmask = opts->dmask = 1;
91380 +                       break;
91381 +               case Opt_dmask:
91382 +                       if (match_octal(&args[0], &option))
91383 +                               return -EINVAL;
91384 +                       opts->fs_dmask_inv = ~option;
91385 +                       opts->dmask = 1;
91386 +                       break;
91387 +               case Opt_fmask:
91388 +                       if (match_octal(&args[0], &option))
91389 +                               return -EINVAL;
91390 +                       opts->fs_fmask_inv = ~option;
91391 +                       opts->fmask = 1;
91392 +                       break;
91393 +               case Opt_discard:
91394 +                       opts->discard = 1;
91395 +                       break;
91396 +               case Opt_force:
91397 +                       opts->force = 1;
91398 +                       break;
91399 +               case Opt_sparse:
91400 +                       opts->sparse = 1;
91401 +                       break;
91402 +               case Opt_nohidden:
91403 +                       opts->nohidden = 1;
91404 +                       break;
91405 +               case Opt_acl:
91406 +#ifdef CONFIG_NTFS3_FS_POSIX_ACL
91407 +                       sb->s_flags |= SB_POSIXACL;
91408 +                       break;
91409 +#else
91410 +                       ntfs_err(sb, "support for ACL not compiled in!");
91411 +                       return -EINVAL;
91412 +#endif
91413 +               case Opt_noatime:
91414 +                       sb->s_flags |= SB_NOATIME;
91415 +                       break;
91416 +               case Opt_showmeta:
91417 +                       opts->showmeta = 1;
91418 +                       break;
91419 +               case Opt_nls:
91420 +                       match_strlcpy(nls_name, &args[0], sizeof(nls_name));
91421 +                       break;
91422 +               case Opt_prealloc:
91423 +                       opts->prealloc = 1;
91424 +                       break;
91425 +               case Opt_no_acs_rules:
91426 +                       opts->no_acs_rules = 1;
91427 +                       break;
91428 +               default:
91429 +                       if (!silent)
91430 +                               ntfs_err(
91431 +                                       sb,
91432 +                                       "Unrecognized mount option \"%s\" or missing value",
91433 +                                       p);
91434 +                       //return -EINVAL;
91435 +               }
91436 +       }
91438 +out:
91439 +       if (!strcmp(nls_name[0] ? nls_name : CONFIG_NLS_DEFAULT, "utf8")) {
91440 +               /* For UTF-8 use utf16s_to_utf8s/utf8s_to_utf16s instead of nls */
91441 +               nls = NULL;
91442 +       } else if (nls_name[0]) {
91443 +               nls = load_nls(nls_name);
91444 +               if (!nls) {
91445 +                       ntfs_err(sb, "failed to load \"%s\"", nls_name);
91446 +                       return -EINVAL;
91447 +               }
91448 +       } else {
91449 +               nls = load_nls_default();
91450 +               if (!nls) {
91451 +                       ntfs_err(sb, "failed to load default nls");
91452 +                       return -EINVAL;
91453 +               }
91454 +       }
91455 +       opts->nls = nls;
91457 +       return 0;
91460 +static int ntfs_remount(struct super_block *sb, int *flags, char *data)
91462 +       int err, ro_rw;
91463 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
91464 +       struct ntfs_mount_options old_opts;
91465 +       char *orig_data = kstrdup(data, GFP_KERNEL);
91467 +       if (data && !orig_data)
91468 +               return -ENOMEM;
91470 +       /* Store  original options */
91471 +       memcpy(&old_opts, &sbi->options, sizeof(old_opts));
91472 +       clear_mount_options(&sbi->options);
91473 +       memset(&sbi->options, 0, sizeof(sbi->options));
91475 +       err = ntfs_parse_options(sb, data, 0, &sbi->options);
91476 +       if (err)
91477 +               goto restore_opts;
91479 +       ro_rw = sb_rdonly(sb) && !(*flags & SB_RDONLY);
91480 +       if (ro_rw && (sbi->flags & NTFS_FLAGS_NEED_REPLAY)) {
91481 +               ntfs_warn(
91482 +                       sb,
91483 +                       "Couldn't remount rw because journal is not replayed. Please umount/remount instead\n");
91484 +               err = -EINVAL;
91485 +               goto restore_opts;
91486 +       }
91488 +       sync_filesystem(sb);
91490 +       if (ro_rw && (sbi->volume.flags & VOLUME_FLAG_DIRTY) &&
91491 +           !sbi->options.force) {
91492 +               ntfs_warn(sb, "volume is dirty and \"force\" flag is not set!");
91493 +               err = -EINVAL;
91494 +               goto restore_opts;
91495 +       }
91497 +       clear_mount_options(&old_opts);
91499 +       *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME) |
91500 +                SB_NODIRATIME | SB_NOATIME;
91501 +       ntfs_info(sb, "re-mounted. Opts: %s", orig_data);
91502 +       err = 0;
91503 +       goto out;
91505 +restore_opts:
91506 +       clear_mount_options(&sbi->options);
91507 +       memcpy(&sbi->options, &old_opts, sizeof(old_opts));
91509 +out:
91510 +       kfree(orig_data);
91511 +       return err;
91514 +static struct kmem_cache *ntfs_inode_cachep;
91516 +static struct inode *ntfs_alloc_inode(struct super_block *sb)
91518 +       struct ntfs_inode *ni = kmem_cache_alloc(ntfs_inode_cachep, GFP_NOFS);
91520 +       if (!ni)
91521 +               return NULL;
91523 +       memset(ni, 0, offsetof(struct ntfs_inode, vfs_inode));
91525 +       mutex_init(&ni->ni_lock);
91527 +       return &ni->vfs_inode;
91530 +static void ntfs_i_callback(struct rcu_head *head)
91532 +       struct inode *inode = container_of(head, struct inode, i_rcu);
91533 +       struct ntfs_inode *ni = ntfs_i(inode);
91535 +       mutex_destroy(&ni->ni_lock);
91537 +       kmem_cache_free(ntfs_inode_cachep, ni);
91540 +static void ntfs_destroy_inode(struct inode *inode)
91542 +       call_rcu(&inode->i_rcu, ntfs_i_callback);
91545 +static void init_once(void *foo)
91547 +       struct ntfs_inode *ni = foo;
91549 +       inode_init_once(&ni->vfs_inode);
91552 +/* noinline to reduce binary size*/
91553 +static noinline void put_ntfs(struct ntfs_sb_info *sbi)
91555 +       ntfs_free(sbi->new_rec);
91556 +       ntfs_vfree(ntfs_put_shared(sbi->upcase));
91557 +       ntfs_free(sbi->def_table);
91559 +       wnd_close(&sbi->mft.bitmap);
91560 +       wnd_close(&sbi->used.bitmap);
91562 +       if (sbi->mft.ni)
91563 +               iput(&sbi->mft.ni->vfs_inode);
91565 +       if (sbi->security.ni)
91566 +               iput(&sbi->security.ni->vfs_inode);
91568 +       if (sbi->reparse.ni)
91569 +               iput(&sbi->reparse.ni->vfs_inode);
91571 +       if (sbi->objid.ni)
91572 +               iput(&sbi->objid.ni->vfs_inode);
91574 +       if (sbi->volume.ni)
91575 +               iput(&sbi->volume.ni->vfs_inode);
91577 +       ntfs_update_mftmirr(sbi, 0);
91579 +       indx_clear(&sbi->security.index_sii);
91580 +       indx_clear(&sbi->security.index_sdh);
91581 +       indx_clear(&sbi->reparse.index_r);
91582 +       indx_clear(&sbi->objid.index_o);
91583 +       ntfs_free(sbi->compress.lznt);
91584 +#ifdef CONFIG_NTFS3_LZX_XPRESS
91585 +       xpress_free_decompressor(sbi->compress.xpress);
91586 +       lzx_free_decompressor(sbi->compress.lzx);
91587 +#endif
91588 +       clear_mount_options(&sbi->options);
91590 +       ntfs_free(sbi);
91593 +static void ntfs_put_super(struct super_block *sb)
91595 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
91597 +       /*mark rw ntfs as clear, if possible*/
91598 +       ntfs_set_state(sbi, NTFS_DIRTY_CLEAR);
91600 +       put_ntfs(sbi);
91602 +       sync_blockdev(sb->s_bdev);
91605 +static int ntfs_statfs(struct dentry *dentry, struct kstatfs *buf)
91607 +       struct super_block *sb = dentry->d_sb;
91608 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
91609 +       struct wnd_bitmap *wnd = &sbi->used.bitmap;
91611 +       buf->f_type = sb->s_magic;
91612 +       buf->f_bsize = sbi->cluster_size;
91613 +       buf->f_blocks = wnd->nbits;
91615 +       buf->f_bfree = buf->f_bavail = wnd_zeroes(wnd);
91616 +       buf->f_fsid.val[0] = sbi->volume.ser_num;
91617 +       buf->f_fsid.val[1] = (sbi->volume.ser_num >> 32);
91618 +       buf->f_namelen = NTFS_NAME_LEN;
91620 +       return 0;
91623 +static int ntfs_show_options(struct seq_file *m, struct dentry *root)
91625 +       struct super_block *sb = root->d_sb;
91626 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
91627 +       struct ntfs_mount_options *opts = &sbi->options;
91628 +       struct user_namespace *user_ns = seq_user_ns(m);
91630 +       if (opts->uid)
91631 +               seq_printf(m, ",uid=%u",
91632 +                          from_kuid_munged(user_ns, opts->fs_uid));
91633 +       if (opts->gid)
91634 +               seq_printf(m, ",gid=%u",
91635 +                          from_kgid_munged(user_ns, opts->fs_gid));
91636 +       if (opts->fmask)
91637 +               seq_printf(m, ",fmask=%04o", ~opts->fs_fmask_inv);
91638 +       if (opts->dmask)
91639 +               seq_printf(m, ",dmask=%04o", ~opts->fs_dmask_inv);
91640 +       if (opts->nls)
91641 +               seq_printf(m, ",nls=%s", opts->nls->charset);
91642 +       else
91643 +               seq_puts(m, ",nls=utf8");
91644 +       if (opts->sys_immutable)
91645 +               seq_puts(m, ",sys_immutable");
91646 +       if (opts->discard)
91647 +               seq_puts(m, ",discard");
91648 +       if (opts->sparse)
91649 +               seq_puts(m, ",sparse");
91650 +       if (opts->showmeta)
91651 +               seq_puts(m, ",showmeta");
91652 +       if (opts->nohidden)
91653 +               seq_puts(m, ",nohidden");
91654 +       if (opts->force)
91655 +               seq_puts(m, ",force");
91656 +       if (opts->no_acs_rules)
91657 +               seq_puts(m, ",no_acs_rules");
91658 +       if (opts->prealloc)
91659 +               seq_puts(m, ",prealloc");
91660 +       if (sb->s_flags & SB_POSIXACL)
91661 +               seq_puts(m, ",acl");
91662 +       if (sb->s_flags & SB_NOATIME)
91663 +               seq_puts(m, ",noatime");
91665 +       return 0;
91668 +/*super_operations::sync_fs*/
91669 +static int ntfs_sync_fs(struct super_block *sb, int wait)
91671 +       int err = 0, err2;
91672 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
91673 +       struct ntfs_inode *ni;
91674 +       struct inode *inode;
91676 +       ni = sbi->security.ni;
91677 +       if (ni) {
91678 +               inode = &ni->vfs_inode;
91679 +               err2 = _ni_write_inode(inode, wait);
91680 +               if (err2 && !err)
91681 +                       err = err2;
91682 +       }
91684 +       ni = sbi->objid.ni;
91685 +       if (ni) {
91686 +               inode = &ni->vfs_inode;
91687 +               err2 = _ni_write_inode(inode, wait);
91688 +               if (err2 && !err)
91689 +                       err = err2;
91690 +       }
91692 +       ni = sbi->reparse.ni;
91693 +       if (ni) {
91694 +               inode = &ni->vfs_inode;
91695 +               err2 = _ni_write_inode(inode, wait);
91696 +               if (err2 && !err)
91697 +                       err = err2;
91698 +       }
91700 +       if (!err)
91701 +               ntfs_set_state(sbi, NTFS_DIRTY_CLEAR);
91703 +       ntfs_update_mftmirr(sbi, wait);
91705 +       return err;
91708 +static const struct super_operations ntfs_sops = {
91709 +       .alloc_inode = ntfs_alloc_inode,
91710 +       .destroy_inode = ntfs_destroy_inode,
91711 +       .evict_inode = ntfs_evict_inode,
91712 +       .put_super = ntfs_put_super,
91713 +       .statfs = ntfs_statfs,
91714 +       .show_options = ntfs_show_options,
91715 +       .sync_fs = ntfs_sync_fs,
91716 +       .remount_fs = ntfs_remount,
91717 +       .write_inode = ntfs3_write_inode,
91720 +static struct inode *ntfs_export_get_inode(struct super_block *sb, u64 ino,
91721 +                                          u32 generation)
91723 +       struct MFT_REF ref;
91724 +       struct inode *inode;
91726 +       ref.low = cpu_to_le32(ino);
91727 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
91728 +       ref.high = cpu_to_le16(ino >> 32);
91729 +#else
91730 +       ref.high = 0;
91731 +#endif
91732 +       ref.seq = cpu_to_le16(generation);
91734 +       inode = ntfs_iget5(sb, &ref, NULL);
91735 +       if (!IS_ERR(inode) && is_bad_inode(inode)) {
91736 +               iput(inode);
91737 +               inode = ERR_PTR(-ESTALE);
91738 +       }
91740 +       return inode;
91743 +static struct dentry *ntfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
91744 +                                       int fh_len, int fh_type)
91746 +       return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
91747 +                                   ntfs_export_get_inode);
91750 +static struct dentry *ntfs_fh_to_parent(struct super_block *sb, struct fid *fid,
91751 +                                       int fh_len, int fh_type)
91753 +       return generic_fh_to_parent(sb, fid, fh_len, fh_type,
91754 +                                   ntfs_export_get_inode);
91757 +/* TODO: == ntfs_sync_inode */
91758 +static int ntfs_nfs_commit_metadata(struct inode *inode)
91760 +       return _ni_write_inode(inode, 1);
91763 +static const struct export_operations ntfs_export_ops = {
91764 +       .fh_to_dentry = ntfs_fh_to_dentry,
91765 +       .fh_to_parent = ntfs_fh_to_parent,
91766 +       .get_parent = ntfs3_get_parent,
91767 +       .commit_metadata = ntfs_nfs_commit_metadata,
91770 +/* Returns Gb,Mb to print with "%u.%02u Gb" */
91771 +static u32 format_size_gb(const u64 bytes, u32 *mb)
91773 +       /* Do simple right 30 bit shift of 64 bit value */
91774 +       u64 kbytes = bytes >> 10;
91775 +       u32 kbytes32 = kbytes;
91777 +       *mb = (100 * (kbytes32 & 0xfffff) + 0x7ffff) >> 20;
91778 +       if (*mb >= 100)
91779 +               *mb = 99;
91781 +       return (kbytes32 >> 20) | (((u32)(kbytes >> 32)) << 12);
91784 +static u32 true_sectors_per_clst(const struct NTFS_BOOT *boot)
91786 +       return boot->sectors_per_clusters <= 0x80
91787 +                      ? boot->sectors_per_clusters
91788 +                      : (1u << (0 - boot->sectors_per_clusters));
91791 +/* inits internal info from on-disk boot sector*/
91792 +static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
91793 +                              u64 dev_size)
91795 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
91796 +       int err;
91797 +       u32 mb, gb, boot_sector_size, sct_per_clst, record_size;
91798 +       u64 sectors, clusters, fs_size, mlcn, mlcn2;
91799 +       struct NTFS_BOOT *boot;
91800 +       struct buffer_head *bh;
91801 +       struct MFT_REC *rec;
91802 +       u16 fn, ao;
91804 +       sbi->volume.blocks = dev_size >> PAGE_SHIFT;
91806 +       bh = ntfs_bread(sb, 0);
91807 +       if (!bh)
91808 +               return -EIO;
91810 +       err = -EINVAL;
91811 +       boot = (struct NTFS_BOOT *)bh->b_data;
91813 +       if (memcmp(boot->system_id, "NTFS    ", sizeof("NTFS    ") - 1))
91814 +               goto out;
91816 +       /* 0x55AA is not mandatory. Thanks Maxim Suhanov*/
91817 +       /*if (0x55 != boot->boot_magic[0] || 0xAA != boot->boot_magic[1])
91818 +        *      goto out;
91819 +        */
91821 +       boot_sector_size = (u32)boot->bytes_per_sector[1] << 8;
91822 +       if (boot->bytes_per_sector[0] || boot_sector_size < SECTOR_SIZE ||
91823 +           !is_power_of2(boot_sector_size)) {
91824 +               goto out;
91825 +       }
91827 +       /* cluster size: 512, 1K, 2K, 4K, ... 2M */
91828 +       sct_per_clst = true_sectors_per_clst(boot);
91829 +       if (!is_power_of2(sct_per_clst))
91830 +               goto out;
91832 +       mlcn = le64_to_cpu(boot->mft_clst);
91833 +       mlcn2 = le64_to_cpu(boot->mft2_clst);
91834 +       sectors = le64_to_cpu(boot->sectors_per_volume);
91836 +       if (mlcn * sct_per_clst >= sectors)
91837 +               goto out;
91839 +       if (mlcn2 * sct_per_clst >= sectors)
91840 +               goto out;
91842 +       /* Check MFT record size */
91843 +       if ((boot->record_size < 0 &&
91844 +            SECTOR_SIZE > (2U << (-boot->record_size))) ||
91845 +           (boot->record_size >= 0 && !is_power_of2(boot->record_size))) {
91846 +               goto out;
91847 +       }
91849 +       /* Check index record size */
91850 +       if ((boot->index_size < 0 &&
91851 +            SECTOR_SIZE > (2U << (-boot->index_size))) ||
91852 +           (boot->index_size >= 0 && !is_power_of2(boot->index_size))) {
91853 +               goto out;
91854 +       }
91856 +       sbi->sector_size = boot_sector_size;
91857 +       sbi->sector_bits = blksize_bits(boot_sector_size);
91858 +       fs_size = (sectors + 1) << sbi->sector_bits;
91860 +       gb = format_size_gb(fs_size, &mb);
91862 +       /*
91863 +        * - Volume formatted and mounted with the same sector size
91864 +        * - Volume formatted 4K and mounted as 512
91865 +        * - Volume formatted 512 and mounted as 4K
91866 +        */
91867 +       if (sbi->sector_size != sector_size) {
91868 +               ntfs_warn(sb,
91869 +                         "Different NTFS' sector size and media sector size");
91870 +               dev_size += sector_size - 1;
91871 +       }
91873 +       sbi->cluster_size = boot_sector_size * sct_per_clst;
91874 +       sbi->cluster_bits = blksize_bits(sbi->cluster_size);
91876 +       sbi->mft.lbo = mlcn << sbi->cluster_bits;
91877 +       sbi->mft.lbo2 = mlcn2 << sbi->cluster_bits;
91879 +       if (sbi->cluster_size < sbi->sector_size)
91880 +               goto out;
91882 +       sbi->cluster_mask = sbi->cluster_size - 1;
91883 +       sbi->cluster_mask_inv = ~(u64)sbi->cluster_mask;
91884 +       sbi->record_size = record_size = boot->record_size < 0
91885 +                                                ? 1 << (-boot->record_size)
91886 +                                                : (u32)boot->record_size
91887 +                                                          << sbi->cluster_bits;
91889 +       if (record_size > MAXIMUM_BYTES_PER_MFT)
91890 +               goto out;
91892 +       sbi->record_bits = blksize_bits(record_size);
91893 +       sbi->attr_size_tr = (5 * record_size >> 4); // ~320 bytes
91895 +       sbi->max_bytes_per_attr =
91896 +               record_size - QuadAlign(MFTRECORD_FIXUP_OFFSET_1) -
91897 +               QuadAlign(((record_size >> SECTOR_SHIFT) * sizeof(short))) -
91898 +               QuadAlign(sizeof(enum ATTR_TYPE));
91900 +       sbi->index_size = boot->index_size < 0
91901 +                                 ? 1u << (-boot->index_size)
91902 +                                 : (u32)boot->index_size << sbi->cluster_bits;
91904 +       sbi->volume.ser_num = le64_to_cpu(boot->serial_num);
91905 +       sbi->volume.size = sectors << sbi->sector_bits;
91907 +       /* warning if RAW volume */
91908 +       if (dev_size < fs_size) {
91909 +               u32 mb0, gb0;
91911 +               gb0 = format_size_gb(dev_size, &mb0);
91912 +               ntfs_warn(
91913 +                       sb,
91914 +                       "RAW NTFS volume: Filesystem size %u.%02u Gb > volume size %u.%02u Gb. Mount in read-only",
91915 +                       gb, mb, gb0, mb0);
91916 +               sb->s_flags |= SB_RDONLY;
91917 +       }
91919 +       clusters = sbi->volume.size >> sbi->cluster_bits;
91920 +#ifndef CONFIG_NTFS3_64BIT_CLUSTER
91921 +       /* 32 bits per cluster */
91922 +       if (clusters >> 32) {
91923 +               ntfs_notice(
91924 +                       sb,
91925 +                       "NTFS %u.%02u Gb is too big to use 32 bits per cluster",
91926 +                       gb, mb);
91927 +               goto out;
91928 +       }
91929 +#elif BITS_PER_LONG < 64
91930 +#error "CONFIG_NTFS3_64BIT_CLUSTER incompatible in 32 bit OS"
91931 +#endif
91933 +       sbi->used.bitmap.nbits = clusters;
91935 +       rec = ntfs_zalloc(record_size);
91936 +       if (!rec) {
91937 +               err = -ENOMEM;
91938 +               goto out;
91939 +       }
91941 +       sbi->new_rec = rec;
91942 +       rec->rhdr.sign = NTFS_FILE_SIGNATURE;
91943 +       rec->rhdr.fix_off = cpu_to_le16(MFTRECORD_FIXUP_OFFSET_1);
91944 +       fn = (sbi->record_size >> SECTOR_SHIFT) + 1;
91945 +       rec->rhdr.fix_num = cpu_to_le16(fn);
91946 +       ao = QuadAlign(MFTRECORD_FIXUP_OFFSET_1 + sizeof(short) * fn);
91947 +       rec->attr_off = cpu_to_le16(ao);
91948 +       rec->used = cpu_to_le32(ao + QuadAlign(sizeof(enum ATTR_TYPE)));
91949 +       rec->total = cpu_to_le32(sbi->record_size);
91950 +       ((struct ATTRIB *)Add2Ptr(rec, ao))->type = ATTR_END;
91952 +       if (sbi->cluster_size < PAGE_SIZE)
91953 +               sb_set_blocksize(sb, sbi->cluster_size);
91955 +       sbi->block_mask = sb->s_blocksize - 1;
91956 +       sbi->blocks_per_cluster = sbi->cluster_size >> sb->s_blocksize_bits;
91957 +       sbi->volume.blocks = sbi->volume.size >> sb->s_blocksize_bits;
91959 +       /* Maximum size for normal files */
91960 +       sbi->maxbytes = (clusters << sbi->cluster_bits) - 1;
91962 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
91963 +       if (clusters >= (1ull << (64 - sbi->cluster_bits)))
91964 +               sbi->maxbytes = -1;
91965 +       sbi->maxbytes_sparse = -1;
91966 +#else
91967 +       /* Maximum size for sparse file */
91968 +       sbi->maxbytes_sparse = (1ull << (sbi->cluster_bits + 32)) - 1;
91969 +#endif
91971 +       err = 0;
91973 +out:
91974 +       brelse(bh);
91976 +       return err;
91979 +/* try to mount*/
91980 +static int ntfs_fill_super(struct super_block *sb, void *data, int silent)
91982 +       int err;
91983 +       struct ntfs_sb_info *sbi;
91984 +       struct block_device *bdev = sb->s_bdev;
91985 +       struct inode *bd_inode = bdev->bd_inode;
91986 +       struct request_queue *rq = bdev_get_queue(bdev);
91987 +       struct inode *inode = NULL;
91988 +       struct ntfs_inode *ni;
91989 +       size_t i, tt;
91990 +       CLST vcn, lcn, len;
91991 +       struct ATTRIB *attr;
91992 +       const struct VOLUME_INFO *info;
91993 +       u32 idx, done, bytes;
91994 +       struct ATTR_DEF_ENTRY *t;
91995 +       u16 *upcase = NULL;
91996 +       u16 *shared;
91997 +       bool is_ro;
91998 +       struct MFT_REF ref;
92000 +       ref.high = 0;
92002 +       sbi = ntfs_zalloc(sizeof(struct ntfs_sb_info));
92003 +       if (!sbi)
92004 +               return -ENOMEM;
92006 +       sb->s_fs_info = sbi;
92007 +       sbi->sb = sb;
92008 +       sb->s_flags |= SB_NODIRATIME;
92009 +       sb->s_magic = 0x7366746e; // "ntfs"
92010 +       sb->s_op = &ntfs_sops;
92011 +       sb->s_export_op = &ntfs_export_ops;
92012 +       sb->s_time_gran = NTFS_TIME_GRAN; // 100 nsec
92013 +       sb->s_xattr = ntfs_xattr_handlers;
92015 +       ratelimit_state_init(&sbi->msg_ratelimit, DEFAULT_RATELIMIT_INTERVAL,
92016 +                            DEFAULT_RATELIMIT_BURST);
92018 +       err = ntfs_parse_options(sb, data, silent, &sbi->options);
92019 +       if (err)
92020 +               goto out;
92022 +       if (!rq || !blk_queue_discard(rq) || !rq->limits.discard_granularity) {
92023 +               ;
92024 +       } else {
92025 +               sbi->discard_granularity = rq->limits.discard_granularity;
92026 +               sbi->discard_granularity_mask_inv =
92027 +                       ~(u64)(sbi->discard_granularity - 1);
92028 +       }
92030 +       sb_set_blocksize(sb, PAGE_SIZE);
92032 +       /* parse boot */
92033 +       err = ntfs_init_from_boot(sb, rq ? queue_logical_block_size(rq) : 512,
92034 +                                 bd_inode->i_size);
92035 +       if (err)
92036 +               goto out;
92038 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
92039 +       sb->s_maxbytes = MAX_LFS_FILESIZE;
92040 +#else
92041 +       sb->s_maxbytes = 0xFFFFFFFFull << sbi->cluster_bits;
92042 +#endif
92044 +       mutex_init(&sbi->compress.mtx_lznt);
92045 +#ifdef CONFIG_NTFS3_LZX_XPRESS
92046 +       mutex_init(&sbi->compress.mtx_xpress);
92047 +       mutex_init(&sbi->compress.mtx_lzx);
92048 +#endif
92050 +       /*
92051 +        * Load $Volume. This should be done before LogFile
92052 +        * 'cause 'sbi->volume.ni' is used 'ntfs_set_state'
92053 +        */
92054 +       ref.low = cpu_to_le32(MFT_REC_VOL);
92055 +       ref.seq = cpu_to_le16(MFT_REC_VOL);
92056 +       inode = ntfs_iget5(sb, &ref, &NAME_VOLUME);
92057 +       if (IS_ERR(inode)) {
92058 +               err = PTR_ERR(inode);
92059 +               ntfs_err(sb, "Failed to load $Volume.");
92060 +               inode = NULL;
92061 +               goto out;
92062 +       }
92064 +       ni = ntfs_i(inode);
92066 +       /* Load and save label (not necessary) */
92067 +       attr = ni_find_attr(ni, NULL, NULL, ATTR_LABEL, NULL, 0, NULL, NULL);
92069 +       if (!attr) {
92070 +               /* It is ok if no ATTR_LABEL */
92071 +       } else if (!attr->non_res && !is_attr_ext(attr)) {
92072 +               /* $AttrDef allows labels to be up to 128 symbols */
92073 +               err = utf16s_to_utf8s(resident_data(attr),
92074 +                                     le32_to_cpu(attr->res.data_size) >> 1,
92075 +                                     UTF16_LITTLE_ENDIAN, sbi->volume.label,
92076 +                                     sizeof(sbi->volume.label));
92077 +               if (err < 0)
92078 +                       sbi->volume.label[0] = 0;
92079 +       } else {
92080 +               /* should we break mounting here? */
92081 +               //err = -EINVAL;
92082 +               //goto out;
92083 +       }
92085 +       attr = ni_find_attr(ni, attr, NULL, ATTR_VOL_INFO, NULL, 0, NULL, NULL);
92086 +       if (!attr || is_attr_ext(attr)) {
92087 +               err = -EINVAL;
92088 +               goto out;
92089 +       }
92091 +       info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
92092 +       if (!info) {
92093 +               err = -EINVAL;
92094 +               goto out;
92095 +       }
92097 +       sbi->volume.major_ver = info->major_ver;
92098 +       sbi->volume.minor_ver = info->minor_ver;
92099 +       sbi->volume.flags = info->flags;
92101 +       sbi->volume.ni = ni;
92102 +       inode = NULL;
92104 +       /* Load $MFTMirr to estimate recs_mirr */
92105 +       ref.low = cpu_to_le32(MFT_REC_MIRR);
92106 +       ref.seq = cpu_to_le16(MFT_REC_MIRR);
92107 +       inode = ntfs_iget5(sb, &ref, &NAME_MIRROR);
92108 +       if (IS_ERR(inode)) {
92109 +               err = PTR_ERR(inode);
92110 +               ntfs_err(sb, "Failed to load $MFTMirr.");
92111 +               inode = NULL;
92112 +               goto out;
92113 +       }
92115 +       sbi->mft.recs_mirr =
92116 +               ntfs_up_cluster(sbi, inode->i_size) >> sbi->record_bits;
92118 +       iput(inode);
92120 +       /* Load LogFile to replay */
92121 +       ref.low = cpu_to_le32(MFT_REC_LOG);
92122 +       ref.seq = cpu_to_le16(MFT_REC_LOG);
92123 +       inode = ntfs_iget5(sb, &ref, &NAME_LOGFILE);
92124 +       if (IS_ERR(inode)) {
92125 +               err = PTR_ERR(inode);
92126 +               ntfs_err(sb, "Failed to load \x24LogFile.");
92127 +               inode = NULL;
92128 +               goto out;
92129 +       }
92131 +       ni = ntfs_i(inode);
92133 +       err = ntfs_loadlog_and_replay(ni, sbi);
92134 +       if (err)
92135 +               goto out;
92137 +       iput(inode);
92138 +       inode = NULL;
92140 +       is_ro = sb_rdonly(sbi->sb);
92142 +       if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
92143 +               if (!is_ro) {
92144 +                       ntfs_warn(sb,
92145 +                                 "failed to replay log file. Can't mount rw!");
92146 +                       err = -EINVAL;
92147 +                       goto out;
92148 +               }
92149 +       } else if (sbi->volume.flags & VOLUME_FLAG_DIRTY) {
92150 +               if (!is_ro && !sbi->options.force) {
92151 +                       ntfs_warn(
92152 +                               sb,
92153 +                               "volume is dirty and \"force\" flag is not set!");
92154 +                       err = -EINVAL;
92155 +                       goto out;
92156 +               }
92157 +       }
92159 +       /* Load $MFT */
92160 +       ref.low = cpu_to_le32(MFT_REC_MFT);
92161 +       ref.seq = cpu_to_le16(1);
92163 +       inode = ntfs_iget5(sb, &ref, &NAME_MFT);
92164 +       if (IS_ERR(inode)) {
92165 +               err = PTR_ERR(inode);
92166 +               ntfs_err(sb, "Failed to load $MFT.");
92167 +               inode = NULL;
92168 +               goto out;
92169 +       }
92171 +       ni = ntfs_i(inode);
92173 +       sbi->mft.used = ni->i_valid >> sbi->record_bits;
92174 +       tt = inode->i_size >> sbi->record_bits;
92175 +       sbi->mft.next_free = MFT_REC_USER;
92177 +       err = wnd_init(&sbi->mft.bitmap, sb, tt);
92178 +       if (err)
92179 +               goto out;
92181 +       err = ni_load_all_mi(ni);
92182 +       if (err)
92183 +               goto out;
92185 +       sbi->mft.ni = ni;
92187 +       /* Load $BadClus */
92188 +       ref.low = cpu_to_le32(MFT_REC_BADCLUST);
92189 +       ref.seq = cpu_to_le16(MFT_REC_BADCLUST);
92190 +       inode = ntfs_iget5(sb, &ref, &NAME_BADCLUS);
92191 +       if (IS_ERR(inode)) {
92192 +               err = PTR_ERR(inode);
92193 +               ntfs_err(sb, "Failed to load $BadClus.");
92194 +               inode = NULL;
92195 +               goto out;
92196 +       }
92198 +       ni = ntfs_i(inode);
92200 +       for (i = 0; run_get_entry(&ni->file.run, i, &vcn, &lcn, &len); i++) {
92201 +               if (lcn == SPARSE_LCN)
92202 +                       continue;
92204 +               if (!sbi->bad_clusters)
92205 +                       ntfs_notice(sb, "Volume contains bad blocks");
92207 +               sbi->bad_clusters += len;
92208 +       }
92210 +       iput(inode);
92212 +       /* Load $Bitmap */
92213 +       ref.low = cpu_to_le32(MFT_REC_BITMAP);
92214 +       ref.seq = cpu_to_le16(MFT_REC_BITMAP);
92215 +       inode = ntfs_iget5(sb, &ref, &NAME_BITMAP);
92216 +       if (IS_ERR(inode)) {
92217 +               err = PTR_ERR(inode);
92218 +               ntfs_err(sb, "Failed to load $Bitmap.");
92219 +               inode = NULL;
92220 +               goto out;
92221 +       }
92223 +       ni = ntfs_i(inode);
92225 +#ifndef CONFIG_NTFS3_64BIT_CLUSTER
92226 +       if (inode->i_size >> 32) {
92227 +               err = -EINVAL;
92228 +               goto out;
92229 +       }
92230 +#endif
92232 +       /* Check bitmap boundary */
92233 +       tt = sbi->used.bitmap.nbits;
92234 +       if (inode->i_size < bitmap_size(tt)) {
92235 +               err = -EINVAL;
92236 +               goto out;
92237 +       }
92239 +       /* Not necessary */
92240 +       sbi->used.bitmap.set_tail = true;
92241 +       err = wnd_init(&sbi->used.bitmap, sbi->sb, tt);
92242 +       if (err)
92243 +               goto out;
92245 +       iput(inode);
92247 +       /* Compute the mft zone */
92248 +       err = ntfs_refresh_zone(sbi);
92249 +       if (err)
92250 +               goto out;
92252 +       /* Load $AttrDef */
92253 +       ref.low = cpu_to_le32(MFT_REC_ATTR);
92254 +       ref.seq = cpu_to_le16(MFT_REC_ATTR);
92255 +       inode = ntfs_iget5(sbi->sb, &ref, &NAME_ATTRDEF);
92256 +       if (IS_ERR(inode)) {
92257 +               err = PTR_ERR(inode);
92258 +               ntfs_err(sb, "Failed to load $AttrDef -> %d", err);
92259 +               inode = NULL;
92260 +               goto out;
92261 +       }
92263 +       if (inode->i_size < sizeof(struct ATTR_DEF_ENTRY)) {
92264 +               err = -EINVAL;
92265 +               goto out;
92266 +       }
92267 +       bytes = inode->i_size;
92268 +       sbi->def_table = t = ntfs_malloc(bytes);
92269 +       if (!t) {
92270 +               err = -ENOMEM;
92271 +               goto out;
92272 +       }
92274 +       for (done = idx = 0; done < bytes; done += PAGE_SIZE, idx++) {
92275 +               unsigned long tail = bytes - done;
92276 +               struct page *page = ntfs_map_page(inode->i_mapping, idx);
92278 +               if (IS_ERR(page)) {
92279 +                       err = PTR_ERR(page);
92280 +                       goto out;
92281 +               }
92282 +               memcpy(Add2Ptr(t, done), page_address(page),
92283 +                      min(PAGE_SIZE, tail));
92284 +               ntfs_unmap_page(page);
92286 +               if (!idx && ATTR_STD != t->type) {
92287 +                       err = -EINVAL;
92288 +                       goto out;
92289 +               }
92290 +       }
92292 +       t += 1;
92293 +       sbi->def_entries = 1;
92294 +       done = sizeof(struct ATTR_DEF_ENTRY);
92295 +       sbi->reparse.max_size = MAXIMUM_REPARSE_DATA_BUFFER_SIZE;
92296 +       sbi->ea_max_size = 0x10000; /* default formater value */
92298 +       while (done + sizeof(struct ATTR_DEF_ENTRY) <= bytes) {
92299 +               u32 t32 = le32_to_cpu(t->type);
92300 +               u64 sz = le64_to_cpu(t->max_sz);
92302 +               if ((t32 & 0xF) || le32_to_cpu(t[-1].type) >= t32)
92303 +                       break;
92305 +               if (t->type == ATTR_REPARSE)
92306 +                       sbi->reparse.max_size = sz;
92307 +               else if (t->type == ATTR_EA)
92308 +                       sbi->ea_max_size = sz;
92310 +               done += sizeof(struct ATTR_DEF_ENTRY);
92311 +               t += 1;
92312 +               sbi->def_entries += 1;
92313 +       }
92314 +       iput(inode);
92316 +       /* Load $UpCase */
92317 +       ref.low = cpu_to_le32(MFT_REC_UPCASE);
92318 +       ref.seq = cpu_to_le16(MFT_REC_UPCASE);
92319 +       inode = ntfs_iget5(sb, &ref, &NAME_UPCASE);
92320 +       if (IS_ERR(inode)) {
92321 +               err = PTR_ERR(inode);
92322 +               ntfs_err(sb, "Failed to load \x24LogFile.");
92323 +               inode = NULL;
92324 +               goto out;
92325 +       }
92327 +       ni = ntfs_i(inode);
92329 +       if (inode->i_size != 0x10000 * sizeof(short)) {
92330 +               err = -EINVAL;
92331 +               goto out;
92332 +       }
92334 +       sbi->upcase = upcase = ntfs_vmalloc(0x10000 * sizeof(short));
92335 +       if (!upcase) {
92336 +               err = -ENOMEM;
92337 +               goto out;
92338 +       }
92340 +       for (idx = 0; idx < (0x10000 * sizeof(short) >> PAGE_SHIFT); idx++) {
92341 +               const __le16 *src;
92342 +               u16 *dst = Add2Ptr(upcase, idx << PAGE_SHIFT);
92343 +               struct page *page = ntfs_map_page(inode->i_mapping, idx);
92345 +               if (IS_ERR(page)) {
92346 +                       err = PTR_ERR(page);
92347 +                       goto out;
92348 +               }
92350 +               src = page_address(page);
92352 +#ifdef __BIG_ENDIAN
92353 +               for (i = 0; i < PAGE_SIZE / sizeof(u16); i++)
92354 +                       *dst++ = le16_to_cpu(*src++);
92355 +#else
92356 +               memcpy(dst, src, PAGE_SIZE);
92357 +#endif
92358 +               ntfs_unmap_page(page);
92359 +       }
92361 +       shared = ntfs_set_shared(upcase, 0x10000 * sizeof(short));
92362 +       if (shared && upcase != shared) {
92363 +               sbi->upcase = shared;
92364 +               ntfs_vfree(upcase);
92365 +       }
92367 +       iput(inode);
92368 +       inode = NULL;
92370 +       if (is_ntfs3(sbi)) {
92371 +               /* Load $Secure */
92372 +               err = ntfs_security_init(sbi);
92373 +               if (err)
92374 +                       goto out;
92376 +               /* Load $Extend */
92377 +               err = ntfs_extend_init(sbi);
92378 +               if (err)
92379 +                       goto load_root;
92381 +               /* Load $Extend\$Reparse */
92382 +               err = ntfs_reparse_init(sbi);
92383 +               if (err)
92384 +                       goto load_root;
92386 +               /* Load $Extend\$ObjId */
92387 +               err = ntfs_objid_init(sbi);
92388 +               if (err)
92389 +                       goto load_root;
92390 +       }
92392 +load_root:
92393 +       /* Load root */
92394 +       ref.low = cpu_to_le32(MFT_REC_ROOT);
92395 +       ref.seq = cpu_to_le16(MFT_REC_ROOT);
92396 +       inode = ntfs_iget5(sb, &ref, &NAME_ROOT);
92397 +       if (IS_ERR(inode)) {
92398 +               err = PTR_ERR(inode);
92399 +               ntfs_err(sb, "Failed to load root.");
92400 +               inode = NULL;
92401 +               goto out;
92402 +       }
92404 +       ni = ntfs_i(inode);
92406 +       sb->s_root = d_make_root(inode);
92408 +       if (!sb->s_root) {
92409 +               err = -EINVAL;
92410 +               goto out;
92411 +       }
92413 +       return 0;
92415 +out:
92416 +       iput(inode);
92418 +       if (sb->s_root) {
92419 +               d_drop(sb->s_root);
92420 +               sb->s_root = NULL;
92421 +       }
92423 +       put_ntfs(sbi);
92425 +       sb->s_fs_info = NULL;
92426 +       return err;
92429 +void ntfs_unmap_meta(struct super_block *sb, CLST lcn, CLST len)
92431 +       struct ntfs_sb_info *sbi = sb->s_fs_info;
92432 +       struct block_device *bdev = sb->s_bdev;
92433 +       sector_t devblock = (u64)lcn * sbi->blocks_per_cluster;
92434 +       unsigned long blocks = (u64)len * sbi->blocks_per_cluster;
92435 +       unsigned long cnt = 0;
92436 +       unsigned long limit = global_zone_page_state(NR_FREE_PAGES)
92437 +                             << (PAGE_SHIFT - sb->s_blocksize_bits);
92439 +       if (limit >= 0x2000)
92440 +               limit -= 0x1000;
92441 +       else if (limit < 32)
92442 +               limit = 32;
92443 +       else
92444 +               limit >>= 1;
92446 +       while (blocks--) {
92447 +               clean_bdev_aliases(bdev, devblock++, 1);
92448 +               if (cnt++ >= limit) {
92449 +                       sync_blockdev(bdev);
92450 +                       cnt = 0;
92451 +               }
92452 +       }
92456 + * ntfs_discard
92457 + *
92458 + * issue a discard request (trim for SSD)
92459 + */
92460 +int ntfs_discard(struct ntfs_sb_info *sbi, CLST lcn, CLST len)
92462 +       int err;
92463 +       u64 lbo, bytes, start, end;
92464 +       struct super_block *sb;
92466 +       if (sbi->used.next_free_lcn == lcn + len)
92467 +               sbi->used.next_free_lcn = lcn;
92469 +       if (sbi->flags & NTFS_FLAGS_NODISCARD)
92470 +               return -EOPNOTSUPP;
92472 +       if (!sbi->options.discard)
92473 +               return -EOPNOTSUPP;
92475 +       lbo = (u64)lcn << sbi->cluster_bits;
92476 +       bytes = (u64)len << sbi->cluster_bits;
92478 +       /* Align up 'start' on discard_granularity */
92479 +       start = (lbo + sbi->discard_granularity - 1) &
92480 +               sbi->discard_granularity_mask_inv;
92481 +       /* Align down 'end' on discard_granularity */
92482 +       end = (lbo + bytes) & sbi->discard_granularity_mask_inv;
92484 +       sb = sbi->sb;
92485 +       if (start >= end)
92486 +               return 0;
92488 +       err = blkdev_issue_discard(sb->s_bdev, start >> 9, (end - start) >> 9,
92489 +                                  GFP_NOFS, 0);
92491 +       if (err == -EOPNOTSUPP)
92492 +               sbi->flags |= NTFS_FLAGS_NODISCARD;
92494 +       return err;
92497 +static struct dentry *ntfs_mount(struct file_system_type *fs_type, int flags,
92498 +                                const char *dev_name, void *data)
92500 +       return mount_bdev(fs_type, flags, dev_name, data, ntfs_fill_super);
92503 +static struct file_system_type ntfs_fs_type = {
92504 +       .owner = THIS_MODULE,
92505 +       .name = "ntfs3",
92506 +       .mount = ntfs_mount,
92507 +       .kill_sb = kill_block_super,
92508 +       .fs_flags = FS_REQUIRES_DEV,
92511 +static int __init init_ntfs_fs(void)
92513 +       int err;
92515 +       pr_notice("ntfs3: Index binary search\n");
92516 +       pr_notice("ntfs3: Hot fix free clusters\n");
92517 +       pr_notice("ntfs3: Max link count %u\n", NTFS_LINK_MAX);
92519 +#ifdef CONFIG_NTFS3_FS_POSIX_ACL
92520 +       pr_notice("ntfs3: Enabled Linux POSIX ACLs support\n");
92521 +#endif
92522 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
92523 +       pr_notice("ntfs3: Activated 64 bits per cluster\n");
92524 +#else
92525 +       pr_notice("ntfs3: Activated 32 bits per cluster\n");
92526 +#endif
92527 +#ifdef CONFIG_NTFS3_LZX_XPRESS
92528 +       pr_notice("ntfs3: Read-only lzx/xpress compression included\n");
92529 +#endif
92531 +       err = ntfs3_init_bitmap();
92532 +       if (err)
92533 +               return err;
92535 +       ntfs_inode_cachep = kmem_cache_create(
92536 +               "ntfs_inode_cache", sizeof(struct ntfs_inode), 0,
92537 +               (SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT),
92538 +               init_once);
92539 +       if (!ntfs_inode_cachep) {
92540 +               err = -ENOMEM;
92541 +               goto out1;
92542 +       }
92544 +       err = register_filesystem(&ntfs_fs_type);
92545 +       if (err)
92546 +               goto out;
92548 +       return 0;
92549 +out:
92550 +       kmem_cache_destroy(ntfs_inode_cachep);
92551 +out1:
92552 +       ntfs3_exit_bitmap();
92553 +       return err;
92556 +static void __exit exit_ntfs_fs(void)
92558 +       if (ntfs_inode_cachep) {
92559 +               rcu_barrier();
92560 +               kmem_cache_destroy(ntfs_inode_cachep);
92561 +       }
92563 +       unregister_filesystem(&ntfs_fs_type);
92564 +       ntfs3_exit_bitmap();
92567 +MODULE_LICENSE("GPL");
92568 +MODULE_DESCRIPTION("ntfs3 read/write filesystem");
92569 +MODULE_INFO(behaviour, "Index binary search");
92570 +MODULE_INFO(behaviour, "Hot fix free clusters");
92571 +#ifdef CONFIG_NTFS3_FS_POSIX_ACL
92572 +MODULE_INFO(behaviour, "Enabled Linux POSIX ACLs support");
92573 +#endif
92574 +#ifdef CONFIG_NTFS3_64BIT_CLUSTER
92575 +MODULE_INFO(cluster, "Activated 64 bits per cluster");
92576 +#else
92577 +MODULE_INFO(cluster, "Activated 32 bits per cluster");
92578 +#endif
92579 +#ifdef CONFIG_NTFS3_LZX_XPRESS
92580 +MODULE_INFO(compression, "Read-only lzx/xpress compression included");
92581 +#endif
92583 +MODULE_AUTHOR("Konstantin Komarov");
92584 +MODULE_ALIAS_FS("ntfs3");
92586 +module_init(init_ntfs_fs);
92587 +module_exit(exit_ntfs_fs);
92588 diff --git a/fs/ntfs3/upcase.c b/fs/ntfs3/upcase.c
92589 new file mode 100644
92590 index 000000000000..9617382aca64
92591 --- /dev/null
92592 +++ b/fs/ntfs3/upcase.c
92593 @@ -0,0 +1,105 @@
92594 +// SPDX-License-Identifier: GPL-2.0
92596 + *
92597 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
92598 + *
92599 + */
92600 +#include <linux/blkdev.h>
92601 +#include <linux/buffer_head.h>
92602 +#include <linux/module.h>
92603 +#include <linux/nls.h>
92605 +#include "debug.h"
92606 +#include "ntfs.h"
92607 +#include "ntfs_fs.h"
92609 +static inline u16 upcase_unicode_char(const u16 *upcase, u16 chr)
92611 +       if (chr < 'a')
92612 +               return chr;
92614 +       if (chr <= 'z')
92615 +               return chr - ('a' - 'A');
92617 +       return upcase[chr];
92621 + * Thanks Kari Argillander <kari.argillander@gmail.com> for idea and implementation 'bothcase'
92622 + *
92623 + * Straigth way to compare names:
92624 + * - case insensitive
92625 + * - if name equals and 'bothcases' then
92626 + * - case sensitive
92627 + * 'Straigth way' code scans input names twice in worst case
92628 + * Optimized code scans input names only once
92629 + */
92630 +int ntfs_cmp_names(const __le16 *s1, size_t l1, const __le16 *s2, size_t l2,
92631 +                  const u16 *upcase, bool bothcase)
92633 +       int diff1 = 0;
92634 +       int diff2;
92635 +       size_t len = min(l1, l2);
92637 +       if (!bothcase && upcase)
92638 +               goto case_insentive;
92640 +       for (; len; s1++, s2++, len--) {
92641 +               diff1 = le16_to_cpu(*s1) - le16_to_cpu(*s2);
92642 +               if (diff1) {
92643 +                       if (bothcase && upcase)
92644 +                               goto case_insentive;
92646 +                       return diff1;
92647 +               }
92648 +       }
92649 +       return l1 - l2;
92651 +case_insentive:
92652 +       for (; len; s1++, s2++, len--) {
92653 +               diff2 = upcase_unicode_char(upcase, le16_to_cpu(*s1)) -
92654 +                       upcase_unicode_char(upcase, le16_to_cpu(*s2));
92655 +               if (diff2)
92656 +                       return diff2;
92657 +       }
92659 +       diff2 = l1 - l2;
92660 +       return diff2 ? diff2 : diff1;
92663 +int ntfs_cmp_names_cpu(const struct cpu_str *uni1, const struct le_str *uni2,
92664 +                      const u16 *upcase, bool bothcase)
92666 +       const u16 *s1 = uni1->name;
92667 +       const __le16 *s2 = uni2->name;
92668 +       size_t l1 = uni1->len;
92669 +       size_t l2 = uni2->len;
92670 +       size_t len = min(l1, l2);
92671 +       int diff1 = 0;
92672 +       int diff2;
92674 +       if (!bothcase && upcase)
92675 +               goto case_insentive;
92677 +       for (; len; s1++, s2++, len--) {
92678 +               diff1 = *s1 - le16_to_cpu(*s2);
92679 +               if (diff1) {
92680 +                       if (bothcase && upcase)
92681 +                               goto case_insentive;
92683 +                       return diff1;
92684 +               }
92685 +       }
92686 +       return l1 - l2;
92688 +case_insentive:
92689 +       for (; len; s1++, s2++, len--) {
92690 +               diff2 = upcase_unicode_char(upcase, *s1) -
92691 +                       upcase_unicode_char(upcase, le16_to_cpu(*s2));
92692 +               if (diff2)
92693 +                       return diff2;
92694 +       }
92696 +       diff2 = l1 - l2;
92697 +       return diff2 ? diff2 : diff1;
92699 diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c
92700 new file mode 100644
92701 index 000000000000..759df507c92c
92702 --- /dev/null
92703 +++ b/fs/ntfs3/xattr.c
92704 @@ -0,0 +1,1046 @@
92705 +// SPDX-License-Identifier: GPL-2.0
92707 + *
92708 + * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
92709 + *
92710 + */
92712 +#include <linux/blkdev.h>
92713 +#include <linux/buffer_head.h>
92714 +#include <linux/fs.h>
92715 +#include <linux/nls.h>
92716 +#include <linux/posix_acl.h>
92717 +#include <linux/posix_acl_xattr.h>
92718 +#include <linux/xattr.h>
92720 +#include "debug.h"
92721 +#include "ntfs.h"
92722 +#include "ntfs_fs.h"
92724 +// clang-format off
92725 +#define SYSTEM_DOS_ATTRIB    "system.dos_attrib"
92726 +#define SYSTEM_NTFS_ATTRIB   "system.ntfs_attrib"
92727 +#define SYSTEM_NTFS_SECURITY "system.ntfs_security"
92728 +// clang-format on
92730 +static inline size_t unpacked_ea_size(const struct EA_FULL *ea)
92732 +       return ea->size ? le32_to_cpu(ea->size)
92733 +                       : DwordAlign(struct_size(
92734 +                                 ea, name,
92735 +                                 1 + ea->name_len + le16_to_cpu(ea->elength)));
92738 +static inline size_t packed_ea_size(const struct EA_FULL *ea)
92740 +       return struct_size(ea, name,
92741 +                          1 + ea->name_len + le16_to_cpu(ea->elength)) -
92742 +              offsetof(struct EA_FULL, flags);
92746 + * find_ea
92747 + *
92748 + * assume there is at least one xattr in the list
92749 + */
92750 +static inline bool find_ea(const struct EA_FULL *ea_all, u32 bytes,
92751 +                          const char *name, u8 name_len, u32 *off)
92753 +       *off = 0;
92755 +       if (!ea_all || !bytes)
92756 +               return false;
92758 +       for (;;) {
92759 +               const struct EA_FULL *ea = Add2Ptr(ea_all, *off);
92760 +               u32 next_off = *off + unpacked_ea_size(ea);
92762 +               if (next_off > bytes)
92763 +                       return false;
92765 +               if (ea->name_len == name_len &&
92766 +                   !memcmp(ea->name, name, name_len))
92767 +                       return true;
92769 +               *off = next_off;
92770 +               if (next_off >= bytes)
92771 +                       return false;
92772 +       }
92776 + * ntfs_read_ea
92777 + *
92778 + * reads all extended attributes
92779 + * ea - new allocated memory
92780 + * info - pointer into resident data
92781 + */
92782 +static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea,
92783 +                       size_t add_bytes, const struct EA_INFO **info)
92785 +       int err;
92786 +       struct ATTR_LIST_ENTRY *le = NULL;
92787 +       struct ATTRIB *attr_info, *attr_ea;
92788 +       void *ea_p;
92789 +       u32 size;
92791 +       static_assert(le32_to_cpu(ATTR_EA_INFO) < le32_to_cpu(ATTR_EA));
92793 +       *ea = NULL;
92794 +       *info = NULL;
92796 +       attr_info =
92797 +               ni_find_attr(ni, NULL, &le, ATTR_EA_INFO, NULL, 0, NULL, NULL);
92798 +       attr_ea =
92799 +               ni_find_attr(ni, attr_info, &le, ATTR_EA, NULL, 0, NULL, NULL);
92801 +       if (!attr_ea || !attr_info)
92802 +               return 0;
92804 +       *info = resident_data_ex(attr_info, sizeof(struct EA_INFO));
92805 +       if (!*info)
92806 +               return -EINVAL;
92808 +       /* Check Ea limit */
92809 +       size = le32_to_cpu((*info)->size);
92810 +       if (size > ni->mi.sbi->ea_max_size)
92811 +               return -EFBIG;
92813 +       if (attr_size(attr_ea) > ni->mi.sbi->ea_max_size)
92814 +               return -EFBIG;
92816 +       /* Allocate memory for packed Ea */
92817 +       ea_p = ntfs_malloc(size + add_bytes);
92818 +       if (!ea_p)
92819 +               return -ENOMEM;
92821 +       if (attr_ea->non_res) {
92822 +               struct runs_tree run;
92824 +               run_init(&run);
92826 +               err = attr_load_runs(attr_ea, ni, &run, NULL);
92827 +               if (!err)
92828 +                       err = ntfs_read_run_nb(ni->mi.sbi, &run, 0, ea_p, size,
92829 +                                              NULL);
92830 +               run_close(&run);
92832 +               if (err)
92833 +                       goto out;
92834 +       } else {
92835 +               void *p = resident_data_ex(attr_ea, size);
92837 +               if (!p) {
92838 +                       err = -EINVAL;
92839 +                       goto out;
92840 +               }
92841 +               memcpy(ea_p, p, size);
92842 +       }
92844 +       memset(Add2Ptr(ea_p, size), 0, add_bytes);
92845 +       *ea = ea_p;
92846 +       return 0;
92848 +out:
92849 +       ntfs_free(ea_p);
92850 +       *ea = NULL;
92851 +       return err;
92855 + * ntfs_list_ea
92856 + *
92857 + * copy a list of xattrs names into the buffer
92858 + * provided, or compute the buffer size required
92859 + *
92860 + * Returns a negative error number on failure, or the number of bytes
92861 + * used / required on success.
92862 + */
92863 +static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
92864 +                           size_t bytes_per_buffer)
92866 +       const struct EA_INFO *info;
92867 +       struct EA_FULL *ea_all = NULL;
92868 +       const struct EA_FULL *ea;
92869 +       u32 off, size;
92870 +       int err;
92871 +       size_t ret;
92873 +       err = ntfs_read_ea(ni, &ea_all, 0, &info);
92874 +       if (err)
92875 +               return err;
92877 +       if (!info || !ea_all)
92878 +               return 0;
92880 +       size = le32_to_cpu(info->size);
92882 +       /* Enumerate all xattrs */
92883 +       for (ret = 0, off = 0; off < size; off += unpacked_ea_size(ea)) {
92884 +               ea = Add2Ptr(ea_all, off);
92886 +               if (buffer) {
92887 +                       if (ret + ea->name_len + 1 > bytes_per_buffer) {
92888 +                               err = -ERANGE;
92889 +                               goto out;
92890 +                       }
92892 +                       memcpy(buffer + ret, ea->name, ea->name_len);
92893 +                       buffer[ret + ea->name_len] = 0;
92894 +               }
92896 +               ret += ea->name_len + 1;
92897 +       }
92899 +out:
92900 +       ntfs_free(ea_all);
92901 +       return err ? err : ret;
92904 +static int ntfs_get_ea(struct inode *inode, const char *name, size_t name_len,
92905 +                      void *buffer, size_t size, size_t *required)
92907 +       struct ntfs_inode *ni = ntfs_i(inode);
92908 +       const struct EA_INFO *info;
92909 +       struct EA_FULL *ea_all = NULL;
92910 +       const struct EA_FULL *ea;
92911 +       u32 off, len;
92912 +       int err;
92914 +       if (!(ni->ni_flags & NI_FLAG_EA))
92915 +               return -ENODATA;
92917 +       if (!required)
92918 +               ni_lock(ni);
92920 +       len = 0;
92922 +       if (name_len > 255) {
92923 +               err = -ENAMETOOLONG;
92924 +               goto out;
92925 +       }
92927 +       err = ntfs_read_ea(ni, &ea_all, 0, &info);
92928 +       if (err)
92929 +               goto out;
92931 +       if (!info)
92932 +               goto out;
92934 +       /* Enumerate all xattrs */
92935 +       if (!find_ea(ea_all, le32_to_cpu(info->size), name, name_len, &off)) {
92936 +               err = -ENODATA;
92937 +               goto out;
92938 +       }
92939 +       ea = Add2Ptr(ea_all, off);
92941 +       len = le16_to_cpu(ea->elength);
92942 +       if (!buffer) {
92943 +               err = 0;
92944 +               goto out;
92945 +       }
92947 +       if (len > size) {
92948 +               err = -ERANGE;
92949 +               if (required)
92950 +                       *required = len;
92951 +               goto out;
92952 +       }
92954 +       memcpy(buffer, ea->name + ea->name_len + 1, len);
92955 +       err = 0;
92957 +out:
92958 +       ntfs_free(ea_all);
92959 +       if (!required)
92960 +               ni_unlock(ni);
92962 +       return err ? err : len;
92965 +static noinline int ntfs_set_ea(struct inode *inode, const char *name,
92966 +                               size_t name_len, const void *value,
92967 +                               size_t val_size, int flags, int locked)
92969 +       struct ntfs_inode *ni = ntfs_i(inode);
92970 +       struct ntfs_sb_info *sbi = ni->mi.sbi;
92971 +       int err;
92972 +       struct EA_INFO ea_info;
92973 +       const struct EA_INFO *info;
92974 +       struct EA_FULL *new_ea;
92975 +       struct EA_FULL *ea_all = NULL;
92976 +       size_t add, new_pack;
92977 +       u32 off, size;
92978 +       __le16 size_pack;
92979 +       struct ATTRIB *attr;
92980 +       struct ATTR_LIST_ENTRY *le;
92981 +       struct mft_inode *mi;
92982 +       struct runs_tree ea_run;
92983 +       u64 new_sz;
92984 +       void *p;
92986 +       if (!locked)
92987 +               ni_lock(ni);
92989 +       run_init(&ea_run);
92991 +       if (name_len > 255) {
92992 +               err = -ENAMETOOLONG;
92993 +               goto out;
92994 +       }
92996 +       add = DwordAlign(struct_size(ea_all, name, 1 + name_len + val_size));
92998 +       err = ntfs_read_ea(ni, &ea_all, add, &info);
92999 +       if (err)
93000 +               goto out;
93002 +       if (!info) {
93003 +               memset(&ea_info, 0, sizeof(ea_info));
93004 +               size = 0;
93005 +               size_pack = 0;
93006 +       } else {
93007 +               memcpy(&ea_info, info, sizeof(ea_info));
93008 +               size = le32_to_cpu(ea_info.size);
93009 +               size_pack = ea_info.size_pack;
93010 +       }
93012 +       if (info && find_ea(ea_all, size, name, name_len, &off)) {
93013 +               struct EA_FULL *ea;
93014 +               size_t ea_sz;
93016 +               if (flags & XATTR_CREATE) {
93017 +                       err = -EEXIST;
93018 +                       goto out;
93019 +               }
93021 +               /* Remove current xattr */
93022 +               ea = Add2Ptr(ea_all, off);
93023 +               if (ea->flags & FILE_NEED_EA)
93024 +                       le16_add_cpu(&ea_info.count, -1);
93026 +               ea_sz = unpacked_ea_size(ea);
93028 +               le16_add_cpu(&ea_info.size_pack, 0 - packed_ea_size(ea));
93030 +               memmove(ea, Add2Ptr(ea, ea_sz), size - off - ea_sz);
93032 +               size -= ea_sz;
93033 +               memset(Add2Ptr(ea_all, size), 0, ea_sz);
93035 +               ea_info.size = cpu_to_le32(size);
93037 +               if ((flags & XATTR_REPLACE) && !val_size)
93038 +                       goto update_ea;
93039 +       } else {
93040 +               if (flags & XATTR_REPLACE) {
93041 +                       err = -ENODATA;
93042 +                       goto out;
93043 +               }
93045 +               if (!ea_all) {
93046 +                       ea_all = ntfs_zalloc(add);
93047 +                       if (!ea_all) {
93048 +                               err = -ENOMEM;
93049 +                               goto out;
93050 +                       }
93051 +               }
93052 +       }
93054 +       /* append new xattr */
93055 +       new_ea = Add2Ptr(ea_all, size);
93056 +       new_ea->size = cpu_to_le32(add);
93057 +       new_ea->flags = 0;
93058 +       new_ea->name_len = name_len;
93059 +       new_ea->elength = cpu_to_le16(val_size);
93060 +       memcpy(new_ea->name, name, name_len);
93061 +       new_ea->name[name_len] = 0;
93062 +       memcpy(new_ea->name + name_len + 1, value, val_size);
93063 +       new_pack = le16_to_cpu(ea_info.size_pack) + packed_ea_size(new_ea);
93065 +       /* should fit into 16 bits */
93066 +       if (new_pack > 0xffff) {
93067 +               err = -EFBIG; // -EINVAL?
93068 +               goto out;
93069 +       }
93070 +       ea_info.size_pack = cpu_to_le16(new_pack);
93072 +       /* new size of ATTR_EA */
93073 +       size += add;
93074 +       if (size > sbi->ea_max_size) {
93075 +               err = -EFBIG; // -EINVAL?
93076 +               goto out;
93077 +       }
93078 +       ea_info.size = cpu_to_le32(size);
93080 +update_ea:
93082 +       if (!info) {
93083 +               /* Create xattr */
93084 +               if (!size) {
93085 +                       err = 0;
93086 +                       goto out;
93087 +               }
93089 +               err = ni_insert_resident(ni, sizeof(struct EA_INFO),
93090 +                                        ATTR_EA_INFO, NULL, 0, NULL, NULL);
93091 +               if (err)
93092 +                       goto out;
93094 +               err = ni_insert_resident(ni, 0, ATTR_EA, NULL, 0, NULL, NULL);
93095 +               if (err)
93096 +                       goto out;
93097 +       }
93099 +       new_sz = size;
93100 +       err = attr_set_size(ni, ATTR_EA, NULL, 0, &ea_run, new_sz, &new_sz,
93101 +                           false, NULL);
93102 +       if (err)
93103 +               goto out;
93105 +       le = NULL;
93106 +       attr = ni_find_attr(ni, NULL, &le, ATTR_EA_INFO, NULL, 0, NULL, &mi);
93107 +       if (!attr) {
93108 +               err = -EINVAL;
93109 +               goto out;
93110 +       }
93112 +       if (!size) {
93113 +               /* delete xattr, ATTR_EA_INFO */
93114 +               err = ni_remove_attr_le(ni, attr, le);
93115 +               if (err)
93116 +                       goto out;
93117 +       } else {
93118 +               p = resident_data_ex(attr, sizeof(struct EA_INFO));
93119 +               if (!p) {
93120 +                       err = -EINVAL;
93121 +                       goto out;
93122 +               }
93123 +               memcpy(p, &ea_info, sizeof(struct EA_INFO));
93124 +               mi->dirty = true;
93125 +       }
93127 +       le = NULL;
93128 +       attr = ni_find_attr(ni, NULL, &le, ATTR_EA, NULL, 0, NULL, &mi);
93129 +       if (!attr) {
93130 +               err = -EINVAL;
93131 +               goto out;
93132 +       }
93134 +       if (!size) {
93135 +               /* delete xattr, ATTR_EA */
93136 +               err = ni_remove_attr_le(ni, attr, le);
93137 +               if (err)
93138 +                       goto out;
93139 +       } else if (attr->non_res) {
93140 +               err = ntfs_sb_write_run(sbi, &ea_run, 0, ea_all, size);
93141 +               if (err)
93142 +                       goto out;
93143 +       } else {
93144 +               p = resident_data_ex(attr, size);
93145 +               if (!p) {
93146 +                       err = -EINVAL;
93147 +                       goto out;
93148 +               }
93149 +               memcpy(p, ea_all, size);
93150 +               mi->dirty = true;
93151 +       }
93153 +       /* Check if we delete the last xattr */
93154 +       if (size)
93155 +               ni->ni_flags |= NI_FLAG_EA;
93156 +       else
93157 +               ni->ni_flags &= ~NI_FLAG_EA;
93159 +       if (ea_info.size_pack != size_pack)
93160 +               ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
93161 +       mark_inode_dirty(&ni->vfs_inode);
93163 +out:
93164 +       if (!locked)
93165 +               ni_unlock(ni);
93167 +       run_close(&ea_run);
93168 +       ntfs_free(ea_all);
93170 +       return err;
93173 +#ifdef CONFIG_NTFS3_FS_POSIX_ACL
93174 +static inline void ntfs_posix_acl_release(struct posix_acl *acl)
93176 +       if (acl && refcount_dec_and_test(&acl->a_refcount))
93177 +               kfree(acl);
93180 +static struct posix_acl *ntfs_get_acl_ex(struct user_namespace *mnt_userns,
93181 +                                        struct inode *inode, int type,
93182 +                                        int locked)
93184 +       struct ntfs_inode *ni = ntfs_i(inode);
93185 +       const char *name;
93186 +       size_t name_len;
93187 +       struct posix_acl *acl;
93188 +       size_t req;
93189 +       int err;
93190 +       void *buf;
93192 +       /* allocate PATH_MAX bytes */
93193 +       buf = __getname();
93194 +       if (!buf)
93195 +               return ERR_PTR(-ENOMEM);
93197 +       /* Possible values of 'type' was already checked above */
93198 +       if (type == ACL_TYPE_ACCESS) {
93199 +               name = XATTR_NAME_POSIX_ACL_ACCESS;
93200 +               name_len = sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1;
93201 +       } else {
93202 +               name = XATTR_NAME_POSIX_ACL_DEFAULT;
93203 +               name_len = sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1;
93204 +       }
93206 +       if (!locked)
93207 +               ni_lock(ni);
93209 +       err = ntfs_get_ea(inode, name, name_len, buf, PATH_MAX, &req);
93211 +       if (!locked)
93212 +               ni_unlock(ni);
93214 +       /* Translate extended attribute to acl */
93215 +       if (err > 0) {
93216 +               acl = posix_acl_from_xattr(mnt_userns, buf, err);
93217 +               if (!IS_ERR(acl))
93218 +                       set_cached_acl(inode, type, acl);
93219 +       } else {
93220 +               acl = err == -ENODATA ? NULL : ERR_PTR(err);
93221 +       }
93223 +       __putname(buf);
93225 +       return acl;
93229 + * ntfs_get_acl
93230 + *
93231 + * inode_operations::get_acl
93232 + */
93233 +struct posix_acl *ntfs_get_acl(struct inode *inode, int type)
93235 +       /* TODO: init_user_ns? */
93236 +       return ntfs_get_acl_ex(&init_user_ns, inode, type, 0);
93239 +static noinline int ntfs_set_acl_ex(struct user_namespace *mnt_userns,
93240 +                                   struct inode *inode, struct posix_acl *acl,
93241 +                                   int type, int locked)
93243 +       const char *name;
93244 +       size_t size, name_len;
93245 +       void *value = NULL;
93246 +       int err = 0;
93248 +       if (S_ISLNK(inode->i_mode))
93249 +               return -EOPNOTSUPP;
93251 +       switch (type) {
93252 +       case ACL_TYPE_ACCESS:
93253 +               if (acl) {
93254 +                       umode_t mode = inode->i_mode;
93256 +                       err = posix_acl_equiv_mode(acl, &mode);
93257 +                       if (err < 0)
93258 +                               return err;
93260 +                       if (inode->i_mode != mode) {
93261 +                               inode->i_mode = mode;
93262 +                               mark_inode_dirty(inode);
93263 +                       }
93265 +                       if (!err) {
93266 +                               /*
93267 +                                * acl can be exactly represented in the
93268 +                                * traditional file mode permission bits
93269 +                                */
93270 +                               acl = NULL;
93271 +                               goto out;
93272 +                       }
93273 +               }
93274 +               name = XATTR_NAME_POSIX_ACL_ACCESS;
93275 +               name_len = sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1;
93276 +               break;
93278 +       case ACL_TYPE_DEFAULT:
93279 +               if (!S_ISDIR(inode->i_mode))
93280 +                       return acl ? -EACCES : 0;
93281 +               name = XATTR_NAME_POSIX_ACL_DEFAULT;
93282 +               name_len = sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1;
93283 +               break;
93285 +       default:
93286 +               return -EINVAL;
93287 +       }
93289 +       if (!acl)
93290 +               goto out;
93292 +       size = posix_acl_xattr_size(acl->a_count);
93293 +       value = ntfs_malloc(size);
93294 +       if (!value)
93295 +               return -ENOMEM;
93297 +       err = posix_acl_to_xattr(mnt_userns, acl, value, size);
93298 +       if (err)
93299 +               goto out;
93301 +       err = ntfs_set_ea(inode, name, name_len, value, size, 0, locked);
93302 +       if (err)
93303 +               goto out;
93305 +       inode->i_flags &= ~S_NOSEC;
93307 +out:
93308 +       if (!err)
93309 +               set_cached_acl(inode, type, acl);
93311 +       kfree(value);
93313 +       return err;
93317 + * ntfs_set_acl
93318 + *
93319 + * inode_operations::set_acl
93320 + */
93321 +int ntfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
93322 +                struct posix_acl *acl, int type)
93324 +       return ntfs_set_acl_ex(mnt_userns, inode, acl, type, 0);
93327 +static int ntfs_xattr_get_acl(struct user_namespace *mnt_userns,
93328 +                             struct inode *inode, int type, void *buffer,
93329 +                             size_t size)
93331 +       struct posix_acl *acl;
93332 +       int err;
93334 +       if (!(inode->i_sb->s_flags & SB_POSIXACL))
93335 +               return -EOPNOTSUPP;
93337 +       acl = ntfs_get_acl(inode, type);
93338 +       if (IS_ERR(acl))
93339 +               return PTR_ERR(acl);
93341 +       if (!acl)
93342 +               return -ENODATA;
93344 +       err = posix_acl_to_xattr(mnt_userns, acl, buffer, size);
93345 +       ntfs_posix_acl_release(acl);
93347 +       return err;
93350 +static int ntfs_xattr_set_acl(struct user_namespace *mnt_userns,
93351 +                             struct inode *inode, int type, const void *value,
93352 +                             size_t size)
93354 +       struct posix_acl *acl;
93355 +       int err;
93357 +       if (!(inode->i_sb->s_flags & SB_POSIXACL))
93358 +               return -EOPNOTSUPP;
93360 +       if (!inode_owner_or_capable(mnt_userns, inode))
93361 +               return -EPERM;
93363 +       if (!value)
93364 +               return 0;
93366 +       acl = posix_acl_from_xattr(mnt_userns, value, size);
93367 +       if (IS_ERR(acl))
93368 +               return PTR_ERR(acl);
93370 +       if (acl) {
93371 +               err = posix_acl_valid(mnt_userns, acl);
93372 +               if (err)
93373 +                       goto release_and_out;
93374 +       }
93376 +       err = ntfs_set_acl(mnt_userns, inode, acl, type);
93378 +release_and_out:
93379 +       ntfs_posix_acl_release(acl);
93380 +       return err;
93384 + * Initialize the ACLs of a new inode. Called from ntfs_create_inode.
93385 + */
93386 +int ntfs_init_acl(struct user_namespace *mnt_userns, struct inode *inode,
93387 +                 struct inode *dir)
93389 +       struct posix_acl *default_acl, *acl;
93390 +       int err;
93392 +       /*
93393 +        * TODO refactoring lock
93394 +        * ni_lock(dir) ... -> posix_acl_create(dir,...) -> ntfs_get_acl -> ni_lock(dir)
93395 +        */
93396 +       inode->i_default_acl = NULL;
93398 +       default_acl = ntfs_get_acl_ex(mnt_userns, dir, ACL_TYPE_DEFAULT, 1);
93400 +       if (!default_acl || default_acl == ERR_PTR(-EOPNOTSUPP)) {
93401 +               inode->i_mode &= ~current_umask();
93402 +               err = 0;
93403 +               goto out;
93404 +       }
93406 +       if (IS_ERR(default_acl)) {
93407 +               err = PTR_ERR(default_acl);
93408 +               goto out;
93409 +       }
93411 +       acl = default_acl;
93412 +       err = __posix_acl_create(&acl, GFP_NOFS, &inode->i_mode);
93413 +       if (err < 0)
93414 +               goto out1;
93415 +       if (!err) {
93416 +               posix_acl_release(acl);
93417 +               acl = NULL;
93418 +       }
93420 +       if (!S_ISDIR(inode->i_mode)) {
93421 +               posix_acl_release(default_acl);
93422 +               default_acl = NULL;
93423 +       }
93425 +       if (default_acl)
93426 +               err = ntfs_set_acl_ex(mnt_userns, inode, default_acl,
93427 +                                     ACL_TYPE_DEFAULT, 1);
93429 +       if (!acl)
93430 +               inode->i_acl = NULL;
93431 +       else if (!err)
93432 +               err = ntfs_set_acl_ex(mnt_userns, inode, acl, ACL_TYPE_ACCESS,
93433 +                                     1);
93435 +       posix_acl_release(acl);
93436 +out1:
93437 +       posix_acl_release(default_acl);
93439 +out:
93440 +       return err;
93442 +#endif
93445 + * ntfs_acl_chmod
93446 + *
93447 + * helper for 'ntfs3_setattr'
93448 + */
93449 +int ntfs_acl_chmod(struct user_namespace *mnt_userns, struct inode *inode)
93451 +       struct super_block *sb = inode->i_sb;
93453 +       if (!(sb->s_flags & SB_POSIXACL))
93454 +               return 0;
93456 +       if (S_ISLNK(inode->i_mode))
93457 +               return -EOPNOTSUPP;
93459 +       return posix_acl_chmod(mnt_userns, inode, inode->i_mode);
93463 + * ntfs_permission
93464 + *
93465 + * inode_operations::permission
93466 + */
93467 +int ntfs_permission(struct user_namespace *mnt_userns, struct inode *inode,
93468 +                   int mask)
93470 +       if (ntfs_sb(inode->i_sb)->options.no_acs_rules) {
93471 +               /* "no access rules" mode - allow all changes */
93472 +               return 0;
93473 +       }
93475 +       return generic_permission(mnt_userns, inode, mask);
93479 + * ntfs_listxattr
93480 + *
93481 + * inode_operations::listxattr
93482 + */
93483 +ssize_t ntfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
93485 +       struct inode *inode = d_inode(dentry);
93486 +       struct ntfs_inode *ni = ntfs_i(inode);
93487 +       ssize_t ret;
93489 +       if (!(ni->ni_flags & NI_FLAG_EA)) {
93490 +               /* no xattr in file */
93491 +               return 0;
93492 +       }
93494 +       ni_lock(ni);
93496 +       ret = ntfs_list_ea(ni, buffer, size);
93498 +       ni_unlock(ni);
93500 +       return ret;
93503 +static int ntfs_getxattr(const struct xattr_handler *handler, struct dentry *de,
93504 +                        struct inode *inode, const char *name, void *buffer,
93505 +                        size_t size)
93507 +       int err;
93508 +       struct ntfs_inode *ni = ntfs_i(inode);
93509 +       size_t name_len = strlen(name);
93511 +       /* Dispatch request */
93512 +       if (name_len == sizeof(SYSTEM_DOS_ATTRIB) - 1 &&
93513 +           !memcmp(name, SYSTEM_DOS_ATTRIB, sizeof(SYSTEM_DOS_ATTRIB))) {
93514 +               /* system.dos_attrib */
93515 +               if (!buffer) {
93516 +                       err = sizeof(u8);
93517 +               } else if (size < sizeof(u8)) {
93518 +                       err = -ENODATA;
93519 +               } else {
93520 +                       err = sizeof(u8);
93521 +                       *(u8 *)buffer = le32_to_cpu(ni->std_fa);
93522 +               }
93523 +               goto out;
93524 +       }
93526 +       if (name_len == sizeof(SYSTEM_NTFS_ATTRIB) - 1 &&
93527 +           !memcmp(name, SYSTEM_NTFS_ATTRIB, sizeof(SYSTEM_NTFS_ATTRIB))) {
93528 +               /* system.ntfs_attrib */
93529 +               if (!buffer) {
93530 +                       err = sizeof(u32);
93531 +               } else if (size < sizeof(u32)) {
93532 +                       err = -ENODATA;
93533 +               } else {
93534 +                       err = sizeof(u32);
93535 +                       *(u32 *)buffer = le32_to_cpu(ni->std_fa);
93536 +               }
93537 +               goto out;
93538 +       }
93540 +       if (name_len == sizeof(SYSTEM_NTFS_SECURITY) - 1 &&
93541 +           !memcmp(name, SYSTEM_NTFS_SECURITY, sizeof(SYSTEM_NTFS_SECURITY))) {
93542 +               /* system.ntfs_security*/
93543 +               struct SECURITY_DESCRIPTOR_RELATIVE *sd = NULL;
93544 +               size_t sd_size = 0;
93546 +               if (!is_ntfs3(ni->mi.sbi)) {
93547 +                       /* we should get nt4 security */
93548 +                       err = -EINVAL;
93549 +                       goto out;
93550 +               } else if (le32_to_cpu(ni->std_security_id) <
93551 +                          SECURITY_ID_FIRST) {
93552 +                       err = -ENOENT;
93553 +                       goto out;
93554 +               }
93556 +               err = ntfs_get_security_by_id(ni->mi.sbi, ni->std_security_id,
93557 +                                             &sd, &sd_size);
93558 +               if (err)
93559 +                       goto out;
93561 +               if (!is_sd_valid(sd, sd_size)) {
93562 +                       ntfs_inode_warn(
93563 +                               inode,
93564 +                               "looks like you get incorrect security descriptor id=%u",
93565 +                               ni->std_security_id);
93566 +               }
93568 +               if (!buffer) {
93569 +                       err = sd_size;
93570 +               } else if (size < sd_size) {
93571 +                       err = -ENODATA;
93572 +               } else {
93573 +                       err = sd_size;
93574 +                       memcpy(buffer, sd, sd_size);
93575 +               }
93576 +               ntfs_free(sd);
93577 +               goto out;
93578 +       }
93580 +#ifdef CONFIG_NTFS3_FS_POSIX_ACL
93581 +       if ((name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1 &&
93582 +            !memcmp(name, XATTR_NAME_POSIX_ACL_ACCESS,
93583 +                    sizeof(XATTR_NAME_POSIX_ACL_ACCESS))) ||
93584 +           (name_len == sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1 &&
93585 +            !memcmp(name, XATTR_NAME_POSIX_ACL_DEFAULT,
93586 +                    sizeof(XATTR_NAME_POSIX_ACL_DEFAULT)))) {
93587 +               /* TODO: init_user_ns? */
93588 +               err = ntfs_xattr_get_acl(
93589 +                       &init_user_ns, inode,
93590 +                       name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1
93591 +                               ? ACL_TYPE_ACCESS
93592 +                               : ACL_TYPE_DEFAULT,
93593 +                       buffer, size);
93594 +               goto out;
93595 +       }
93596 +#endif
93597 +       /* deal with ntfs extended attribute */
93598 +       err = ntfs_get_ea(inode, name, name_len, buffer, size, NULL);
93600 +out:
93601 +       return err;
93605 + * ntfs_setxattr
93606 + *
93607 + * inode_operations::setxattr
93608 + */
93609 +static noinline int ntfs_setxattr(const struct xattr_handler *handler,
93610 +                                 struct user_namespace *mnt_userns,
93611 +                                 struct dentry *de, struct inode *inode,
93612 +                                 const char *name, const void *value,
93613 +                                 size_t size, int flags)
93615 +       int err = -EINVAL;
93616 +       struct ntfs_inode *ni = ntfs_i(inode);
93617 +       size_t name_len = strlen(name);
93618 +       enum FILE_ATTRIBUTE new_fa;
93620 +       /* Dispatch request */
93621 +       if (name_len == sizeof(SYSTEM_DOS_ATTRIB) - 1 &&
93622 +           !memcmp(name, SYSTEM_DOS_ATTRIB, sizeof(SYSTEM_DOS_ATTRIB))) {
93623 +               if (sizeof(u8) != size)
93624 +                       goto out;
93625 +               new_fa = cpu_to_le32(*(u8 *)value);
93626 +               goto set_new_fa;
93627 +       }
93629 +       if (name_len == sizeof(SYSTEM_NTFS_ATTRIB) - 1 &&
93630 +           !memcmp(name, SYSTEM_NTFS_ATTRIB, sizeof(SYSTEM_NTFS_ATTRIB))) {
93631 +               if (size != sizeof(u32))
93632 +                       goto out;
93633 +               new_fa = cpu_to_le32(*(u32 *)value);
93635 +               if (S_ISREG(inode->i_mode)) {
93636 +                       /* Process compressed/sparsed in special way*/
93637 +                       ni_lock(ni);
93638 +                       err = ni_new_attr_flags(ni, new_fa);
93639 +                       ni_unlock(ni);
93640 +                       if (err)
93641 +                               goto out;
93642 +               }
93643 +set_new_fa:
93644 +               /*
93645 +                * Thanks Mark Harmstone:
93646 +                * keep directory bit consistency
93647 +                */
93648 +               if (S_ISDIR(inode->i_mode))
93649 +                       new_fa |= FILE_ATTRIBUTE_DIRECTORY;
93650 +               else
93651 +                       new_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
93653 +               if (ni->std_fa != new_fa) {
93654 +                       ni->std_fa = new_fa;
93655 +                       if (new_fa & FILE_ATTRIBUTE_READONLY)
93656 +                               inode->i_mode &= ~0222;
93657 +                       else
93658 +                               inode->i_mode |= 0222;
93659 +                       /* std attribute always in primary record */
93660 +                       ni->mi.dirty = true;
93661 +                       mark_inode_dirty(inode);
93662 +               }
93663 +               err = 0;
93665 +               goto out;
93666 +       }
93668 +       if (name_len == sizeof(SYSTEM_NTFS_SECURITY) - 1 &&
93669 +           !memcmp(name, SYSTEM_NTFS_SECURITY, sizeof(SYSTEM_NTFS_SECURITY))) {
93670 +               /* system.ntfs_security*/
93671 +               __le32 security_id;
93672 +               bool inserted;
93673 +               struct ATTR_STD_INFO5 *std;
93675 +               if (!is_ntfs3(ni->mi.sbi)) {
93676 +                       /*
93677 +                        * we should replace ATTR_SECURE
93678 +                        * Skip this way cause it is nt4 feature
93679 +                        */
93680 +                       err = -EINVAL;
93681 +                       goto out;
93682 +               }
93684 +               if (!is_sd_valid(value, size)) {
93685 +                       err = -EINVAL;
93686 +                       ntfs_inode_warn(
93687 +                               inode,
93688 +                               "you try to set invalid security descriptor");
93689 +                       goto out;
93690 +               }
93692 +               err = ntfs_insert_security(ni->mi.sbi, value, size,
93693 +                                          &security_id, &inserted);
93694 +               if (err)
93695 +                       goto out;
93697 +               ni_lock(ni);
93698 +               std = ni_std5(ni);
93699 +               if (!std) {
93700 +                       err = -EINVAL;
93701 +               } else if (std->security_id != security_id) {
93702 +                       std->security_id = ni->std_security_id = security_id;
93703 +                       /* std attribute always in primary record */
93704 +                       ni->mi.dirty = true;
93705 +                       mark_inode_dirty(&ni->vfs_inode);
93706 +               }
93707 +               ni_unlock(ni);
93708 +               goto out;
93709 +       }
93711 +#ifdef CONFIG_NTFS3_FS_POSIX_ACL
93712 +       if ((name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1 &&
93713 +            !memcmp(name, XATTR_NAME_POSIX_ACL_ACCESS,
93714 +                    sizeof(XATTR_NAME_POSIX_ACL_ACCESS))) ||
93715 +           (name_len == sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) - 1 &&
93716 +            !memcmp(name, XATTR_NAME_POSIX_ACL_DEFAULT,
93717 +                    sizeof(XATTR_NAME_POSIX_ACL_DEFAULT)))) {
93718 +               /* TODO: init_user_ns? */
93719 +               err = ntfs_xattr_set_acl(
93720 +                       &init_user_ns, inode,
93721 +                       name_len == sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1
93722 +                               ? ACL_TYPE_ACCESS
93723 +                               : ACL_TYPE_DEFAULT,
93724 +                       value, size);
93725 +               goto out;
93726 +       }
93727 +#endif
93728 +       /* deal with ntfs extended attribute */
93729 +       err = ntfs_set_ea(inode, name, name_len, value, size, flags, 0);
93731 +out:
93732 +       return err;
93735 +static bool ntfs_xattr_user_list(struct dentry *dentry)
93737 +       return true;
93740 +static const struct xattr_handler ntfs_xattr_handler = {
93741 +       .prefix = "",
93742 +       .get = ntfs_getxattr,
93743 +       .set = ntfs_setxattr,
93744 +       .list = ntfs_xattr_user_list,
93747 +const struct xattr_handler *ntfs_xattr_handlers[] = {
93748 +       &ntfs_xattr_handler,
93749 +       NULL,
93751 diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
93752 index 0b2891c6c71e..2846b943e80c 100644
93753 --- a/fs/overlayfs/copy_up.c
93754 +++ b/fs/overlayfs/copy_up.c
93755 @@ -932,7 +932,7 @@ static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
93756  static int ovl_copy_up_flags(struct dentry *dentry, int flags)
93758         int err = 0;
93759 -       const struct cred *old_cred = ovl_override_creds(dentry->d_sb);
93760 +       const struct cred *old_cred;
93761         bool disconnected = (dentry->d_flags & DCACHE_DISCONNECTED);
93763         /*
93764 @@ -943,6 +943,7 @@ static int ovl_copy_up_flags(struct dentry *dentry, int flags)
93765         if (WARN_ON(disconnected && d_is_dir(dentry)))
93766                 return -EIO;
93768 +       old_cred = ovl_override_creds(dentry->d_sb);
93769         while (!err) {
93770                 struct dentry *next;
93771                 struct dentry *parent = NULL;
93772 diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
93773 index 3fe05fb5d145..71e264e2f16b 100644
93774 --- a/fs/overlayfs/namei.c
93775 +++ b/fs/overlayfs/namei.c
93776 @@ -919,6 +919,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
93777                         continue;
93779                 if ((uppermetacopy || d.metacopy) && !ofs->config.metacopy) {
93780 +                       dput(this);
93781                         err = -EPERM;
93782                         pr_warn_ratelimited("refusing to follow metacopy origin for (%pd2)\n", dentry);
93783                         goto out_put;
93784 diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
93785 index 95cff83786a5..2322f854533c 100644
93786 --- a/fs/overlayfs/overlayfs.h
93787 +++ b/fs/overlayfs/overlayfs.h
93788 @@ -319,9 +319,6 @@ int ovl_check_setxattr(struct dentry *dentry, struct dentry *upperdentry,
93789                        enum ovl_xattr ox, const void *value, size_t size,
93790                        int xerr);
93791  int ovl_set_impure(struct dentry *dentry, struct dentry *upperdentry);
93792 -void ovl_set_flag(unsigned long flag, struct inode *inode);
93793 -void ovl_clear_flag(unsigned long flag, struct inode *inode);
93794 -bool ovl_test_flag(unsigned long flag, struct inode *inode);
93795  bool ovl_inuse_trylock(struct dentry *dentry);
93796  void ovl_inuse_unlock(struct dentry *dentry);
93797  bool ovl_is_inuse(struct dentry *dentry);
93798 @@ -335,6 +332,21 @@ char *ovl_get_redirect_xattr(struct ovl_fs *ofs, struct dentry *dentry,
93799                              int padding);
93800  int ovl_sync_status(struct ovl_fs *ofs);
93802 +static inline void ovl_set_flag(unsigned long flag, struct inode *inode)
93804 +       set_bit(flag, &OVL_I(inode)->flags);
93807 +static inline void ovl_clear_flag(unsigned long flag, struct inode *inode)
93809 +       clear_bit(flag, &OVL_I(inode)->flags);
93812 +static inline bool ovl_test_flag(unsigned long flag, struct inode *inode)
93814 +       return test_bit(flag, &OVL_I(inode)->flags);
93817  static inline bool ovl_is_impuredir(struct super_block *sb,
93818                                     struct dentry *dentry)
93820 @@ -439,6 +451,18 @@ int ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
93821                         struct dentry *dentry, int level);
93822  int ovl_indexdir_cleanup(struct ovl_fs *ofs);
93825 + * Can we iterate real dir directly?
93826 + *
93827 + * Non-merge dir may contain whiteouts from a time it was a merge upper, before
93828 + * lower dir was removed under it and possibly before it was rotated from upper
93829 + * to lower layer.
93830 + */
93831 +static inline bool ovl_dir_is_real(struct dentry *dir)
93833 +       return !ovl_test_flag(OVL_WHITEOUTS, d_inode(dir));
93836  /* inode.c */
93837  int ovl_set_nlink_upper(struct dentry *dentry);
93838  int ovl_set_nlink_lower(struct dentry *dentry);
93839 diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
93840 index f404a78e6b60..cc1e80257064 100644
93841 --- a/fs/overlayfs/readdir.c
93842 +++ b/fs/overlayfs/readdir.c
93843 @@ -319,18 +319,6 @@ static inline int ovl_dir_read(struct path *realpath,
93844         return err;
93848 - * Can we iterate real dir directly?
93849 - *
93850 - * Non-merge dir may contain whiteouts from a time it was a merge upper, before
93851 - * lower dir was removed under it and possibly before it was rotated from upper
93852 - * to lower layer.
93853 - */
93854 -static bool ovl_dir_is_real(struct dentry *dir)
93856 -       return !ovl_test_flag(OVL_WHITEOUTS, d_inode(dir));
93859  static void ovl_dir_reset(struct file *file)
93861         struct ovl_dir_file *od = file->private_data;
93862 diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
93863 index fdd72f1a9c5e..787ce7c38fba 100644
93864 --- a/fs/overlayfs/super.c
93865 +++ b/fs/overlayfs/super.c
93866 @@ -380,6 +380,8 @@ static int ovl_show_options(struct seq_file *m, struct dentry *dentry)
93867                            ofs->config.metacopy ? "on" : "off");
93868         if (ofs->config.ovl_volatile)
93869                 seq_puts(m, ",volatile");
93870 +       if (ofs->config.userxattr)
93871 +               seq_puts(m, ",userxattr");
93872         return 0;
93875 @@ -1826,7 +1828,8 @@ static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb,
93876   * - upper/work dir of any overlayfs instance
93877   */
93878  static int ovl_check_layer(struct super_block *sb, struct ovl_fs *ofs,
93879 -                          struct dentry *dentry, const char *name)
93880 +                          struct dentry *dentry, const char *name,
93881 +                          bool is_lower)
93883         struct dentry *next = dentry, *parent;
93884         int err = 0;
93885 @@ -1838,7 +1841,7 @@ static int ovl_check_layer(struct super_block *sb, struct ovl_fs *ofs,
93887         /* Walk back ancestors to root (inclusive) looking for traps */
93888         while (!err && parent != next) {
93889 -               if (ovl_lookup_trap_inode(sb, parent)) {
93890 +               if (is_lower && ovl_lookup_trap_inode(sb, parent)) {
93891                         err = -ELOOP;
93892                         pr_err("overlapping %s path\n", name);
93893                 } else if (ovl_is_inuse(parent)) {
93894 @@ -1864,7 +1867,7 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
93896         if (ovl_upper_mnt(ofs)) {
93897                 err = ovl_check_layer(sb, ofs, ovl_upper_mnt(ofs)->mnt_root,
93898 -                                     "upperdir");
93899 +                                     "upperdir", false);
93900                 if (err)
93901                         return err;
93903 @@ -1875,7 +1878,8 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
93904                  * workbasedir.  In that case, we already have their traps in
93905                  * inode cache and we will catch that case on lookup.
93906                  */
93907 -               err = ovl_check_layer(sb, ofs, ofs->workbasedir, "workdir");
93908 +               err = ovl_check_layer(sb, ofs, ofs->workbasedir, "workdir",
93909 +                                     false);
93910                 if (err)
93911                         return err;
93912         }
93913 @@ -1883,7 +1887,7 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
93914         for (i = 1; i < ofs->numlayer; i++) {
93915                 err = ovl_check_layer(sb, ofs,
93916                                       ofs->layers[i].mnt->mnt_root,
93917 -                                     "lowerdir");
93918 +                                     "lowerdir", true);
93919                 if (err)
93920                         return err;
93921         }
93922 diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
93923 index 7f5a01a11f97..404a0a32ddf6 100644
93924 --- a/fs/overlayfs/util.c
93925 +++ b/fs/overlayfs/util.c
93926 @@ -422,18 +422,20 @@ void ovl_inode_update(struct inode *inode, struct dentry *upperdentry)
93927         }
93930 -static void ovl_dentry_version_inc(struct dentry *dentry, bool impurity)
93931 +static void ovl_dir_version_inc(struct dentry *dentry, bool impurity)
93933         struct inode *inode = d_inode(dentry);
93935         WARN_ON(!inode_is_locked(inode));
93936 +       WARN_ON(!d_is_dir(dentry));
93937         /*
93938 -        * Version is used by readdir code to keep cache consistent.  For merge
93939 -        * dirs all changes need to be noted.  For non-merge dirs, cache only
93940 -        * contains impure (ones which have been copied up and have origins)
93941 -        * entries, so only need to note changes to impure entries.
93942 +        * Version is used by readdir code to keep cache consistent.
93943 +        * For merge dirs (or dirs with origin) all changes need to be noted.
93944 +        * For non-merge dirs, cache contains only impure entries (i.e. ones
93945 +        * which have been copied up and have origins), so only need to note
93946 +        * changes to impure entries.
93947          */
93948 -       if (OVL_TYPE_MERGE(ovl_path_type(dentry)) || impurity)
93949 +       if (!ovl_dir_is_real(dentry) || impurity)
93950                 OVL_I(inode)->version++;
93953 @@ -442,7 +444,7 @@ void ovl_dir_modified(struct dentry *dentry, bool impurity)
93954         /* Copy mtime/ctime */
93955         ovl_copyattr(d_inode(ovl_dentry_upper(dentry)), d_inode(dentry));
93957 -       ovl_dentry_version_inc(dentry, impurity);
93958 +       ovl_dir_version_inc(dentry, impurity);
93961  u64 ovl_dentry_version_get(struct dentry *dentry)
93962 @@ -638,21 +640,6 @@ int ovl_set_impure(struct dentry *dentry, struct dentry *upperdentry)
93963         return err;
93966 -void ovl_set_flag(unsigned long flag, struct inode *inode)
93968 -       set_bit(flag, &OVL_I(inode)->flags);
93971 -void ovl_clear_flag(unsigned long flag, struct inode *inode)
93973 -       clear_bit(flag, &OVL_I(inode)->flags);
93976 -bool ovl_test_flag(unsigned long flag, struct inode *inode)
93978 -       return test_bit(flag, &OVL_I(inode)->flags);
93981  /**
93982   * Caller must hold a reference to inode to prevent it from being freed while
93983   * it is marked inuse.
93984 diff --git a/fs/proc/array.c b/fs/proc/array.c
93985 index bb87e4d89cd8..7ec59171f197 100644
93986 --- a/fs/proc/array.c
93987 +++ b/fs/proc/array.c
93988 @@ -342,8 +342,10 @@ static inline void task_seccomp(struct seq_file *m, struct task_struct *p)
93989         seq_put_decimal_ull(m, "NoNewPrivs:\t", task_no_new_privs(p));
93990  #ifdef CONFIG_SECCOMP
93991         seq_put_decimal_ull(m, "\nSeccomp:\t", p->seccomp.mode);
93992 +#ifdef CONFIG_SECCOMP_FILTER
93993         seq_put_decimal_ull(m, "\nSeccomp_filters:\t",
93994                             atomic_read(&p->seccomp.filter_count));
93995 +#endif
93996  #endif
93997         seq_puts(m, "\nSpeculation_Store_Bypass:\t");
93998         switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) {
93999 diff --git a/fs/proc/generic.c b/fs/proc/generic.c
94000 index bc86aa87cc41..5600da30e289 100644
94001 --- a/fs/proc/generic.c
94002 +++ b/fs/proc/generic.c
94003 @@ -756,7 +756,7 @@ int remove_proc_subtree(const char *name, struct proc_dir_entry *parent)
94004         while (1) {
94005                 next = pde_subdir_first(de);
94006                 if (next) {
94007 -                       if (unlikely(pde_is_permanent(root))) {
94008 +                       if (unlikely(pde_is_permanent(next))) {
94009                                 write_unlock(&proc_subdir_lock);
94010                                 WARN(1, "removing permanent /proc entry '%s/%s'",
94011                                         next->parent->name, next->name);
94012 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
94013 index e862cab69583..d292f20c4e3d 100644
94014 --- a/fs/proc/task_mmu.c
94015 +++ b/fs/proc/task_mmu.c
94016 @@ -19,6 +19,7 @@
94017  #include <linux/shmem_fs.h>
94018  #include <linux/uaccess.h>
94019  #include <linux/pkeys.h>
94020 +#include <linux/mm_inline.h>
94022  #include <asm/elf.h>
94023  #include <asm/tlb.h>
94024 @@ -1718,7 +1719,7 @@ static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
94025         if (PageSwapCache(page))
94026                 md->swapcache += nr_pages;
94028 -       if (PageActive(page) || PageUnevictable(page))
94029 +       if (PageUnevictable(page) || page_is_active(compound_head(page), NULL))
94030                 md->active += nr_pages;
94032         if (PageWriteback(page))
94033 diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
94034 index d963ae7902f9..67b194ba1b03 100644
94035 --- a/fs/pstore/platform.c
94036 +++ b/fs/pstore/platform.c
94037 @@ -218,7 +218,7 @@ static int zbufsize_842(size_t size)
94038  #if IS_ENABLED(CONFIG_PSTORE_ZSTD_COMPRESS)
94039  static int zbufsize_zstd(size_t size)
94041 -       return ZSTD_compressBound(size);
94042 +       return zstd_compress_bound(size);
94044  #endif
94046 diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
94047 index 7b1128398976..89d492916dea 100644
94048 --- a/fs/squashfs/file.c
94049 +++ b/fs/squashfs/file.c
94050 @@ -211,11 +211,11 @@ static long long read_indexes(struct super_block *sb, int n,
94051   * If the skip factor is limited in this way then the file will use multiple
94052   * slots.
94053   */
94054 -static inline int calculate_skip(int blocks)
94055 +static inline int calculate_skip(u64 blocks)
94057 -       int skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
94058 +       u64 skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
94059                  * SQUASHFS_META_INDEXES);
94060 -       return min(SQUASHFS_CACHED_BLKS - 1, skip + 1);
94061 +       return min((u64) SQUASHFS_CACHED_BLKS - 1, skip + 1);
94065 diff --git a/fs/squashfs/zstd_wrapper.c b/fs/squashfs/zstd_wrapper.c
94066 index b7cb1faa652d..6967c0aae801 100644
94067 --- a/fs/squashfs/zstd_wrapper.c
94068 +++ b/fs/squashfs/zstd_wrapper.c
94069 @@ -34,7 +34,7 @@ static void *zstd_init(struct squashfs_sb_info *msblk, void *buff)
94070                 goto failed;
94071         wksp->window_size = max_t(size_t,
94072                         msblk->block_size, SQUASHFS_METADATA_SIZE);
94073 -       wksp->mem_size = ZSTD_DStreamWorkspaceBound(wksp->window_size);
94074 +       wksp->mem_size = zstd_dstream_workspace_bound(wksp->window_size);
94075         wksp->mem = vmalloc(wksp->mem_size);
94076         if (wksp->mem == NULL)
94077                 goto failed;
94078 @@ -63,15 +63,15 @@ static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm,
94079         struct squashfs_page_actor *output)
94081         struct workspace *wksp = strm;
94082 -       ZSTD_DStream *stream;
94083 +       zstd_dstream *stream;
94084         size_t total_out = 0;
94085         int error = 0;
94086 -       ZSTD_inBuffer in_buf = { NULL, 0, 0 };
94087 -       ZSTD_outBuffer out_buf = { NULL, 0, 0 };
94088 +       zstd_in_buffer in_buf = { NULL, 0, 0 };
94089 +       zstd_out_buffer out_buf = { NULL, 0, 0 };
94090         struct bvec_iter_all iter_all = {};
94091         struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
94093 -       stream = ZSTD_initDStream(wksp->window_size, wksp->mem, wksp->mem_size);
94094 +       stream = zstd_init_dstream(wksp->window_size, wksp->mem, wksp->mem_size);
94096         if (!stream) {
94097                 ERROR("Failed to initialize zstd decompressor\n");
94098 @@ -116,14 +116,14 @@ static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm,
94099                 }
94101                 total_out -= out_buf.pos;
94102 -               zstd_err = ZSTD_decompressStream(stream, &out_buf, &in_buf);
94103 +               zstd_err = zstd_decompress_stream(stream, &out_buf, &in_buf);
94104                 total_out += out_buf.pos; /* add the additional data produced */
94105                 if (zstd_err == 0)
94106                         break;
94108 -               if (ZSTD_isError(zstd_err)) {
94109 +               if (zstd_is_error(zstd_err)) {
94110                         ERROR("zstd decompression error: %d\n",
94111 -                                       (int)ZSTD_getErrorCode(zstd_err));
94112 +                                       (int)zstd_get_error_code(zstd_err));
94113                         error = -EIO;
94114                         break;
94115                 }
94116 diff --git a/fs/stat.c b/fs/stat.c
94117 index fbc171d038aa..1fa38bdec1a6 100644
94118 --- a/fs/stat.c
94119 +++ b/fs/stat.c
94120 @@ -86,12 +86,20 @@ int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
94121         /* SB_NOATIME means filesystem supplies dummy atime value */
94122         if (inode->i_sb->s_flags & SB_NOATIME)
94123                 stat->result_mask &= ~STATX_ATIME;
94125 +       /*
94126 +        * Note: If you add another clause to set an attribute flag, please
94127 +        * update attributes_mask below.
94128 +        */
94129         if (IS_AUTOMOUNT(inode))
94130                 stat->attributes |= STATX_ATTR_AUTOMOUNT;
94132         if (IS_DAX(inode))
94133                 stat->attributes |= STATX_ATTR_DAX;
94135 +       stat->attributes_mask |= (STATX_ATTR_AUTOMOUNT |
94136 +                                 STATX_ATTR_DAX);
94138         mnt_userns = mnt_user_ns(path->mnt);
94139         if (inode->i_op->getattr)
94140                 return inode->i_op->getattr(mnt_userns, path, stat,
94141 diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
94142 index 0f8a6a16421b..1929ec63a0cb 100644
94143 --- a/fs/ubifs/replay.c
94144 +++ b/fs/ubifs/replay.c
94145 @@ -223,7 +223,8 @@ static bool inode_still_linked(struct ubifs_info *c, struct replay_entry *rino)
94146          */
94147         list_for_each_entry_reverse(r, &c->replay_list, list) {
94148                 ubifs_assert(c, r->sqnum >= rino->sqnum);
94149 -               if (key_inum(c, &r->key) == key_inum(c, &rino->key))
94150 +               if (key_inum(c, &r->key) == key_inum(c, &rino->key) &&
94151 +                   key_type(c, &r->key) == UBIFS_INO_KEY)
94152                         return r->deletion == 0;
94154         }
94155 diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
94156 index 472b3039eabb..902e5f7e6642 100644
94157 --- a/fs/xfs/libxfs/xfs_attr.c
94158 +++ b/fs/xfs/libxfs/xfs_attr.c
94159 @@ -928,6 +928,7 @@ xfs_attr_node_addname(
94160          * Search to see if name already exists, and get back a pointer
94161          * to where it should go.
94162          */
94163 +       error = 0;
94164         retval = xfs_attr_node_hasname(args, &state);
94165         if (retval != -ENOATTR && retval != -EEXIST)
94166                 goto out;
94167 diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
94168 index fcde59c65a81..cb3d6b1c655d 100644
94169 --- a/include/crypto/acompress.h
94170 +++ b/include/crypto/acompress.h
94171 @@ -165,6 +165,8 @@ static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
94172   * crypto_free_acomp() -- free ACOMPRESS tfm handle
94173   *
94174   * @tfm:       ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
94175 + *
94176 + * If @tfm is a NULL or error pointer, this function does nothing.
94177   */
94178  static inline void crypto_free_acomp(struct crypto_acomp *tfm)
94180 diff --git a/include/crypto/aead.h b/include/crypto/aead.h
94181 index fcc12c593ef8..e728469c4ccc 100644
94182 --- a/include/crypto/aead.h
94183 +++ b/include/crypto/aead.h
94184 @@ -185,6 +185,8 @@ static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
94185  /**
94186   * crypto_free_aead() - zeroize and free aead handle
94187   * @tfm: cipher handle to be freed
94188 + *
94189 + * If @tfm is a NULL or error pointer, this function does nothing.
94190   */
94191  static inline void crypto_free_aead(struct crypto_aead *tfm)
94193 diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
94194 index 1d3aa252caba..5764b46bd1ec 100644
94195 --- a/include/crypto/akcipher.h
94196 +++ b/include/crypto/akcipher.h
94197 @@ -174,6 +174,8 @@ static inline struct crypto_akcipher *crypto_akcipher_reqtfm(
94198   * crypto_free_akcipher() - free AKCIPHER tfm handle
94199   *
94200   * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher()
94201 + *
94202 + * If @tfm is a NULL or error pointer, this function does nothing.
94203   */
94204  static inline void crypto_free_akcipher(struct crypto_akcipher *tfm)
94206 diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h
94207 index 3a1c72fdb7cf..dabaee698718 100644
94208 --- a/include/crypto/chacha.h
94209 +++ b/include/crypto/chacha.h
94210 @@ -47,13 +47,18 @@ static inline void hchacha_block(const u32 *state, u32 *out, int nrounds)
94211                 hchacha_block_generic(state, out, nrounds);
94214 -void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv);
94215 -static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv)
94216 +static inline void chacha_init_consts(u32 *state)
94218         state[0]  = 0x61707865; /* "expa" */
94219         state[1]  = 0x3320646e; /* "nd 3" */
94220         state[2]  = 0x79622d32; /* "2-by" */
94221         state[3]  = 0x6b206574; /* "te k" */
94224 +void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv);
94225 +static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv)
94227 +       chacha_init_consts(state);
94228         state[4]  = key[0];
94229         state[5]  = key[1];
94230         state[6]  = key[2];
94231 diff --git a/include/crypto/hash.h b/include/crypto/hash.h
94232 index 13f8a6a54ca8..b2bc1e46e86a 100644
94233 --- a/include/crypto/hash.h
94234 +++ b/include/crypto/hash.h
94235 @@ -281,6 +281,8 @@ static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
94236  /**
94237   * crypto_free_ahash() - zeroize and free the ahash handle
94238   * @tfm: cipher handle to be freed
94239 + *
94240 + * If @tfm is a NULL or error pointer, this function does nothing.
94241   */
94242  static inline void crypto_free_ahash(struct crypto_ahash *tfm)
94244 @@ -724,6 +726,8 @@ static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
94245  /**
94246   * crypto_free_shash() - zeroize and free the message digest handle
94247   * @tfm: cipher handle to be freed
94248 + *
94249 + * If @tfm is a NULL or error pointer, this function does nothing.
94250   */
94251  static inline void crypto_free_shash(struct crypto_shash *tfm)
94253 diff --git a/include/crypto/internal/poly1305.h b/include/crypto/internal/poly1305.h
94254 index 064e52ca5248..196aa769f296 100644
94255 --- a/include/crypto/internal/poly1305.h
94256 +++ b/include/crypto/internal/poly1305.h
94257 @@ -18,7 +18,8 @@
94258   * only the ε-almost-∆-universal hash function (not the full MAC) is computed.
94259   */
94261 -void poly1305_core_setkey(struct poly1305_core_key *key, const u8 *raw_key);
94262 +void poly1305_core_setkey(struct poly1305_core_key *key,
94263 +                         const u8 raw_key[POLY1305_BLOCK_SIZE]);
94264  static inline void poly1305_core_init(struct poly1305_state *state)
94266         *state = (struct poly1305_state){};
94267 diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
94268 index 88b591215d5c..cccceadc164b 100644
94269 --- a/include/crypto/kpp.h
94270 +++ b/include/crypto/kpp.h
94271 @@ -154,6 +154,8 @@ static inline void crypto_kpp_set_flags(struct crypto_kpp *tfm, u32 flags)
94272   * crypto_free_kpp() - free KPP tfm handle
94273   *
94274   * @tfm: KPP tfm handle allocated with crypto_alloc_kpp()
94275 + *
94276 + * If @tfm is a NULL or error pointer, this function does nothing.
94277   */
94278  static inline void crypto_free_kpp(struct crypto_kpp *tfm)
94280 diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
94281 index f1f67fc749cf..090692ec3bc7 100644
94282 --- a/include/crypto/poly1305.h
94283 +++ b/include/crypto/poly1305.h
94284 @@ -58,8 +58,10 @@ struct poly1305_desc_ctx {
94285         };
94286  };
94288 -void poly1305_init_arch(struct poly1305_desc_ctx *desc, const u8 *key);
94289 -void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key);
94290 +void poly1305_init_arch(struct poly1305_desc_ctx *desc,
94291 +                       const u8 key[POLY1305_KEY_SIZE]);
94292 +void poly1305_init_generic(struct poly1305_desc_ctx *desc,
94293 +                          const u8 key[POLY1305_KEY_SIZE]);
94295  static inline void poly1305_init(struct poly1305_desc_ctx *desc, const u8 *key)
94297 diff --git a/include/crypto/rng.h b/include/crypto/rng.h
94298 index 8b4b844b4eef..17bb3673d3c1 100644
94299 --- a/include/crypto/rng.h
94300 +++ b/include/crypto/rng.h
94301 @@ -111,6 +111,8 @@ static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm)
94302  /**
94303   * crypto_free_rng() - zeroize and free RNG handle
94304   * @tfm: cipher handle to be freed
94305 + *
94306 + * If @tfm is a NULL or error pointer, this function does nothing.
94307   */
94308  static inline void crypto_free_rng(struct crypto_rng *tfm)
94310 diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
94311 index 6a733b171a5d..ef0fc9ed4342 100644
94312 --- a/include/crypto/skcipher.h
94313 +++ b/include/crypto/skcipher.h
94314 @@ -196,6 +196,8 @@ static inline struct crypto_tfm *crypto_skcipher_tfm(
94315  /**
94316   * crypto_free_skcipher() - zeroize and free cipher handle
94317   * @tfm: cipher handle to be freed
94318 + *
94319 + * If @tfm is a NULL or error pointer, this function does nothing.
94320   */
94321  static inline void crypto_free_skcipher(struct crypto_skcipher *tfm)
94323 diff --git a/include/keys/trusted-type.h b/include/keys/trusted-type.h
94324 index a94c03a61d8f..b2ed3481c6a0 100644
94325 --- a/include/keys/trusted-type.h
94326 +++ b/include/keys/trusted-type.h
94327 @@ -30,6 +30,7 @@ struct trusted_key_options {
94328         uint16_t keytype;
94329         uint32_t keyhandle;
94330         unsigned char keyauth[TPM_DIGEST_SIZE];
94331 +       uint32_t blobauth_len;
94332         unsigned char blobauth[TPM_DIGEST_SIZE];
94333         uint32_t pcrinfo_len;
94334         unsigned char pcrinfo[MAX_PCRINFO_SIZE];
94335 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
94336 index 158aefae1030..40c48e30f1eb 100644
94337 --- a/include/linux/blkdev.h
94338 +++ b/include/linux/blkdev.h
94339 @@ -620,6 +620,7 @@ struct request_queue {
94341  #define QUEUE_FLAG_MQ_DEFAULT  ((1 << QUEUE_FLAG_IO_STAT) |            \
94342                                  (1 << QUEUE_FLAG_SAME_COMP) |          \
94343 +                                (1 << QUEUE_FLAG_SAME_FORCE) |         \
94344                                  (1 << QUEUE_FLAG_NOWAIT))
94346  void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
94347 diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
94348 index 971b33aca13d..99bc82342ca0 100644
94349 --- a/include/linux/bpf_verifier.h
94350 +++ b/include/linux/bpf_verifier.h
94351 @@ -299,10 +299,11 @@ struct bpf_verifier_state_list {
94352  };
94354  /* Possible states for alu_state member. */
94355 -#define BPF_ALU_SANITIZE_SRC           1U
94356 -#define BPF_ALU_SANITIZE_DST           2U
94357 +#define BPF_ALU_SANITIZE_SRC           (1U << 0)
94358 +#define BPF_ALU_SANITIZE_DST           (1U << 1)
94359  #define BPF_ALU_NEG_VALUE              (1U << 2)
94360  #define BPF_ALU_NON_POINTER            (1U << 3)
94361 +#define BPF_ALU_IMMEDIATE              (1U << 4)
94362  #define BPF_ALU_SANITIZE               (BPF_ALU_SANITIZE_SRC | \
94363                                          BPF_ALU_SANITIZE_DST)
94365 diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
94366 index 4f2f79de083e..bd5744360cfa 100644
94367 --- a/include/linux/cgroup.h
94368 +++ b/include/linux/cgroup.h
94369 @@ -432,6 +432,18 @@ static inline void cgroup_put(struct cgroup *cgrp)
94370         css_put(&cgrp->self);
94373 +extern struct mutex cgroup_mutex;
94375 +static inline void cgroup_lock(void)
94377 +       mutex_lock(&cgroup_mutex);
94380 +static inline void cgroup_unlock(void)
94382 +       mutex_unlock(&cgroup_mutex);
94385  /**
94386   * task_css_set_check - obtain a task's css_set with extra access conditions
94387   * @task: the task to obtain css_set for
94388 @@ -446,7 +458,6 @@ static inline void cgroup_put(struct cgroup *cgrp)
94389   * as locks used during the cgroup_subsys::attach() methods.
94390   */
94391  #ifdef CONFIG_PROVE_RCU
94392 -extern struct mutex cgroup_mutex;
94393  extern spinlock_t css_set_lock;
94394  #define task_css_set_check(task, __c)                                  \
94395         rcu_dereference_check((task)->cgroups,                          \
94396 @@ -704,6 +715,8 @@ struct cgroup;
94397  static inline u64 cgroup_id(const struct cgroup *cgrp) { return 1; }
94398  static inline void css_get(struct cgroup_subsys_state *css) {}
94399  static inline void css_put(struct cgroup_subsys_state *css) {}
94400 +static inline void cgroup_lock(void) {}
94401 +static inline void cgroup_unlock(void) {}
94402  static inline int cgroup_attach_task_all(struct task_struct *from,
94403                                          struct task_struct *t) { return 0; }
94404  static inline int cgroupstats_build(struct cgroupstats *stats,
94405 diff --git a/include/linux/compat.h b/include/linux/compat.h
94406 index 6e65be753603..d4c1b402b962 100644
94407 --- a/include/linux/compat.h
94408 +++ b/include/linux/compat.h
94409 @@ -365,6 +365,17 @@ struct compat_robust_list_head {
94410         compat_uptr_t                   list_op_pending;
94411  };
94413 +struct compat_futex_waitv {
94414 +       compat_uptr_t uaddr;
94415 +       compat_uint_t val;
94416 +       compat_uint_t flags;
94419 +struct compat_futex_requeue {
94420 +       compat_uptr_t uaddr;
94421 +       compat_uint_t flags;
94424  #ifdef CONFIG_COMPAT_OLD_SIGACTION
94425  struct compat_old_sigaction {
94426         compat_uptr_t                   sa_handler;
94427 @@ -654,6 +665,18 @@ asmlinkage long
94428  compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
94429                            compat_size_t __user *len_ptr);
94431 +/* kernel/futex2.c */
94432 +asmlinkage long compat_sys_futex_waitv(struct compat_futex_waitv *waiters,
94433 +                                      compat_uint_t nr_futexes, compat_uint_t flags,
94434 +                                      struct __kernel_timespec __user *timo);
94436 +asmlinkage long compat_sys_futex_requeue(struct compat_futex_requeue *uaddr1,
94437 +                                        struct compat_futex_requeue *uaddr2,
94438 +                                        compat_uint_t nr_wake,
94439 +                                        compat_uint_t nr_requeue,
94440 +                                        compat_uint_t cmpval,
94441 +                                        compat_uint_t flags);
94443  /* kernel/itimer.c */
94444  asmlinkage long compat_sys_getitimer(int which,
94445                                      struct old_itimerval32 __user *it);
94446 diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
94447 index f14adb882338..cc7c3fda2aa6 100644
94448 --- a/include/linux/cpuhotplug.h
94449 +++ b/include/linux/cpuhotplug.h
94450 @@ -135,6 +135,7 @@ enum cpuhp_state {
94451         CPUHP_AP_RISCV_TIMER_STARTING,
94452         CPUHP_AP_CLINT_TIMER_STARTING,
94453         CPUHP_AP_CSKY_TIMER_STARTING,
94454 +       CPUHP_AP_TI_GP_TIMER_STARTING,
94455         CPUHP_AP_HYPERV_TIMER_STARTING,
94456         CPUHP_AP_KVM_STARTING,
94457         CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
94458 diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
94459 index 706b68d1359b..13d1f4c14d7b 100644
94460 --- a/include/linux/dma-iommu.h
94461 +++ b/include/linux/dma-iommu.h
94462 @@ -40,6 +40,8 @@ void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
94463  void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
94464                 struct iommu_domain *domain);
94466 +extern bool iommu_dma_forcedac;
94468  #else /* CONFIG_IOMMU_DMA */
94470  struct iommu_domain;
94471 diff --git a/include/linux/elevator.h b/include/linux/elevator.h
94472 index 1fe8e105b83b..dcb2f9022c1d 100644
94473 --- a/include/linux/elevator.h
94474 +++ b/include/linux/elevator.h
94475 @@ -34,7 +34,7 @@ struct elevator_mq_ops {
94476         void (*depth_updated)(struct blk_mq_hw_ctx *);
94478         bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
94479 -       bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *, unsigned int);
94480 +       bool (*bio_merge)(struct request_queue *, struct bio *, unsigned int);
94481         int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
94482         void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
94483         void (*requests_merged)(struct request_queue *, struct request *, struct request *);
94484 diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
94485 index 71177b17eee5..66e2423d9feb 100644
94486 --- a/include/linux/firmware/xlnx-zynqmp.h
94487 +++ b/include/linux/firmware/xlnx-zynqmp.h
94488 @@ -354,11 +354,6 @@ int zynqmp_pm_read_pggs(u32 index, u32 *value);
94489  int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype);
94490  int zynqmp_pm_set_boot_health_status(u32 value);
94491  #else
94492 -static inline struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void)
94494 -       return ERR_PTR(-ENODEV);
94497  static inline int zynqmp_pm_get_api_version(u32 *version)
94499         return -ENODEV;
94500 diff --git a/include/linux/freezer.h b/include/linux/freezer.h
94501 index 27828145ca09..504cc97bf475 100644
94502 --- a/include/linux/freezer.h
94503 +++ b/include/linux/freezer.h
94504 @@ -311,6 +311,7 @@ static inline void set_freezable(void) {}
94505  #define wait_event_freezekillable_unsafe(wq, condition)                        \
94506                 wait_event_killable(wq, condition)
94508 +#define pm_freezing (false)
94509  #endif /* !CONFIG_FREEZER */
94511  #endif /* FREEZER_H_INCLUDED */
94512 diff --git a/include/linux/fs.h b/include/linux/fs.h
94513 index ec8f3ddf4a6a..33683ff94cb3 100644
94514 --- a/include/linux/fs.h
94515 +++ b/include/linux/fs.h
94516 @@ -683,6 +683,7 @@ struct inode {
94517         };
94518         atomic64_t              i_version;
94519         atomic64_t              i_sequence; /* see futex */
94520 +       atomic64_t              i_sequence2; /* see futex2 */
94521         atomic_t                i_count;
94522         atomic_t                i_dio_count;
94523         atomic_t                i_writecount;
94524 diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
94525 index 286de0520574..ecf0032a0995 100644
94526 --- a/include/linux/gpio/driver.h
94527 +++ b/include/linux/gpio/driver.h
94528 @@ -624,8 +624,17 @@ void gpiochip_irq_domain_deactivate(struct irq_domain *domain,
94529  bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gc,
94530                                 unsigned int offset);
94532 +#ifdef CONFIG_GPIOLIB_IRQCHIP
94533  int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
94534                                 struct irq_domain *domain);
94535 +#else
94536 +static inline int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
94537 +                                             struct irq_domain *domain)
94539 +       WARN_ON(1);
94540 +       return -EINVAL;
94542 +#endif
94544  int gpiochip_generic_request(struct gpio_chip *gc, unsigned int offset);
94545  void gpiochip_generic_free(struct gpio_chip *gc, unsigned int offset);
94546 diff --git a/include/linux/hid.h b/include/linux/hid.h
94547 index ef702b3f56e3..3e33eb14118c 100644
94548 --- a/include/linux/hid.h
94549 +++ b/include/linux/hid.h
94550 @@ -262,6 +262,8 @@ struct hid_item {
94551  #define HID_CP_SELECTION       0x000c0080
94552  #define HID_CP_MEDIASELECTION  0x000c0087
94553  #define HID_CP_SELECTDISC      0x000c00ba
94554 +#define HID_CP_VOLUMEUP                0x000c00e9
94555 +#define HID_CP_VOLUMEDOWN      0x000c00ea
94556  #define HID_CP_PLAYBACKSPEED   0x000c00f1
94557  #define HID_CP_PROXIMITY       0x000c0109
94558  #define HID_CP_SPEAKERSYSTEM   0x000c0160
94559 diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
94560 index ba973efcd369..0ba7b3f9029c 100644
94561 --- a/include/linux/huge_mm.h
94562 +++ b/include/linux/huge_mm.h
94563 @@ -443,6 +443,11 @@ static inline bool is_huge_zero_page(struct page *page)
94564         return false;
94567 +static inline bool is_huge_zero_pmd(pmd_t pmd)
94569 +       return false;
94572  static inline bool is_huge_zero_pud(pud_t pud)
94574         return false;
94575 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
94576 index 56622658b215..a670ae129f4b 100644
94577 --- a/include/linux/i2c.h
94578 +++ b/include/linux/i2c.h
94579 @@ -687,6 +687,8 @@ struct i2c_adapter_quirks {
94580  #define I2C_AQ_NO_ZERO_LEN_READ                BIT(5)
94581  #define I2C_AQ_NO_ZERO_LEN_WRITE       BIT(6)
94582  #define I2C_AQ_NO_ZERO_LEN             (I2C_AQ_NO_ZERO_LEN_READ | I2C_AQ_NO_ZERO_LEN_WRITE)
94583 +/* adapter cannot do repeated START */
94584 +#define I2C_AQ_NO_REP_START            BIT(7)
94586  /*
94587   * i2c_adapter is the structure used to identify a physical i2c bus along
94588 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
94589 index 1bc46b88711a..d1f32b33415a 100644
94590 --- a/include/linux/intel-iommu.h
94591 +++ b/include/linux/intel-iommu.h
94592 @@ -372,6 +372,7 @@ enum {
94593  /* PASID cache invalidation granu */
94594  #define QI_PC_ALL_PASIDS       0
94595  #define QI_PC_PASID_SEL                1
94596 +#define QI_PC_GLOBAL           3
94598  #define QI_EIOTLB_ADDR(addr)   ((u64)(addr) & VTD_PAGE_MASK)
94599  #define QI_EIOTLB_IH(ih)       (((u64)ih) << 6)
94600 diff --git a/include/linux/iommu.h b/include/linux/iommu.h
94601 index 5e7fe519430a..9ca6e6b8084d 100644
94602 --- a/include/linux/iommu.h
94603 +++ b/include/linux/iommu.h
94604 @@ -547,7 +547,7 @@ static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
94605          * structure can be rewritten.
94606          */
94607         if (gather->pgsize != size ||
94608 -           end < gather->start || start > gather->end) {
94609 +           end + 1 < gather->start || start > gather->end + 1) {
94610                 if (gather->pgsize)
94611                         iommu_iotlb_sync(domain, gather);
94612                 gather->pgsize = size;
94613 diff --git a/include/linux/ioport.h b/include/linux/ioport.h
94614 index 55de385c839c..647744d8514e 100644
94615 --- a/include/linux/ioport.h
94616 +++ b/include/linux/ioport.h
94617 @@ -331,7 +331,7 @@ static inline void irqresource_disabled(struct resource *res, u32 irq)
94619         res->start = irq;
94620         res->end = irq;
94621 -       res->flags = IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET;
94622 +       res->flags |= IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET;
94625  extern struct address_space *iomem_get_mapping(void);
94626 diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
94627 index 05e22770af51..3ccd19f13f5c 100644
94628 --- a/include/linux/ipc_namespace.h
94629 +++ b/include/linux/ipc_namespace.h
94630 @@ -120,6 +120,9 @@ extern int mq_init_ns(struct ipc_namespace *ns);
94631  static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; }
94632  #endif
94634 +extern struct ipc_namespace *get_ipc_ns_exported(struct ipc_namespace *ns);
94635 +extern struct ipc_namespace *show_init_ipc_ns(void);
94637  #if defined(CONFIG_IPC_NS)
94638  extern struct ipc_namespace *copy_ipcs(unsigned long flags,
94639         struct user_namespace *user_ns, struct ipc_namespace *ns);
94640 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
94641 index 1b65e7204344..99dccea4293c 100644
94642 --- a/include/linux/kvm_host.h
94643 +++ b/include/linux/kvm_host.h
94644 @@ -192,8 +192,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
94645                     int len, void *val);
94646  int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
94647                             int len, struct kvm_io_device *dev);
94648 -void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
94649 -                              struct kvm_io_device *dev);
94650 +int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
94651 +                             struct kvm_io_device *dev);
94652  struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
94653                                          gpa_t addr);
94655 diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
94656 index 0c04d39a7967..cff95ed1ee2b 100644
94657 --- a/include/linux/memcontrol.h
94658 +++ b/include/linux/memcontrol.h
94659 @@ -212,6 +212,8 @@ struct obj_cgroup {
94660         };
94661  };
94663 +struct lru_gen_mm_list;
94665  /*
94666   * The memory controller data structure. The memory controller controls both
94667   * page cache and RSS per cgroup. We would eventually like to provide
94668 @@ -335,6 +337,10 @@ struct mem_cgroup {
94669         struct deferred_split deferred_split_queue;
94670  #endif
94672 +#ifdef CONFIG_LRU_GEN
94673 +       struct lru_gen_mm_list *mm_list;
94674 +#endif
94676         struct mem_cgroup_per_node *nodeinfo[0];
94677         /* WARNING: nodeinfo must be the last member here */
94678  };
94679 @@ -1077,7 +1083,6 @@ static inline struct mem_cgroup *page_memcg(struct page *page)
94681  static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
94683 -       WARN_ON_ONCE(!rcu_read_lock_held());
94684         return NULL;
94687 diff --git a/include/linux/mfd/da9063/registers.h b/include/linux/mfd/da9063/registers.h
94688 index 1dbabf1b3cb8..6e0f66a2e727 100644
94689 --- a/include/linux/mfd/da9063/registers.h
94690 +++ b/include/linux/mfd/da9063/registers.h
94691 @@ -1037,6 +1037,9 @@
94692  #define                DA9063_NONKEY_PIN_AUTODOWN      0x02
94693  #define                DA9063_NONKEY_PIN_AUTOFLPRT     0x03
94695 +/* DA9063_REG_CONFIG_J (addr=0x10F) */
94696 +#define DA9063_TWOWIRE_TO                      0x40
94698  /* DA9063_REG_MON_REG_5 (addr=0x116) */
94699  #define DA9063_MON_A8_IDX_MASK                 0x07
94700  #define                DA9063_MON_A8_IDX_NONE          0x00
94701 diff --git a/include/linux/mfd/intel-m10-bmc.h b/include/linux/mfd/intel-m10-bmc.h
94702 index 74d4e193966a..9b54ca13eac3 100644
94703 --- a/include/linux/mfd/intel-m10-bmc.h
94704 +++ b/include/linux/mfd/intel-m10-bmc.h
94705 @@ -11,7 +11,7 @@
94707  #define M10BMC_LEGACY_SYS_BASE         0x300400
94708  #define M10BMC_SYS_BASE                        0x300800
94709 -#define M10BMC_MEM_END                 0x200000fc
94710 +#define M10BMC_MEM_END                 0x1fffffff
94712  /* Register offset of system registers */
94713  #define NIOS2_FW_VERSION               0x0
94714 diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
94715 index 53b89631a1d9..ab07f09f2bad 100644
94716 --- a/include/linux/mlx5/driver.h
94717 +++ b/include/linux/mlx5/driver.h
94718 @@ -1226,7 +1226,7 @@ enum {
94719         MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
94720  };
94722 -static inline bool mlx5_is_roce_enabled(struct mlx5_core_dev *dev)
94723 +static inline bool mlx5_is_roce_init_enabled(struct mlx5_core_dev *dev)
94725         struct devlink *devlink = priv_to_devlink(dev);
94726         union devlink_param_value val;
94727 diff --git a/include/linux/mm.h b/include/linux/mm.h
94728 index 8ba434287387..c0ecb207198c 100644
94729 --- a/include/linux/mm.h
94730 +++ b/include/linux/mm.h
94731 @@ -203,6 +203,9 @@ static inline void __mm_zero_struct_page(struct page *page)
94733  extern int sysctl_max_map_count;
94735 +extern unsigned long sysctl_clean_low_kbytes;
94736 +extern unsigned long sysctl_clean_min_kbytes;
94738  extern unsigned long sysctl_user_reserve_kbytes;
94739  extern unsigned long sysctl_admin_reserve_kbytes;
94741 @@ -1070,6 +1073,8 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
94742  #define ZONES_PGOFF            (NODES_PGOFF - ZONES_WIDTH)
94743  #define LAST_CPUPID_PGOFF      (ZONES_PGOFF - LAST_CPUPID_WIDTH)
94744  #define KASAN_TAG_PGOFF                (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)
94745 +#define LRU_GEN_PGOFF          (KASAN_TAG_PGOFF - LRU_GEN_WIDTH)
94746 +#define LRU_USAGE_PGOFF                (LRU_GEN_PGOFF - LRU_USAGE_WIDTH)
94748  /*
94749   * Define the bit shifts to access each section.  For non-existent
94750 @@ -3170,5 +3175,37 @@ extern int sysctl_nr_trim_pages;
94752  void mem_dump_obj(void *object);
94754 +/**
94755 + * seal_check_future_write - Check for F_SEAL_FUTURE_WRITE flag and handle it
94756 + * @seals: the seals to check
94757 + * @vma: the vma to operate on
94758 + *
94759 + * Check whether F_SEAL_FUTURE_WRITE is set; if so, do proper check/handling on
94760 + * the vma flags.  Return 0 if check pass, or <0 for errors.
94761 + */
94762 +static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
94764 +       if (seals & F_SEAL_FUTURE_WRITE) {
94765 +               /*
94766 +                * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
94767 +                * "future write" seal active.
94768 +                */
94769 +               if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
94770 +                       return -EPERM;
94772 +               /*
94773 +                * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
94774 +                * MAP_SHARED and read-only, take care to not allow mprotect to
94775 +                * revert protections on such mappings. Do this only for shared
94776 +                * mappings. For private mappings, don't need to mask
94777 +                * VM_MAYWRITE as we still want them to be COW-writable.
94778 +                */
94779 +               if (vma->vm_flags & VM_SHARED)
94780 +                       vma->vm_flags &= ~(VM_MAYWRITE);
94781 +       }
94783 +       return 0;
94786  #endif /* __KERNEL__ */
94787  #endif /* _LINUX_MM_H */
94788 diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
94789 index 355ea1ee32bd..5eb4b12972ec 100644
94790 --- a/include/linux/mm_inline.h
94791 +++ b/include/linux/mm_inline.h
94792 @@ -79,11 +79,299 @@ static __always_inline enum lru_list page_lru(struct page *page)
94793         return lru;
94796 +#ifdef CONFIG_LRU_GEN
94798 +#ifdef CONFIG_LRU_GEN_ENABLED
94799 +DECLARE_STATIC_KEY_TRUE(lru_gen_static_key);
94800 +#define lru_gen_enabled() static_branch_likely(&lru_gen_static_key)
94801 +#else
94802 +DECLARE_STATIC_KEY_FALSE(lru_gen_static_key);
94803 +#define lru_gen_enabled() static_branch_unlikely(&lru_gen_static_key)
94804 +#endif
94806 +/* We track at most MAX_NR_GENS generations using the sliding window technique. */
94807 +static inline int lru_gen_from_seq(unsigned long seq)
94809 +       return seq % MAX_NR_GENS;
94812 +/* Convert the level of usage to a tier. See the comment on MAX_NR_TIERS. */
94813 +static inline int lru_tier_from_usage(int usage)
94815 +       return order_base_2(usage + 1);
94818 +/* Return a proper index regardless whether we keep a full history of stats. */
94819 +static inline int sid_from_seq_or_gen(int seq_or_gen)
94821 +       return seq_or_gen % NR_STAT_GENS;
94824 +/* The youngest and the second youngest generations are considered active. */
94825 +static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen)
94827 +       unsigned long max_seq = READ_ONCE(lruvec->evictable.max_seq);
94829 +       VM_BUG_ON(!max_seq);
94830 +       VM_BUG_ON(gen >= MAX_NR_GENS);
94832 +       return gen == lru_gen_from_seq(max_seq) || gen == lru_gen_from_seq(max_seq - 1);
94835 +/* Update the sizes of the multigenerational lru. */
94836 +static inline void lru_gen_update_size(struct page *page, struct lruvec *lruvec,
94837 +                                      int old_gen, int new_gen)
94839 +       int file = page_is_file_lru(page);
94840 +       int zone = page_zonenum(page);
94841 +       int delta = thp_nr_pages(page);
94842 +       enum lru_list lru = LRU_FILE * file;
94843 +       struct lrugen *lrugen = &lruvec->evictable;
94845 +       lockdep_assert_held(&lruvec->lru_lock);
94846 +       VM_BUG_ON(old_gen != -1 && old_gen >= MAX_NR_GENS);
94847 +       VM_BUG_ON(new_gen != -1 && new_gen >= MAX_NR_GENS);
94848 +       VM_BUG_ON(old_gen == -1 && new_gen == -1);
94850 +       if (old_gen >= 0)
94851 +               WRITE_ONCE(lrugen->sizes[old_gen][file][zone],
94852 +                          lrugen->sizes[old_gen][file][zone] - delta);
94853 +       if (new_gen >= 0)
94854 +               WRITE_ONCE(lrugen->sizes[new_gen][file][zone],
94855 +                          lrugen->sizes[new_gen][file][zone] + delta);
94857 +       if (old_gen < 0) {
94858 +               if (lru_gen_is_active(lruvec, new_gen))
94859 +                       lru += LRU_ACTIVE;
94860 +               update_lru_size(lruvec, lru, zone, delta);
94861 +               return;
94862 +       }
94864 +       if (new_gen < 0) {
94865 +               if (lru_gen_is_active(lruvec, old_gen))
94866 +                       lru += LRU_ACTIVE;
94867 +               update_lru_size(lruvec, lru, zone, -delta);
94868 +               return;
94869 +       }
94871 +       if (!lru_gen_is_active(lruvec, old_gen) && lru_gen_is_active(lruvec, new_gen)) {
94872 +               update_lru_size(lruvec, lru, zone, -delta);
94873 +               update_lru_size(lruvec, lru + LRU_ACTIVE, zone, delta);
94874 +       }
94876 +       VM_BUG_ON(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
94879 +/* Add a page to a list of the multigenerational lru. Return true on success. */
94880 +static inline bool lru_gen_addition(struct page *page, struct lruvec *lruvec, bool front)
94882 +       int gen;
94883 +       unsigned long old_flags, new_flags;
94884 +       int file = page_is_file_lru(page);
94885 +       int zone = page_zonenum(page);
94886 +       struct lrugen *lrugen = &lruvec->evictable;
94888 +       if (PageUnevictable(page) || !lrugen->enabled[file])
94889 +               return false;
94890 +       /*
94891 +        * If a page is being faulted in, add it to the youngest generation.
94892 +        * try_walk_mm_list() may look at the size of the youngest generation to
94893 +        * determine if the aging is due.
94894 +        *
94895 +        * If a page can't be evicted immediately, i.e., a shmem page not in
94896 +        * swap cache, a dirty page waiting on writeback, or a page rejected by
94897 +        * evict_lru_gen_pages() due to races, dirty buffer heads, etc., add it
94898 +        * to the second oldest generation.
94899 +        *
94900 +        * If a page could be evicted immediately, i.e., deactivated, rotated by
94901 +        * writeback, or allocated for buffered io, add it to the oldest
94902 +        * generation.
94903 +        */
94904 +       if (PageActive(page))
94905 +               gen = lru_gen_from_seq(lrugen->max_seq);
94906 +       else if ((!file && !PageSwapCache(page)) ||
94907 +                (PageReclaim(page) && (PageDirty(page) || PageWriteback(page))) ||
94908 +                (!PageReferenced(page) && PageWorkingset(page)))
94909 +               gen = lru_gen_from_seq(lrugen->min_seq[file] + 1);
94910 +       else
94911 +               gen = lru_gen_from_seq(lrugen->min_seq[file]);
94913 +       do {
94914 +               old_flags = READ_ONCE(page->flags);
94915 +               VM_BUG_ON_PAGE(old_flags & LRU_GEN_MASK, page);
94917 +               new_flags = (old_flags & ~(LRU_GEN_MASK | BIT(PG_active))) |
94918 +                           ((gen + 1UL) << LRU_GEN_PGOFF);
94919 +               /* see the comment in evict_lru_gen_pages() */
94920 +               if (!(old_flags & BIT(PG_referenced)))
94921 +                       new_flags &= ~(LRU_USAGE_MASK | LRU_TIER_FLAGS);
94922 +       } while (cmpxchg(&page->flags, old_flags, new_flags) != old_flags);
94924 +       lru_gen_update_size(page, lruvec, -1, gen);
94925 +       if (front)
94926 +               list_add(&page->lru, &lrugen->lists[gen][file][zone]);
94927 +       else
94928 +               list_add_tail(&page->lru, &lrugen->lists[gen][file][zone]);
94930 +       return true;
94933 +/* Delete a page from a list of the multigenerational lru. Return true on success. */
94934 +static inline bool lru_gen_deletion(struct page *page, struct lruvec *lruvec)
94936 +       int gen;
94937 +       unsigned long old_flags, new_flags;
94939 +       do {
94940 +               old_flags = READ_ONCE(page->flags);
94941 +               if (!(old_flags & LRU_GEN_MASK))
94942 +                       return false;
94944 +               VM_BUG_ON_PAGE(PageActive(page), page);
94945 +               VM_BUG_ON_PAGE(PageUnevictable(page), page);
94947 +               gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
94949 +               new_flags = old_flags & ~LRU_GEN_MASK;
94950 +               /* mark page active accordingly */
94951 +               if (lru_gen_is_active(lruvec, gen))
94952 +                       new_flags |= BIT(PG_active);
94953 +       } while (cmpxchg(&page->flags, old_flags, new_flags) != old_flags);
94955 +       lru_gen_update_size(page, lruvec, gen, -1);
94956 +       list_del(&page->lru);
94958 +       return true;
94961 +/* Activate a page from page cache or swap cache after it's mapped. */
94962 +static inline void lru_gen_activation(struct page *page, struct vm_area_struct *vma)
94964 +       if (!lru_gen_enabled())
94965 +               return;
94967 +       if (PageActive(page) || PageUnevictable(page) || vma_is_dax(vma) ||
94968 +           (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)))
94969 +               return;
94970 +       /*
94971 +        * TODO: pass vm_fault to add_to_page_cache_lru() and
94972 +        * __read_swap_cache_async() so they can activate pages directly when in
94973 +        * the page fault path.
94974 +        */
94975 +       activate_page(page);
94978 +/* Return -1 when a page is not on a list of the multigenerational lru. */
94979 +static inline int page_lru_gen(struct page *page)
94981 +       return ((READ_ONCE(page->flags) & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
94984 +/* This function works regardless whether the multigenerational lru is enabled. */
94985 +static inline bool page_is_active(struct page *page, struct lruvec *lruvec)
94987 +       struct mem_cgroup *memcg;
94988 +       int gen = page_lru_gen(page);
94989 +       bool active = false;
94991 +       VM_BUG_ON_PAGE(PageTail(page), page);
94993 +       if (gen < 0)
94994 +               return PageActive(page);
94996 +       if (lruvec) {
94997 +               VM_BUG_ON_PAGE(PageUnevictable(page), page);
94998 +               VM_BUG_ON_PAGE(PageActive(page), page);
94999 +               lockdep_assert_held(&lruvec->lru_lock);
95001 +               return lru_gen_is_active(lruvec, gen);
95002 +       }
95004 +       rcu_read_lock();
95006 +       memcg = page_memcg_rcu(page);
95007 +       lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
95008 +       active = lru_gen_is_active(lruvec, gen);
95010 +       rcu_read_unlock();
95012 +       return active;
95015 +/* Return the level of usage of a page. See the comment on MAX_NR_TIERS. */
95016 +static inline int page_tier_usage(struct page *page)
95018 +       unsigned long flags = READ_ONCE(page->flags);
95020 +       return flags & BIT(PG_workingset) ?
95021 +              ((flags & LRU_USAGE_MASK) >> LRU_USAGE_PGOFF) + 1 : 0;
95024 +/* Increment the usage counter after a page is accessed via file descriptors. */
95025 +static inline bool page_inc_usage(struct page *page)
95027 +       unsigned long old_flags, new_flags;
95029 +       if (!lru_gen_enabled())
95030 +               return PageActive(page);
95032 +       do {
95033 +               old_flags = READ_ONCE(page->flags);
95035 +               if (!(old_flags & BIT(PG_workingset)))
95036 +                       new_flags = old_flags | BIT(PG_workingset);
95037 +               else
95038 +                       new_flags = (old_flags & ~LRU_USAGE_MASK) | min(LRU_USAGE_MASK,
95039 +                                   (old_flags & LRU_USAGE_MASK) + BIT(LRU_USAGE_PGOFF));
95041 +               if (old_flags == new_flags)
95042 +                       break;
95043 +       } while (cmpxchg(&page->flags, old_flags, new_flags) != old_flags);
95045 +       return true;
95048 +#else /* CONFIG_LRU_GEN */
95050 +static inline bool lru_gen_enabled(void)
95052 +       return false;
95055 +static inline bool lru_gen_addition(struct page *page, struct lruvec *lruvec, bool front)
95057 +       return false;
95060 +static inline bool lru_gen_deletion(struct page *page, struct lruvec *lruvec)
95062 +       return false;
95065 +static inline void lru_gen_activation(struct page *page, struct vm_area_struct *vma)
95069 +static inline bool page_is_active(struct page *page, struct lruvec *lruvec)
95071 +       return PageActive(page);
95074 +static inline bool page_inc_usage(struct page *page)
95076 +       return PageActive(page);
95079 +#endif /* CONFIG_LRU_GEN */
95081  static __always_inline void add_page_to_lru_list(struct page *page,
95082                                 struct lruvec *lruvec)
95084         enum lru_list lru = page_lru(page);
95086 +       if (lru_gen_addition(page, lruvec, true))
95087 +               return;
95089         update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
95090         list_add(&page->lru, &lruvec->lists[lru]);
95092 @@ -93,6 +381,9 @@ static __always_inline void add_page_to_lru_list_tail(struct page *page,
95094         enum lru_list lru = page_lru(page);
95096 +       if (lru_gen_addition(page, lruvec, false))
95097 +               return;
95099         update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
95100         list_add_tail(&page->lru, &lruvec->lists[lru]);
95102 @@ -100,6 +391,9 @@ static __always_inline void add_page_to_lru_list_tail(struct page *page,
95103  static __always_inline void del_page_from_lru_list(struct page *page,
95104                                 struct lruvec *lruvec)
95106 +       if (lru_gen_deletion(page, lruvec))
95107 +               return;
95109         list_del(&page->lru);
95110         update_lru_size(lruvec, page_lru(page), page_zonenum(page),
95111                         -thp_nr_pages(page));
95112 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
95113 index 6613b26a8894..b936703a39a2 100644
95114 --- a/include/linux/mm_types.h
95115 +++ b/include/linux/mm_types.h
95116 @@ -15,6 +15,8 @@
95117  #include <linux/page-flags-layout.h>
95118  #include <linux/workqueue.h>
95119  #include <linux/seqlock.h>
95120 +#include <linux/nodemask.h>
95121 +#include <linux/mmdebug.h>
95123  #include <asm/mmu.h>
95125 @@ -97,10 +99,10 @@ struct page {
95126                 };
95127                 struct {        /* page_pool used by netstack */
95128                         /**
95129 -                        * @dma_addr: might require a 64-bit value even on
95130 +                        * @dma_addr: might require a 64-bit value on
95131                          * 32-bit architectures.
95132                          */
95133 -                       dma_addr_t dma_addr;
95134 +                       unsigned long dma_addr[2];
95135                 };
95136                 struct {        /* slab, slob and slub */
95137                         union {
95138 @@ -383,6 +385,8 @@ struct core_state {
95139         struct completion startup;
95140  };
95142 +#define ANON_AND_FILE 2
95144  struct kioctx_table;
95145  struct mm_struct {
95146         struct {
95147 @@ -561,6 +565,22 @@ struct mm_struct {
95149  #ifdef CONFIG_IOMMU_SUPPORT
95150                 u32 pasid;
95151 +#endif
95152 +#ifdef CONFIG_LRU_GEN
95153 +               struct {
95154 +                       /* the node of a global or per-memcg mm_struct list */
95155 +                       struct list_head list;
95156 +#ifdef CONFIG_MEMCG
95157 +                       /* points to memcg of the owner task above */
95158 +                       struct mem_cgroup *memcg;
95159 +#endif
95160 +                       /* whether this mm_struct has been used since the last walk */
95161 +                       nodemask_t nodes[ANON_AND_FILE];
95162 +#ifndef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
95163 +                       /* the number of CPUs using this mm_struct */
95164 +                       atomic_t nr_cpus;
95165 +#endif
95166 +               } lrugen;
95167  #endif
95168         } __randomize_layout;
95170 @@ -588,6 +608,103 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
95171         return (struct cpumask *)&mm->cpu_bitmap;
95174 +#ifdef CONFIG_LRU_GEN
95176 +void lru_gen_init_mm(struct mm_struct *mm);
95177 +void lru_gen_add_mm(struct mm_struct *mm);
95178 +void lru_gen_del_mm(struct mm_struct *mm);
95179 +#ifdef CONFIG_MEMCG
95180 +int lru_gen_alloc_mm_list(struct mem_cgroup *memcg);
95181 +void lru_gen_free_mm_list(struct mem_cgroup *memcg);
95182 +void lru_gen_migrate_mm(struct mm_struct *mm);
95183 +#endif
95186 + * Track the usage so mm_struct's that haven't been used since the last walk can
95187 + * be skipped. This function adds a theoretical overhead to each context switch,
95188 + * which hasn't been measurable.
95189 + */
95190 +static inline void lru_gen_switch_mm(struct mm_struct *old, struct mm_struct *new)
95192 +       int file;
95194 +       /* exclude init_mm, efi_mm, etc. */
95195 +       if (!core_kernel_data((unsigned long)old)) {
95196 +               VM_BUG_ON(old == &init_mm);
95198 +               for (file = 0; file < ANON_AND_FILE; file++)
95199 +                       nodes_setall(old->lrugen.nodes[file]);
95201 +#ifndef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
95202 +               atomic_dec(&old->lrugen.nr_cpus);
95203 +               VM_BUG_ON_MM(atomic_read(&old->lrugen.nr_cpus) < 0, old);
95204 +#endif
95205 +       } else
95206 +               VM_BUG_ON_MM(READ_ONCE(old->lrugen.list.prev) ||
95207 +                            READ_ONCE(old->lrugen.list.next), old);
95209 +       if (!core_kernel_data((unsigned long)new)) {
95210 +               VM_BUG_ON(new == &init_mm);
95212 +#ifndef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
95213 +               atomic_inc(&new->lrugen.nr_cpus);
95214 +               VM_BUG_ON_MM(atomic_read(&new->lrugen.nr_cpus) < 0, new);
95215 +#endif
95216 +       } else
95217 +               VM_BUG_ON_MM(READ_ONCE(new->lrugen.list.prev) ||
95218 +                            READ_ONCE(new->lrugen.list.next), new);
95221 +/* Return whether this mm_struct is being used on any CPUs. */
95222 +static inline bool lru_gen_mm_is_active(struct mm_struct *mm)
95224 +#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
95225 +       return !cpumask_empty(mm_cpumask(mm));
95226 +#else
95227 +       return atomic_read(&mm->lrugen.nr_cpus);
95228 +#endif
95231 +#else /* CONFIG_LRU_GEN */
95233 +static inline void lru_gen_init_mm(struct mm_struct *mm)
95237 +static inline void lru_gen_add_mm(struct mm_struct *mm)
95241 +static inline void lru_gen_del_mm(struct mm_struct *mm)
95245 +#ifdef CONFIG_MEMCG
95246 +static inline int lru_gen_alloc_mm_list(struct mem_cgroup *memcg)
95248 +       return 0;
95251 +static inline void lru_gen_free_mm_list(struct mem_cgroup *memcg)
95255 +static inline void lru_gen_migrate_mm(struct mm_struct *mm)
95258 +#endif
95260 +static inline void lru_gen_switch_mm(struct mm_struct *old, struct mm_struct *new)
95264 +static inline bool lru_gen_mm_is_active(struct mm_struct *mm)
95266 +       return false;
95269 +#endif /* CONFIG_LRU_GEN */
95271  struct mmu_gather;
95272  extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
95273  extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
95274 diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
95275 index 26a3c7bc29ae..a3a4e374f802 100644
95276 --- a/include/linux/mmc/host.h
95277 +++ b/include/linux/mmc/host.h
95278 @@ -302,9 +302,6 @@ struct mmc_host {
95279         u32                     ocr_avail_sdio; /* SDIO-specific OCR */
95280         u32                     ocr_avail_sd;   /* SD-specific OCR */
95281         u32                     ocr_avail_mmc;  /* MMC-specific OCR */
95282 -#ifdef CONFIG_PM_SLEEP
95283 -       struct notifier_block   pm_notify;
95284 -#endif
95285         struct wakeup_source    *ws;            /* Enable consume of uevents */
95286         u32                     max_current_330;
95287         u32                     max_current_300;
95288 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
95289 index 47946cec7584..a22e9e40083f 100644
95290 --- a/include/linux/mmzone.h
95291 +++ b/include/linux/mmzone.h
95292 @@ -285,14 +285,124 @@ static inline bool is_active_lru(enum lru_list lru)
95293         return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
95296 -#define ANON_AND_FILE 2
95298  enum lruvec_flags {
95299         LRUVEC_CONGESTED,               /* lruvec has many dirty pages
95300                                          * backed by a congested BDI
95301                                          */
95302  };
95304 +struct lruvec;
95305 +struct page_vma_mapped_walk;
95307 +#define LRU_GEN_MASK           ((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF)
95308 +#define LRU_USAGE_MASK         ((BIT(LRU_USAGE_WIDTH) - 1) << LRU_USAGE_PGOFF)
95310 +#ifdef CONFIG_LRU_GEN
95313 + * For each lruvec, evictable pages are divided into multiple generations. The
95314 + * youngest and the oldest generation numbers, AKA max_seq and min_seq, are
95315 + * monotonically increasing. The sliding window technique is used to track at
95316 + * most MAX_NR_GENS and at least MIN_NR_GENS generations. An offset within the
95317 + * window, AKA gen, indexes an array of per-type and per-zone lists for the
95318 + * corresponding generation. All pages from this array of lists have gen+1
95319 + * stored in page->flags. 0 is reserved to indicate that pages are not on the
95320 + * lists.
95321 + */
95322 +#define MAX_NR_GENS            ((unsigned int)CONFIG_NR_LRU_GENS)
95325 + * Each generation is then divided into multiple tiers. Tiers represent levels
95326 + * of usage from file descriptors, i.e., mark_page_accessed(). In contrast to
95327 + * moving across generations which requires the lru lock, moving across tiers
95328 + * only involves an atomic operation on page->flags and therefore has a
95329 + * negligible cost.
95330 + *
95331 + * The purposes of tiers are to:
95332 + *   1) estimate whether pages accessed multiple times via file descriptors are
95333 + *   more active than pages accessed only via page tables by separating the two
95334 + *   access types into upper tiers and the base tier and comparing refault rates
95335 + *   across tiers.
95336 + *   2) improve buffered io performance by deferring activations of pages
95337 + *   accessed multiple times until the eviction. That is activations happen in
95338 + *   the reclaim path, not the access path.
95339 + *
95340 + * Pages accessed N times via file descriptors belong to tier order_base_2(N).
95341 + * The base tier uses the following page flag:
95342 + *   !PageReferenced() -- readahead pages
95343 + *   PageReferenced() -- single-access pages
95344 + * All upper tiers use the following page flags:
95345 + *   PageReferenced() && PageWorkingset() -- multi-access pages
95346 + * in addition to the bits storing N-2 accesses. Therefore, we can support one
95347 + * upper tier without using additional bits in page->flags.
95348 + *
95349 + * Note that
95350 + *   1) PageWorkingset() is always set for upper tiers because we want to
95351 + *    maintain the existing psi behavior.
95352 + *   2) !PageReferenced() && PageWorkingset() is not a valid tier. See the
95353 + *   comment in evict_lru_gen_pages().
95354 + *   3) pages accessed only via page tables belong to the base tier.
95355 + *
95356 + * Pages from the base tier are evicted regardless of the refault rate. Pages
95357 + * from upper tiers will be moved to the next generation, if their refault rates
95358 + * are higher than that of the base tier.
95359 + */
95360 +#define MAX_NR_TIERS           ((unsigned int)CONFIG_TIERS_PER_GEN)
95361 +#define LRU_TIER_FLAGS         (BIT(PG_referenced) | BIT(PG_workingset))
95362 +#define LRU_USAGE_SHIFT                (CONFIG_TIERS_PER_GEN - 1)
95364 +/* Whether to keep historical stats for each generation. */
95365 +#ifdef CONFIG_LRU_GEN_STATS
95366 +#define NR_STAT_GENS           ((unsigned int)CONFIG_NR_LRU_GENS)
95367 +#else
95368 +#define NR_STAT_GENS           1U
95369 +#endif
95371 +struct lrugen {
95372 +       /* the aging increments the max generation number */
95373 +       unsigned long max_seq;
95374 +       /* the eviction increments the min generation numbers */
95375 +       unsigned long min_seq[ANON_AND_FILE];
95376 +       /* the birth time of each generation in jiffies */
95377 +       unsigned long timestamps[MAX_NR_GENS];
95378 +       /* the lists of the multigenerational lru */
95379 +       struct list_head lists[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
95380 +       /* the sizes of the multigenerational lru in pages */
95381 +       unsigned long sizes[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
95382 +       /* to determine which type and its tiers to evict */
95383 +       atomic_long_t evicted[NR_STAT_GENS][ANON_AND_FILE][MAX_NR_TIERS];
95384 +       atomic_long_t refaulted[NR_STAT_GENS][ANON_AND_FILE][MAX_NR_TIERS];
95385 +       /* the base tier is inactive and won't be activated */
95386 +       unsigned long activated[NR_STAT_GENS][ANON_AND_FILE][MAX_NR_TIERS - 1];
95387 +       /* arithmetic mean weighted by geometric series 1/2, 1/4, ... */
95388 +       unsigned long avg_total[ANON_AND_FILE][MAX_NR_TIERS];
95389 +       unsigned long avg_refaulted[ANON_AND_FILE][MAX_NR_TIERS];
95390 +       /* reclaim priority to compare across memcgs */
95391 +       atomic_t priority;
95392 +       /* whether the multigenerational lru is enabled */
95393 +       bool enabled[ANON_AND_FILE];
95396 +void lru_gen_init_lruvec(struct lruvec *lruvec);
95397 +void lru_gen_set_state(bool enable, bool main, bool swap);
95398 +void lru_gen_scan_around(struct page_vma_mapped_walk *pvmw);
95400 +#else /* CONFIG_LRU_GEN */
95402 +static inline void lru_gen_init_lruvec(struct lruvec *lruvec)
95406 +static inline void lru_gen_set_state(bool enable, bool main, bool swap)
95410 +static inline void lru_gen_scan_around(struct page_vma_mapped_walk *pvmw)
95414 +#endif /* CONFIG_LRU_GEN */
95416  struct lruvec {
95417         struct list_head                lists[NR_LRU_LISTS];
95418         /* per lruvec lru_lock for memcg */
95419 @@ -310,6 +420,10 @@ struct lruvec {
95420         unsigned long                   refaults[ANON_AND_FILE];
95421         /* Various lruvec state flags (enum lruvec_flags) */
95422         unsigned long                   flags;
95423 +#ifdef CONFIG_LRU_GEN
95424 +       /* unevictable pages are on LRU_UNEVICTABLE */
95425 +       struct lrugen                   evictable;
95426 +#endif
95427  #ifdef CONFIG_MEMCG
95428         struct pglist_data *pgdat;
95429  #endif
95430 diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
95431 index 3327239fa2f9..cc29dee508f7 100644
95432 --- a/include/linux/nfs_xdr.h
95433 +++ b/include/linux/nfs_xdr.h
95434 @@ -15,6 +15,8 @@
95435  #define NFS_DEF_FILE_IO_SIZE   (4096U)
95436  #define NFS_MIN_FILE_IO_SIZE   (1024U)
95438 +#define NFS_BITMASK_SZ         3
95440  struct nfs4_string {
95441         unsigned int len;
95442         char *data;
95443 @@ -525,7 +527,8 @@ struct nfs_closeargs {
95444         struct nfs_seqid *      seqid;
95445         fmode_t                 fmode;
95446         u32                     share_access;
95447 -       u32 *                   bitmask;
95448 +       const u32 *             bitmask;
95449 +       u32                     bitmask_store[NFS_BITMASK_SZ];
95450         struct nfs4_layoutreturn_args *lr_args;
95451  };
95453 @@ -608,7 +611,8 @@ struct nfs4_delegreturnargs {
95454         struct nfs4_sequence_args       seq_args;
95455         const struct nfs_fh *fhandle;
95456         const nfs4_stateid *stateid;
95457 -       u32 * bitmask;
95458 +       const u32 *bitmask;
95459 +       u32 bitmask_store[NFS_BITMASK_SZ];
95460         struct nfs4_layoutreturn_args *lr_args;
95461  };
95463 @@ -648,7 +652,8 @@ struct nfs_pgio_args {
95464         union {
95465                 unsigned int            replen;                 /* used by read */
95466                 struct {
95467 -                       u32 *                   bitmask;        /* used by write */
95468 +                       const u32 *             bitmask;        /* used by write */
95469 +                       u32 bitmask_store[NFS_BITMASK_SZ];      /* used by write */
95470                         enum nfs3_stable_how    stable;         /* used by write */
95471                 };
95472         };
95473 diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
95474 index ac398e143c9a..89fe4e3592f9 100644
95475 --- a/include/linux/nodemask.h
95476 +++ b/include/linux/nodemask.h
95477 @@ -486,6 +486,7 @@ static inline int num_node_state(enum node_states state)
95478  #define first_online_node      0
95479  #define first_memory_node      0
95480  #define next_online_node(nid)  (MAX_NUMNODES)
95481 +#define next_memory_node(nid)  (MAX_NUMNODES)
95482  #define nr_node_ids            1U
95483  #define nr_online_nodes                1U
95485 diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h
95486 index 7d4ec26d8a3e..df83aaec8498 100644
95487 --- a/include/linux/page-flags-layout.h
95488 +++ b/include/linux/page-flags-layout.h
95489 @@ -24,6 +24,17 @@
95490  #error ZONES_SHIFT -- too many zones configured adjust calculation
95491  #endif
95493 +#ifdef CONFIG_LRU_GEN
95495 + * LRU_GEN_WIDTH is generated from order_base_2(CONFIG_NR_LRU_GENS + 1). And the
95496 + * comment on MAX_NR_TIERS explains why we offset by 2 here.
95497 + */
95498 +#define LRU_USAGE_WIDTH                (CONFIG_TIERS_PER_GEN - 2)
95499 +#else
95500 +#define LRU_GEN_WIDTH          0
95501 +#define LRU_USAGE_WIDTH                0
95502 +#endif
95504  #ifdef CONFIG_SPARSEMEM
95505  #include <asm/sparsemem.h>
95507 @@ -56,7 +67,8 @@
95509  #define ZONES_WIDTH            ZONES_SHIFT
95511 -#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
95512 +#if SECTIONS_WIDTH+ZONES_WIDTH+LRU_GEN_WIDTH+LRU_USAGE_WIDTH+NODES_SHIFT \
95513 +       <= BITS_PER_LONG - NR_PAGEFLAGS
95514  #define NODES_WIDTH            NODES_SHIFT
95515  #else
95516  #ifdef CONFIG_SPARSEMEM_VMEMMAP
95517 @@ -83,14 +95,16 @@
95518  #define KASAN_TAG_WIDTH 0
95519  #endif
95521 -#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT+KASAN_TAG_WIDTH \
95522 +#if SECTIONS_WIDTH+ZONES_WIDTH+LRU_GEN_WIDTH+LRU_USAGE_WIDTH+ \
95523 +       NODES_WIDTH+KASAN_TAG_WIDTH+LAST_CPUPID_SHIFT \
95524         <= BITS_PER_LONG - NR_PAGEFLAGS
95525  #define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT
95526  #else
95527  #define LAST_CPUPID_WIDTH 0
95528  #endif
95530 -#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH+LAST_CPUPID_WIDTH+KASAN_TAG_WIDTH \
95531 +#if SECTIONS_WIDTH+ZONES_WIDTH+LRU_GEN_WIDTH+LRU_USAGE_WIDTH+ \
95532 +       NODES_WIDTH+KASAN_TAG_WIDTH+LAST_CPUPID_WIDTH \
95533         > BITS_PER_LONG - NR_PAGEFLAGS
95534  #error "Not enough bits in page flags"
95535  #endif
95536 diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
95537 index 04a34c08e0a6..e58984fca32a 100644
95538 --- a/include/linux/page-flags.h
95539 +++ b/include/linux/page-flags.h
95540 @@ -817,7 +817,7 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
95541          1UL << PG_private      | 1UL << PG_private_2   |       \
95542          1UL << PG_writeback    | 1UL << PG_reserved    |       \
95543          1UL << PG_slab         | 1UL << PG_active      |       \
95544 -        1UL << PG_unevictable  | __PG_MLOCKED)
95545 +        1UL << PG_unevictable  | __PG_MLOCKED | LRU_GEN_MASK)
95547  /*
95548   * Flags checked when a page is prepped for return by the page allocator.
95549 @@ -828,7 +828,7 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
95550   * alloc-free cycle to prevent from reusing the page.
95551   */
95552  #define PAGE_FLAGS_CHECK_AT_PREP       \
95553 -       (((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)
95554 +       ((((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON) | LRU_GEN_MASK | LRU_USAGE_MASK)
95556  #define PAGE_FLAGS_PRIVATE                             \
95557         (1UL << PG_private | 1UL << PG_private_2)
95558 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
95559 index 3f7f89ea5e51..3d478abf411c 100644
95560 --- a/include/linux/perf_event.h
95561 +++ b/include/linux/perf_event.h
95562 @@ -607,6 +607,7 @@ struct swevent_hlist {
95563  #define PERF_ATTACH_TASK_DATA  0x08
95564  #define PERF_ATTACH_ITRACE     0x10
95565  #define PERF_ATTACH_SCHED_CB   0x20
95566 +#define PERF_ATTACH_CHILD      0x40
95568  struct perf_cgroup;
95569  struct perf_buffer;
95570 diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
95571 index 5e772392a379..08dd9b8c055a 100644
95572 --- a/include/linux/pgtable.h
95573 +++ b/include/linux/pgtable.h
95574 @@ -193,7 +193,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
95575  #endif
95577  #ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
95578 -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
95579 +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG)
95580  static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
95581                                             unsigned long address,
95582                                             pmd_t *pmdp)
95583 @@ -214,7 +214,7 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
95584         BUILD_BUG();
95585         return 0;
95587 -#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
95588 +#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG */
95589  #endif
95591  #ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
95592 diff --git a/include/linux/phy.h b/include/linux/phy.h
95593 index 1a12e4436b5b..8644b097dea3 100644
95594 --- a/include/linux/phy.h
95595 +++ b/include/linux/phy.h
95596 @@ -493,6 +493,7 @@ struct macsec_ops;
95597   * @loopback_enabled: Set true if this PHY has been loopbacked successfully.
95598   * @downshifted_rate: Set true if link speed has been downshifted.
95599   * @is_on_sfp_module: Set true if PHY is located on an SFP module.
95600 + * @mac_managed_pm: Set true if MAC driver takes of suspending/resuming PHY
95601   * @state: State of the PHY for management purposes
95602   * @dev_flags: Device-specific flags used by the PHY driver.
95603   * @irq: IRQ number of the PHY's interrupt (-1 if none)
95604 @@ -567,6 +568,7 @@ struct phy_device {
95605         unsigned loopback_enabled:1;
95606         unsigned downshifted_rate:1;
95607         unsigned is_on_sfp_module:1;
95608 +       unsigned mac_managed_pm:1;
95610         unsigned autoneg:1;
95611         /* The most recently read link state */
95612 diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
95613 index 3f23f6e430bf..cd81e060863c 100644
95614 --- a/include/linux/platform_device.h
95615 +++ b/include/linux/platform_device.h
95616 @@ -359,4 +359,7 @@ static inline int is_sh_early_platform_device(struct platform_device *pdev)
95618  #endif /* CONFIG_SUPERH */
95620 +/* For now only SuperH uses it */
95621 +void early_platform_cleanup(void);
95623  #endif /* _PLATFORM_DEVICE_H_ */
95624 diff --git a/include/linux/pm.h b/include/linux/pm.h
95625 index 482313a8ccfc..628718697679 100644
95626 --- a/include/linux/pm.h
95627 +++ b/include/linux/pm.h
95628 @@ -602,6 +602,7 @@ struct dev_pm_info {
95629         unsigned int            idle_notification:1;
95630         unsigned int            request_pending:1;
95631         unsigned int            deferred_resume:1;
95632 +       unsigned int            needs_force_resume:1;
95633         unsigned int            runtime_auto:1;
95634         bool                    ignore_children:1;
95635         unsigned int            no_callbacks:1;
95636 diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
95637 index b492ae00cc90..6c08a085367b 100644
95638 --- a/include/linux/pm_runtime.h
95639 +++ b/include/linux/pm_runtime.h
95640 @@ -265,7 +265,7 @@ static inline void pm_runtime_no_callbacks(struct device *dev) {}
95641  static inline void pm_runtime_irq_safe(struct device *dev) {}
95642  static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; }
95644 -static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; }
95645 +static inline bool pm_runtime_has_no_callbacks(struct device *dev) { return false; }
95646  static inline void pm_runtime_mark_last_busy(struct device *dev) {}
95647  static inline void __pm_runtime_use_autosuspend(struct device *dev,
95648                                                 bool use) {}
95649 diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
95650 index 111a40d0d3d5..8d5f4f40fb41 100644
95651 --- a/include/linux/power/bq27xxx_battery.h
95652 +++ b/include/linux/power/bq27xxx_battery.h
95653 @@ -53,7 +53,6 @@ struct bq27xxx_reg_cache {
95654         int capacity;
95655         int energy;
95656         int flags;
95657 -       int power_avg;
95658         int health;
95659  };
95661 diff --git a/include/linux/reset.h b/include/linux/reset.h
95662 index b9109efa2a5c..9700124affa3 100644
95663 --- a/include/linux/reset.h
95664 +++ b/include/linux/reset.h
95665 @@ -47,6 +47,11 @@ static inline int reset_control_reset(struct reset_control *rstc)
95666         return 0;
95669 +static inline int reset_control_rearm(struct reset_control *rstc)
95671 +       return 0;
95674  static inline int reset_control_assert(struct reset_control *rstc)
95676         return 0;
95677 diff --git a/include/linux/sched.h b/include/linux/sched.h
95678 index ef00bb22164c..b4b0b69d76f1 100644
95679 --- a/include/linux/sched.h
95680 +++ b/include/linux/sched.h
95681 @@ -216,13 +216,40 @@ struct task_group;
95683  extern void scheduler_tick(void);
95685 -#define        MAX_SCHEDULE_TIMEOUT            LONG_MAX
95687 +#define        MAX_SCHEDULE_TIMEOUT    LONG_MAX
95688  extern long schedule_timeout(long timeout);
95689  extern long schedule_timeout_interruptible(long timeout);
95690  extern long schedule_timeout_killable(long timeout);
95691  extern long schedule_timeout_uninterruptible(long timeout);
95692  extern long schedule_timeout_idle(long timeout);
95694 +#ifdef CONFIG_HIGH_RES_TIMERS
95695 +extern long schedule_msec_hrtimeout(long timeout);
95696 +extern long schedule_min_hrtimeout(void);
95697 +extern long schedule_msec_hrtimeout_interruptible(long timeout);
95698 +extern long schedule_msec_hrtimeout_uninterruptible(long timeout);
95699 +#else
95700 +static inline long schedule_msec_hrtimeout(long timeout)
95702 +       return schedule_timeout(msecs_to_jiffies(timeout));
95705 +static inline long schedule_min_hrtimeout(void)
95707 +       return schedule_timeout(1);
95710 +static inline long schedule_msec_hrtimeout_interruptible(long timeout)
95712 +       return schedule_timeout_interruptible(msecs_to_jiffies(timeout));
95715 +static inline long schedule_msec_hrtimeout_uninterruptible(long timeout)
95717 +       return schedule_timeout_uninterruptible(msecs_to_jiffies(timeout));
95719 +#endif
95721  asmlinkage void schedule(void);
95722  extern void schedule_preempt_disabled(void);
95723  asmlinkage void preempt_schedule_irq(void);
95724 @@ -450,10 +477,22 @@ struct sched_statistics {
95725  #endif
95726  };
95728 +#ifdef CONFIG_CACULE_SCHED
95729 +struct cacule_node {
95730 +       struct cacule_node*             next;
95731 +       struct cacule_node*             prev;
95732 +       u64                             cacule_start_time;
95733 +       u64                             vruntime;
95735 +#endif
95737  struct sched_entity {
95738         /* For load-balancing: */
95739         struct load_weight              load;
95740         struct rb_node                  run_node;
95741 +#ifdef CONFIG_CACULE_SCHED
95742 +       struct cacule_node              cacule_node;
95743 +#endif
95744         struct list_head                group_node;
95745         unsigned int                    on_rq;
95747 diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
95748 index 3c31ba88aca5..cb819c3d86f3 100644
95749 --- a/include/linux/sched/sysctl.h
95750 +++ b/include/linux/sched/sysctl.h
95751 @@ -31,6 +31,12 @@ extern unsigned int sysctl_sched_min_granularity;
95752  extern unsigned int sysctl_sched_wakeup_granularity;
95753  extern unsigned int sysctl_sched_child_runs_first;
95755 +#ifdef CONFIG_CACULE_SCHED
95756 +extern int interactivity_factor;
95757 +extern unsigned int interactivity_threshold;
95758 +extern int cacule_max_lifetime;
95759 +#endif
95761  enum sched_tunable_scaling {
95762         SCHED_TUNABLESCALING_NONE,
95763         SCHED_TUNABLESCALING_LOG,
95764 diff --git a/include/linux/smp.h b/include/linux/smp.h
95765 index 70c6f6284dcf..238a3f97a415 100644
95766 --- a/include/linux/smp.h
95767 +++ b/include/linux/smp.h
95768 @@ -73,7 +73,7 @@ void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
95769  void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
95770                            void *info, bool wait, const struct cpumask *mask);
95772 -int smp_call_function_single_async(int cpu, call_single_data_t *csd);
95773 +int smp_call_function_single_async(int cpu, struct __call_single_data *csd);
95775  #ifdef CONFIG_SMP
95777 diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
95778 index 592897fa4f03..643139b1eafe 100644
95779 --- a/include/linux/spi/spi.h
95780 +++ b/include/linux/spi/spi.h
95781 @@ -510,6 +510,9 @@ struct spi_controller {
95783  #define SPI_MASTER_GPIO_SS             BIT(5)  /* GPIO CS must select slave */
95785 +       /* flag indicating this is a non-devres managed controller */
95786 +       bool                    devm_allocated;
95788         /* flag indicating this is an SPI slave controller */
95789         bool                    slave;
95791 diff --git a/include/linux/swap.h b/include/linux/swap.h
95792 index 4cc6ec3bf0ab..0e7532c7db22 100644
95793 --- a/include/linux/swap.h
95794 +++ b/include/linux/swap.h
95795 @@ -344,13 +344,14 @@ extern void lru_add_drain_cpu(int cpu);
95796  extern void lru_add_drain_cpu_zone(struct zone *zone);
95797  extern void lru_add_drain_all(void);
95798  extern void rotate_reclaimable_page(struct page *page);
95799 +extern void activate_page(struct page *page);
95800  extern void deactivate_file_page(struct page *page);
95801  extern void deactivate_page(struct page *page);
95802  extern void mark_page_lazyfree(struct page *page);
95803  extern void swap_setup(void);
95805 -extern void lru_cache_add_inactive_or_unevictable(struct page *page,
95806 -                                               struct vm_area_struct *vma);
95807 +extern void lru_cache_add_page_vma(struct page *page, struct vm_area_struct *vma,
95808 +                                  bool faulting);
95810  /* linux/mm/vmscan.c */
95811  extern unsigned long zone_reclaimable_pages(struct zone *zone);
95812 diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
95813 index 2839dc9a7c01..b07b7d4334a6 100644
95814 --- a/include/linux/syscalls.h
95815 +++ b/include/linux/syscalls.h
95816 @@ -69,6 +69,8 @@ struct io_uring_params;
95817  struct clone_args;
95818  struct open_how;
95819  struct mount_attr;
95820 +struct futex_waitv;
95821 +struct futex_requeue;
95823  #include <linux/types.h>
95824  #include <linux/aio_abi.h>
95825 @@ -619,6 +621,20 @@ asmlinkage long sys_get_robust_list(int pid,
95826  asmlinkage long sys_set_robust_list(struct robust_list_head __user *head,
95827                                     size_t len);
95829 +/* kernel/futex2.c */
95830 +asmlinkage long sys_futex_wait(void __user *uaddr, unsigned int val,
95831 +                              unsigned int flags,
95832 +                              struct __kernel_timespec __user *timo);
95833 +asmlinkage long sys_futex_wake(void __user *uaddr, unsigned int nr_wake,
95834 +                              unsigned int flags);
95835 +asmlinkage long sys_futex_waitv(struct futex_waitv __user *waiters,
95836 +                               unsigned int nr_futexes, unsigned int flags,
95837 +                               struct __kernel_timespec __user *timo);
95838 +asmlinkage long sys_futex_requeue(struct futex_requeue __user *uaddr1,
95839 +                                 struct futex_requeue __user *uaddr2,
95840 +                                 unsigned int nr_wake, unsigned int nr_requeue,
95841 +                                 unsigned int cmpval, unsigned int flags);
95843  /* kernel/hrtimer.c */
95844  asmlinkage long sys_nanosleep(struct __kernel_timespec __user *rqtp,
95845                               struct __kernel_timespec __user *rmtp);
95846 @@ -1300,6 +1316,8 @@ int ksys_ipc(unsigned int call, int first, unsigned long second,
95847         unsigned long third, void __user * ptr, long fifth);
95848  int compat_ksys_ipc(u32 call, int first, int second,
95849         u32 third, u32 ptr, u32 fifth);
95850 +long ksys_futex_wake(void __user *uaddr, unsigned long nr_wake,
95851 +                    unsigned int flags);
95853  /*
95854   * The following kernel syscall equivalents are just wrappers to fs-internal
95855 diff --git a/include/linux/tcp.h b/include/linux/tcp.h
95856 index 48d8a363319e..1bd559c69e83 100644
95857 --- a/include/linux/tcp.h
95858 +++ b/include/linux/tcp.h
95859 @@ -225,7 +225,8 @@ struct tcp_sock {
95860         u8      compressed_ack;
95861         u8      dup_ack_counter:2,
95862                 tlp_retrans:1,  /* TLP is a retransmission */
95863 -               unused:5;
95864 +               fast_ack_mode:2, /* which fast ack mode ? */
95865 +               unused:3;
95866         u32     chrono_start;   /* Start time in jiffies of a TCP chrono */
95867         u32     chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
95868         u8      chrono_type:2,  /* current chronograph type */
95869 diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
95870 index 61c3372d3f32..2f719b471d52 100644
95871 --- a/include/linux/tty_driver.h
95872 +++ b/include/linux/tty_driver.h
95873 @@ -228,7 +228,7 @@
95874   *
95875   *     Called when the device receives a TIOCGICOUNT ioctl. Passed a kernel
95876   *     structure to complete. This method is optional and will only be called
95877 - *     if provided (otherwise EINVAL will be returned).
95878 + *     if provided (otherwise ENOTTY will be returned).
95879   */
95881  #include <linux/export.h>
95882 diff --git a/include/linux/udp.h b/include/linux/udp.h
95883 index aa84597bdc33..ae58ff3b6b5b 100644
95884 --- a/include/linux/udp.h
95885 +++ b/include/linux/udp.h
95886 @@ -51,7 +51,9 @@ struct udp_sock {
95887                                            * different encapsulation layer set
95888                                            * this
95889                                            */
95890 -                        gro_enabled:1; /* Can accept GRO packets */
95891 +                        gro_enabled:1, /* Request GRO aggregation */
95892 +                        accept_udp_l4:1,
95893 +                        accept_udp_fraglist:1;
95894         /*
95895          * Following member retains the information to create a UDP header
95896          * when the socket is uncorked.
95897 @@ -131,8 +133,16 @@ static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
95899  static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
95901 -       return !udp_sk(sk)->gro_enabled && skb_is_gso(skb) &&
95902 -              skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4;
95903 +       if (!skb_is_gso(skb))
95904 +               return false;
95906 +       if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 && !udp_sk(sk)->accept_udp_l4)
95907 +               return true;
95909 +       if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST && !udp_sk(sk)->accept_udp_fraglist)
95910 +               return true;
95912 +       return false;
95915  #define udp_portaddr_for_each_entry(__sk, list) \
95916 diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h
95917 index 70d681918d01..bf00259493e0 100644
95918 --- a/include/linux/usb/pd.h
95919 +++ b/include/linux/usb/pd.h
95920 @@ -493,4 +493,6 @@ static inline unsigned int rdo_max_power(u32 rdo)
95921  #define PD_N_CAPS_COUNT                (PD_T_NO_RESPONSE / PD_T_SEND_SOURCE_CAP)
95922  #define PD_N_HARD_RESET_COUNT  2
95924 +#define PD_P_SNK_STDBY_MW      2500    /* 2500 mW */
95926  #endif /* __LINUX_USB_PD_H */
95927 diff --git a/include/linux/zstd.h b/include/linux/zstd.h
95928 index e87f78c9b19c..446ecabcdd02 100644
95929 --- a/include/linux/zstd.h
95930 +++ b/include/linux/zstd.h
95931 @@ -1,138 +1,97 @@
95932 +/* SPDX-License-Identifier: GPL-2.0-only */
95933  /*
95934 - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
95935 + * Copyright (c) Yann Collet, Facebook, Inc.
95936   * All rights reserved.
95937   *
95938 - * This source code is licensed under the BSD-style license found in the
95939 - * LICENSE file in the root directory of https://github.com/facebook/zstd.
95940 - * An additional grant of patent rights can be found in the PATENTS file in the
95941 - * same directory.
95942 - *
95943 - * This program is free software; you can redistribute it and/or modify it under
95944 - * the terms of the GNU General Public License version 2 as published by the
95945 - * Free Software Foundation. This program is dual-licensed; you may select
95946 - * either version 2 of the GNU General Public License ("GPL") or BSD license
95947 - * ("BSD").
95948 + * This source code is licensed under both the BSD-style license (found in the
95949 + * LICENSE file in the root directory of https://github.com/facebook/zstd) and
95950 + * the GPLv2 (found in the COPYING file in the root directory of
95951 + * https://github.com/facebook/zstd). You may select, at your option, one of the
95952 + * above-listed licenses.
95953   */
95955 -#ifndef ZSTD_H
95956 -#define ZSTD_H
95957 +#ifndef LINUX_ZSTD_H
95958 +#define LINUX_ZSTD_H
95960 -/* ======   Dependency   ======*/
95961 -#include <linux/types.h>   /* size_t */
95962 +/**
95963 + * This is a kernel-style API that wraps the upstream zstd API, which cannot be
95964 + * used directly because the symbols aren't exported. It exposes the minimal
95965 + * functionality which is currently required by users of zstd in the kernel.
95966 + * Expose extra functions from lib/zstd/zstd.h as needed.
95967 + */
95969 +/* ======   Dependency   ====== */
95970 +#include <linux/types.h>
95971 +#include <linux/zstd_errors.h>
95972 +#include <linux/zstd_lib.h>
95974 -/*-*****************************************************************************
95975 - * Introduction
95976 +/* ======   Helper Functions   ====== */
95977 +/**
95978 + * zstd_compress_bound() - maximum compressed size in worst case scenario
95979 + * @src_size: The size of the data to compress.
95980   *
95981 - * zstd, short for Zstandard, is a fast lossless compression algorithm,
95982 - * targeting real-time compression scenarios at zlib-level and better
95983 - * compression ratios. The zstd compression library provides in-memory
95984 - * compression and decompression functions. The library supports compression
95985 - * levels from 1 up to ZSTD_maxCLevel() which is 22. Levels >= 20, labeled
95986 - * ultra, should be used with caution, as they require more memory.
95987 - * Compression can be done in:
95988 - *  - a single step, reusing a context (described as Explicit memory management)
95989 - *  - unbounded multiple steps (described as Streaming compression)
95990 - * The compression ratio achievable on small data can be highly improved using
95991 - * compression with a dictionary in:
95992 - *  - a single step (described as Simple dictionary API)
95993 - *  - a single step, reusing a dictionary (described as Fast dictionary API)
95994 - ******************************************************************************/
95996 -/*======  Helper functions  ======*/
95997 + * Return:    The maximum compressed size in the worst case scenario.
95998 + */
95999 +size_t zstd_compress_bound(size_t src_size);
96001  /**
96002 - * enum ZSTD_ErrorCode - zstd error codes
96003 + * zstd_is_error() - tells if a size_t function result is an error code
96004 + * @code:  The function result to check for error.
96005   *
96006 - * Functions that return size_t can be checked for errors using ZSTD_isError()
96007 - * and the ZSTD_ErrorCode can be extracted using ZSTD_getErrorCode().
96008 + * Return: Non-zero iff the code is an error.
96009 + */
96010 +unsigned int zstd_is_error(size_t code);
96012 +/**
96013 + * enum zstd_error_code - zstd error codes
96014   */
96015 -typedef enum {
96016 -       ZSTD_error_no_error,
96017 -       ZSTD_error_GENERIC,
96018 -       ZSTD_error_prefix_unknown,
96019 -       ZSTD_error_version_unsupported,
96020 -       ZSTD_error_parameter_unknown,
96021 -       ZSTD_error_frameParameter_unsupported,
96022 -       ZSTD_error_frameParameter_unsupportedBy32bits,
96023 -       ZSTD_error_frameParameter_windowTooLarge,
96024 -       ZSTD_error_compressionParameter_unsupported,
96025 -       ZSTD_error_init_missing,
96026 -       ZSTD_error_memory_allocation,
96027 -       ZSTD_error_stage_wrong,
96028 -       ZSTD_error_dstSize_tooSmall,
96029 -       ZSTD_error_srcSize_wrong,
96030 -       ZSTD_error_corruption_detected,
96031 -       ZSTD_error_checksum_wrong,
96032 -       ZSTD_error_tableLog_tooLarge,
96033 -       ZSTD_error_maxSymbolValue_tooLarge,
96034 -       ZSTD_error_maxSymbolValue_tooSmall,
96035 -       ZSTD_error_dictionary_corrupted,
96036 -       ZSTD_error_dictionary_wrong,
96037 -       ZSTD_error_dictionaryCreation_failed,
96038 -       ZSTD_error_maxCode
96039 -} ZSTD_ErrorCode;
96040 +typedef ZSTD_ErrorCode zstd_error_code;
96042  /**
96043 - * ZSTD_maxCLevel() - maximum compression level available
96044 + * zstd_get_error_code() - translates an error function result to an error code
96045 + * @code:  The function result for which zstd_is_error(code) is true.
96046   *
96047 - * Return: Maximum compression level available.
96048 + * Return: A unique error code for this error.
96049   */
96050 -int ZSTD_maxCLevel(void);
96051 +zstd_error_code zstd_get_error_code(size_t code);
96053  /**
96054 - * ZSTD_compressBound() - maximum compressed size in worst case scenario
96055 - * @srcSize: The size of the data to compress.
96056 + * zstd_get_error_name() - translates an error function result to a string
96057 + * @code:  The function result for which zstd_is_error(code) is true.
96058   *
96059 - * Return:   The maximum compressed size in the worst case scenario.
96060 + * Return: An error string corresponding to the error code.
96061   */
96062 -size_t ZSTD_compressBound(size_t srcSize);
96063 +const char *zstd_get_error_name(size_t code);
96065  /**
96066 - * ZSTD_isError() - tells if a size_t function result is an error code
96067 - * @code:  The function result to check for error.
96068 + * zstd_min_clevel() - minimum allowed compression level
96069   *
96070 - * Return: Non-zero iff the code is an error.
96071 + * Return: The minimum allowed compression level.
96072   */
96073 -static __attribute__((unused)) unsigned int ZSTD_isError(size_t code)
96075 -       return code > (size_t)-ZSTD_error_maxCode;
96077 +int zstd_min_clevel(void);
96079  /**
96080 - * ZSTD_getErrorCode() - translates an error function result to a ZSTD_ErrorCode
96081 - * @functionResult: The result of a function for which ZSTD_isError() is true.
96082 + * zstd_max_clevel() - maximum allowed compression level
96083   *
96084 - * Return:          The ZSTD_ErrorCode corresponding to the functionResult or 0
96085 - *                  if the functionResult isn't an error.
96086 + * Return: The maximum allowed compression level.
96087   */
96088 -static __attribute__((unused)) ZSTD_ErrorCode ZSTD_getErrorCode(
96089 -       size_t functionResult)
96091 -       if (!ZSTD_isError(functionResult))
96092 -               return (ZSTD_ErrorCode)0;
96093 -       return (ZSTD_ErrorCode)(0 - functionResult);
96095 +int zstd_max_clevel(void);
96097 +/* ======   Parameter Selection   ====== */
96099  /**
96100 - * enum ZSTD_strategy - zstd compression search strategy
96101 + * enum zstd_strategy - zstd compression search strategy
96102   *
96103 - * From faster to stronger.
96104 + * From faster to stronger. See zstd_lib.h.
96105   */
96106 -typedef enum {
96107 -       ZSTD_fast,
96108 -       ZSTD_dfast,
96109 -       ZSTD_greedy,
96110 -       ZSTD_lazy,
96111 -       ZSTD_lazy2,
96112 -       ZSTD_btlazy2,
96113 -       ZSTD_btopt,
96114 -       ZSTD_btopt2
96115 -} ZSTD_strategy;
96116 +typedef ZSTD_strategy zstd_strategy;
96118  /**
96119 - * struct ZSTD_compressionParameters - zstd compression parameters
96120 + * struct zstd_compression_parameters - zstd compression parameters
96121   * @windowLog:    Log of the largest match distance. Larger means more
96122   *                compression, and more memory needed during decompression.
96123 - * @chainLog:     Fully searched segment. Larger means more compression, slower,
96124 - *                and more memory (useless for fast).
96125 + * @chainLog:     Fully searched segment. Larger means more compression,
96126 + *                slower, and more memory (useless for fast).
96127   * @hashLog:      Dispatch table. Larger means more compression,
96128   *                slower, and more memory.
96129   * @searchLog:    Number of searches. Larger means more compression and slower.
96130 @@ -141,1017 +100,348 @@ typedef enum {
96131   * @targetLength: Acceptable match size for optimal parser (only). Larger means
96132   *                more compression, and slower.
96133   * @strategy:     The zstd compression strategy.
96134 + *
96135 + * See zstd_lib.h.
96136   */
96137 -typedef struct {
96138 -       unsigned int windowLog;
96139 -       unsigned int chainLog;
96140 -       unsigned int hashLog;
96141 -       unsigned int searchLog;
96142 -       unsigned int searchLength;
96143 -       unsigned int targetLength;
96144 -       ZSTD_strategy strategy;
96145 -} ZSTD_compressionParameters;
96146 +typedef ZSTD_compressionParameters zstd_compression_parameters;
96148  /**
96149 - * struct ZSTD_frameParameters - zstd frame parameters
96150 - * @contentSizeFlag: Controls whether content size will be present in the frame
96151 - *                   header (when known).
96152 - * @checksumFlag:    Controls whether a 32-bit checksum is generated at the end
96153 - *                   of the frame for error detection.
96154 - * @noDictIDFlag:    Controls whether dictID will be saved into the frame header
96155 - *                   when using dictionary compression.
96156 + * struct zstd_frame_parameters - zstd frame parameters
96157 + * @contentSizeFlag: Controls whether content size will be present in the
96158 + *                   frame header (when known).
96159 + * @checksumFlag:    Controls whether a 32-bit checksum is generated at the
96160 + *                   end of the frame for error detection.
96161 + * @noDictIDFlag:    Controls whether dictID will be saved into the frame
96162 + *                   header when using dictionary compression.
96163   *
96164 - * The default value is all fields set to 0.
96165 + * The default value is all fields set to 0. See zstd_lib.h.
96166   */
96167 -typedef struct {
96168 -       unsigned int contentSizeFlag;
96169 -       unsigned int checksumFlag;
96170 -       unsigned int noDictIDFlag;
96171 -} ZSTD_frameParameters;
96172 +typedef ZSTD_frameParameters zstd_frame_parameters;
96174  /**
96175 - * struct ZSTD_parameters - zstd parameters
96176 + * struct zstd_parameters - zstd parameters
96177   * @cParams: The compression parameters.
96178   * @fParams: The frame parameters.
96179   */
96180 -typedef struct {
96181 -       ZSTD_compressionParameters cParams;
96182 -       ZSTD_frameParameters fParams;
96183 -} ZSTD_parameters;
96184 +typedef ZSTD_parameters zstd_parameters;
96186  /**
96187 - * ZSTD_getCParams() - returns ZSTD_compressionParameters for selected level
96188 - * @compressionLevel: The compression level from 1 to ZSTD_maxCLevel().
96189 - * @estimatedSrcSize: The estimated source size to compress or 0 if unknown.
96190 - * @dictSize:         The dictionary size or 0 if a dictionary isn't being used.
96191 + * zstd_get_params() - returns zstd_parameters for selected level
96192 + * @level:              The compression level
96193 + * @estimated_src_size: The estimated source size to compress or 0
96194 + *                      if unknown.
96195   *
96196 - * Return:            The selected ZSTD_compressionParameters.
96197 + * Return:              The selected zstd_parameters.
96198   */
96199 -ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel,
96200 -       unsigned long long estimatedSrcSize, size_t dictSize);
96201 +zstd_parameters zstd_get_params(int level,
96202 +       unsigned long long estimated_src_size);
96204 -/**
96205 - * ZSTD_getParams() - returns ZSTD_parameters for selected level
96206 - * @compressionLevel: The compression level from 1 to ZSTD_maxCLevel().
96207 - * @estimatedSrcSize: The estimated source size to compress or 0 if unknown.
96208 - * @dictSize:         The dictionary size or 0 if a dictionary isn't being used.
96209 - *
96210 - * The same as ZSTD_getCParams() except also selects the default frame
96211 - * parameters (all zero).
96212 - *
96213 - * Return:            The selected ZSTD_parameters.
96214 - */
96215 -ZSTD_parameters ZSTD_getParams(int compressionLevel,
96216 -       unsigned long long estimatedSrcSize, size_t dictSize);
96217 +/* ======   Single-pass Compression   ====== */
96219 -/*-*************************************
96220 - * Explicit memory management
96221 - **************************************/
96222 +typedef ZSTD_CCtx zstd_cctx;
96224  /**
96225 - * ZSTD_CCtxWorkspaceBound() - amount of memory needed to initialize a ZSTD_CCtx
96226 - * @cParams: The compression parameters to be used for compression.
96227 + * zstd_cctx_workspace_bound() - max memory needed to initialize a zstd_cctx
96228 + * @parameters: The compression parameters to be used.
96229   *
96230   * If multiple compression parameters might be used, the caller must call
96231 - * ZSTD_CCtxWorkspaceBound() for each set of parameters and use the maximum
96232 + * zstd_cctx_workspace_bound() for each set of parameters and use the maximum
96233   * size.
96234   *
96235 - * Return:   A lower bound on the size of the workspace that is passed to
96236 - *           ZSTD_initCCtx().
96237 + * Return:      A lower bound on the size of the workspace that is passed to
96238 + *              zstd_init_cctx().
96239   */
96240 -size_t ZSTD_CCtxWorkspaceBound(ZSTD_compressionParameters cParams);
96241 +size_t zstd_cctx_workspace_bound(const zstd_compression_parameters *parameters);
96243  /**
96244 - * struct ZSTD_CCtx - the zstd compression context
96245 - *
96246 - * When compressing many times it is recommended to allocate a context just once
96247 - * and reuse it for each successive compression operation.
96248 - */
96249 -typedef struct ZSTD_CCtx_s ZSTD_CCtx;
96250 -/**
96251 - * ZSTD_initCCtx() - initialize a zstd compression context
96252 - * @workspace:     The workspace to emplace the context into. It must outlive
96253 - *                 the returned context.
96254 - * @workspaceSize: The size of workspace. Use ZSTD_CCtxWorkspaceBound() to
96255 - *                 determine how large the workspace must be.
96256 - *
96257 - * Return:         A compression context emplaced into workspace.
96258 - */
96259 -ZSTD_CCtx *ZSTD_initCCtx(void *workspace, size_t workspaceSize);
96261 -/**
96262 - * ZSTD_compressCCtx() - compress src into dst
96263 - * @ctx:         The context. Must have been initialized with a workspace at
96264 - *               least as large as ZSTD_CCtxWorkspaceBound(params.cParams).
96265 - * @dst:         The buffer to compress src into.
96266 - * @dstCapacity: The size of the destination buffer. May be any size, but
96267 - *               ZSTD_compressBound(srcSize) is guaranteed to be large enough.
96268 - * @src:         The data to compress.
96269 - * @srcSize:     The size of the data to compress.
96270 - * @params:      The parameters to use for compression. See ZSTD_getParams().
96271 - *
96272 - * Return:       The compressed size or an error, which can be checked using
96273 - *               ZSTD_isError().
96274 - */
96275 -size_t ZSTD_compressCCtx(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity,
96276 -       const void *src, size_t srcSize, ZSTD_parameters params);
96278 -/**
96279 - * ZSTD_DCtxWorkspaceBound() - amount of memory needed to initialize a ZSTD_DCtx
96280 - *
96281 - * Return: A lower bound on the size of the workspace that is passed to
96282 - *         ZSTD_initDCtx().
96283 - */
96284 -size_t ZSTD_DCtxWorkspaceBound(void);
96286 -/**
96287 - * struct ZSTD_DCtx - the zstd decompression context
96288 - *
96289 - * When decompressing many times it is recommended to allocate a context just
96290 - * once and reuse it for each successive decompression operation.
96291 - */
96292 -typedef struct ZSTD_DCtx_s ZSTD_DCtx;
96293 -/**
96294 - * ZSTD_initDCtx() - initialize a zstd decompression context
96295 - * @workspace:     The workspace to emplace the context into. It must outlive
96296 - *                 the returned context.
96297 - * @workspaceSize: The size of workspace. Use ZSTD_DCtxWorkspaceBound() to
96298 - *                 determine how large the workspace must be.
96299 - *
96300 - * Return:         A decompression context emplaced into workspace.
96301 - */
96302 -ZSTD_DCtx *ZSTD_initDCtx(void *workspace, size_t workspaceSize);
96304 -/**
96305 - * ZSTD_decompressDCtx() - decompress zstd compressed src into dst
96306 - * @ctx:         The decompression context.
96307 - * @dst:         The buffer to decompress src into.
96308 - * @dstCapacity: The size of the destination buffer. Must be at least as large
96309 - *               as the decompressed size. If the caller cannot upper bound the
96310 - *               decompressed size, then it's better to use the streaming API.
96311 - * @src:         The zstd compressed data to decompress. Multiple concatenated
96312 - *               frames and skippable frames are allowed.
96313 - * @srcSize:     The exact size of the data to decompress.
96314 - *
96315 - * Return:       The decompressed size or an error, which can be checked using
96316 - *               ZSTD_isError().
96317 - */
96318 -size_t ZSTD_decompressDCtx(ZSTD_DCtx *ctx, void *dst, size_t dstCapacity,
96319 -       const void *src, size_t srcSize);
96321 -/*-************************
96322 - * Simple dictionary API
96323 - **************************/
96325 -/**
96326 - * ZSTD_compress_usingDict() - compress src into dst using a dictionary
96327 - * @ctx:         The context. Must have been initialized with a workspace at
96328 - *               least as large as ZSTD_CCtxWorkspaceBound(params.cParams).
96329 - * @dst:         The buffer to compress src into.
96330 - * @dstCapacity: The size of the destination buffer. May be any size, but
96331 - *               ZSTD_compressBound(srcSize) is guaranteed to be large enough.
96332 - * @src:         The data to compress.
96333 - * @srcSize:     The size of the data to compress.
96334 - * @dict:        The dictionary to use for compression.
96335 - * @dictSize:    The size of the dictionary.
96336 - * @params:      The parameters to use for compression. See ZSTD_getParams().
96337 - *
96338 - * Compression using a predefined dictionary. The same dictionary must be used
96339 - * during decompression.
96340 - *
96341 - * Return:       The compressed size or an error, which can be checked using
96342 - *               ZSTD_isError().
96343 - */
96344 -size_t ZSTD_compress_usingDict(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity,
96345 -       const void *src, size_t srcSize, const void *dict, size_t dictSize,
96346 -       ZSTD_parameters params);
96348 -/**
96349 - * ZSTD_decompress_usingDict() - decompress src into dst using a dictionary
96350 - * @ctx:         The decompression context.
96351 - * @dst:         The buffer to decompress src into.
96352 - * @dstCapacity: The size of the destination buffer. Must be at least as large
96353 - *               as the decompressed size. If the caller cannot upper bound the
96354 - *               decompressed size, then it's better to use the streaming API.
96355 - * @src:         The zstd compressed data to decompress. Multiple concatenated
96356 - *               frames and skippable frames are allowed.
96357 - * @srcSize:     The exact size of the data to decompress.
96358 - * @dict:        The dictionary to use for decompression. The same dictionary
96359 - *               must've been used to compress the data.
96360 - * @dictSize:    The size of the dictionary.
96361 - *
96362 - * Return:       The decompressed size or an error, which can be checked using
96363 - *               ZSTD_isError().
96364 - */
96365 -size_t ZSTD_decompress_usingDict(ZSTD_DCtx *ctx, void *dst, size_t dstCapacity,
96366 -       const void *src, size_t srcSize, const void *dict, size_t dictSize);
96368 -/*-**************************
96369 - * Fast dictionary API
96370 - ***************************/
96372 -/**
96373 - * ZSTD_CDictWorkspaceBound() - memory needed to initialize a ZSTD_CDict
96374 - * @cParams: The compression parameters to be used for compression.
96375 + * zstd_init_cctx() - initialize a zstd compression context
96376 + * @workspace:      The workspace to emplace the context into. It must outlive
96377 + *                  the returned context.
96378 + * @workspace_size: The size of workspace. Use zstd_cctx_workspace_bound() to
96379 + *                  determine how large the workspace must be.
96380   *
96381 - * Return:   A lower bound on the size of the workspace that is passed to
96382 - *           ZSTD_initCDict().
96383 - */
96384 -size_t ZSTD_CDictWorkspaceBound(ZSTD_compressionParameters cParams);
96386 -/**
96387 - * struct ZSTD_CDict - a digested dictionary to be used for compression
96388 + * Return:          A zstd compression context or NULL on error.
96389   */
96390 -typedef struct ZSTD_CDict_s ZSTD_CDict;
96391 +zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size);
96393  /**
96394 - * ZSTD_initCDict() - initialize a digested dictionary for compression
96395 - * @dictBuffer:    The dictionary to digest. The buffer is referenced by the
96396 - *                 ZSTD_CDict so it must outlive the returned ZSTD_CDict.
96397 - * @dictSize:      The size of the dictionary.
96398 - * @params:        The parameters to use for compression. See ZSTD_getParams().
96399 - * @workspace:     The workspace. It must outlive the returned ZSTD_CDict.
96400 - * @workspaceSize: The workspace size. Must be at least
96401 - *                 ZSTD_CDictWorkspaceBound(params.cParams).
96402 + * zstd_compress_cctx() - compress src into dst with the initialized parameters
96403 + * @cctx:         The context. Must have been initialized with zstd_init_cctx().
96404 + * @dst:          The buffer to compress src into.
96405 + * @dst_capacity: The size of the destination buffer. May be any size, but
96406 + *                ZSTD_compressBound(srcSize) is guaranteed to be large enough.
96407 + * @src:          The data to compress.
96408 + * @src_size:     The size of the data to compress.
96409 + * @parameters:   The compression parameters to be used.
96410   *
96411 - * When compressing multiple messages / blocks with the same dictionary it is
96412 - * recommended to load it just once. The ZSTD_CDict merely references the
96413 - * dictBuffer, so it must outlive the returned ZSTD_CDict.
96414 - *
96415 - * Return:         The digested dictionary emplaced into workspace.
96416 + * Return:        The compressed size or an error, which can be checked using
96417 + *                zstd_is_error().
96418   */
96419 -ZSTD_CDict *ZSTD_initCDict(const void *dictBuffer, size_t dictSize,
96420 -       ZSTD_parameters params, void *workspace, size_t workspaceSize);
96421 +size_t zstd_compress_cctx(zstd_cctx *cctx, void *dst, size_t dst_capacity,
96422 +       const void *src, size_t src_size, const zstd_parameters *parameters);
96424 -/**
96425 - * ZSTD_compress_usingCDict() - compress src into dst using a ZSTD_CDict
96426 - * @ctx:         The context. Must have been initialized with a workspace at
96427 - *               least as large as ZSTD_CCtxWorkspaceBound(cParams) where
96428 - *               cParams are the compression parameters used to initialize the
96429 - *               cdict.
96430 - * @dst:         The buffer to compress src into.
96431 - * @dstCapacity: The size of the destination buffer. May be any size, but
96432 - *               ZSTD_compressBound(srcSize) is guaranteed to be large enough.
96433 - * @src:         The data to compress.
96434 - * @srcSize:     The size of the data to compress.
96435 - * @cdict:       The digested dictionary to use for compression.
96436 - * @params:      The parameters to use for compression. See ZSTD_getParams().
96437 - *
96438 - * Compression using a digested dictionary. The same dictionary must be used
96439 - * during decompression.
96440 - *
96441 - * Return:       The compressed size or an error, which can be checked using
96442 - *               ZSTD_isError().
96443 - */
96444 -size_t ZSTD_compress_usingCDict(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
96445 -       const void *src, size_t srcSize, const ZSTD_CDict *cdict);
96446 +/* ======   Single-pass Decompression   ====== */
96448 +typedef ZSTD_DCtx zstd_dctx;
96450  /**
96451 - * ZSTD_DDictWorkspaceBound() - memory needed to initialize a ZSTD_DDict
96452 + * zstd_dctx_workspace_bound() - max memory needed to initialize a zstd_dctx
96453   *
96454 - * Return:  A lower bound on the size of the workspace that is passed to
96455 - *          ZSTD_initDDict().
96456 - */
96457 -size_t ZSTD_DDictWorkspaceBound(void);
96459 -/**
96460 - * struct ZSTD_DDict - a digested dictionary to be used for decompression
96461 + * Return: A lower bound on the size of the workspace that is passed to
96462 + *         zstd_init_dctx().
96463   */
96464 -typedef struct ZSTD_DDict_s ZSTD_DDict;
96465 +size_t zstd_dctx_workspace_bound(void);
96467  /**
96468 - * ZSTD_initDDict() - initialize a digested dictionary for decompression
96469 - * @dictBuffer:    The dictionary to digest. The buffer is referenced by the
96470 - *                 ZSTD_DDict so it must outlive the returned ZSTD_DDict.
96471 - * @dictSize:      The size of the dictionary.
96472 - * @workspace:     The workspace. It must outlive the returned ZSTD_DDict.
96473 - * @workspaceSize: The workspace size. Must be at least
96474 - *                 ZSTD_DDictWorkspaceBound().
96475 - *
96476 - * When decompressing multiple messages / blocks with the same dictionary it is
96477 - * recommended to load it just once. The ZSTD_DDict merely references the
96478 - * dictBuffer, so it must outlive the returned ZSTD_DDict.
96479 + * zstd_init_dctx() - initialize a zstd decompression context
96480 + * @workspace:      The workspace to emplace the context into. It must outlive
96481 + *                  the returned context.
96482 + * @workspace_size: The size of workspace. Use zstd_dctx_workspace_bound() to
96483 + *                  determine how large the workspace must be.
96484   *
96485 - * Return:         The digested dictionary emplaced into workspace.
96486 + * Return:          A zstd decompression context or NULL on error.
96487   */
96488 -ZSTD_DDict *ZSTD_initDDict(const void *dictBuffer, size_t dictSize,
96489 -       void *workspace, size_t workspaceSize);
96490 +zstd_dctx *zstd_init_dctx(void *workspace, size_t workspace_size);
96492  /**
96493 - * ZSTD_decompress_usingDDict() - decompress src into dst using a ZSTD_DDict
96494 - * @ctx:         The decompression context.
96495 - * @dst:         The buffer to decompress src into.
96496 - * @dstCapacity: The size of the destination buffer. Must be at least as large
96497 - *               as the decompressed size. If the caller cannot upper bound the
96498 - *               decompressed size, then it's better to use the streaming API.
96499 - * @src:         The zstd compressed data to decompress. Multiple concatenated
96500 - *               frames and skippable frames are allowed.
96501 - * @srcSize:     The exact size of the data to decompress.
96502 - * @ddict:       The digested dictionary to use for decompression. The same
96503 - *               dictionary must've been used to compress the data.
96504 + * zstd_decompress_dctx() - decompress zstd compressed src into dst
96505 + * @dctx:         The decompression context.
96506 + * @dst:          The buffer to decompress src into.
96507 + * @dst_capacity: The size of the destination buffer. Must be at least as large
96508 + *                as the decompressed size. If the caller cannot upper bound the
96509 + *                decompressed size, then it's better to use the streaming API.
96510 + * @src:          The zstd compressed data to decompress. Multiple concatenated
96511 + *                frames and skippable frames are allowed.
96512 + * @src_size:     The exact size of the data to decompress.
96513   *
96514 - * Return:       The decompressed size or an error, which can be checked using
96515 - *               ZSTD_isError().
96516 + * Return:        The decompressed size or an error, which can be checked using
96517 + *                zstd_is_error().
96518   */
96519 -size_t ZSTD_decompress_usingDDict(ZSTD_DCtx *dctx, void *dst,
96520 -       size_t dstCapacity, const void *src, size_t srcSize,
96521 -       const ZSTD_DDict *ddict);
96522 +size_t zstd_decompress_dctx(zstd_dctx *dctx, void *dst, size_t dst_capacity,
96523 +       const void *src, size_t src_size);
96526 -/*-**************************
96527 - * Streaming
96528 - ***************************/
96529 +/* ======   Streaming Buffers   ====== */
96531  /**
96532 - * struct ZSTD_inBuffer - input buffer for streaming
96533 + * struct zstd_in_buffer - input buffer for streaming
96534   * @src:  Start of the input buffer.
96535   * @size: Size of the input buffer.
96536   * @pos:  Position where reading stopped. Will be updated.
96537   *        Necessarily 0 <= pos <= size.
96538 + *
96539 + * See zstd_lib.h.
96540   */
96541 -typedef struct ZSTD_inBuffer_s {
96542 -       const void *src;
96543 -       size_t size;
96544 -       size_t pos;
96545 -} ZSTD_inBuffer;
96546 +typedef ZSTD_inBuffer zstd_in_buffer;
96548  /**
96549 - * struct ZSTD_outBuffer - output buffer for streaming
96550 + * struct zstd_out_buffer - output buffer for streaming
96551   * @dst:  Start of the output buffer.
96552   * @size: Size of the output buffer.
96553   * @pos:  Position where writing stopped. Will be updated.
96554   *        Necessarily 0 <= pos <= size.
96555 + *
96556 + * See zstd_lib.h.
96557   */
96558 -typedef struct ZSTD_outBuffer_s {
96559 -       void *dst;
96560 -       size_t size;
96561 -       size_t pos;
96562 -} ZSTD_outBuffer;
96563 +typedef ZSTD_outBuffer zstd_out_buffer;
96565 +/* ======   Streaming Compression   ====== */
96568 -/*-*****************************************************************************
96569 - * Streaming compression - HowTo
96570 - *
96571 - * A ZSTD_CStream object is required to track streaming operation.
96572 - * Use ZSTD_initCStream() to initialize a ZSTD_CStream object.
96573 - * ZSTD_CStream objects can be reused multiple times on consecutive compression
96574 - * operations. It is recommended to re-use ZSTD_CStream in situations where many
96575 - * streaming operations will be achieved consecutively. Use one separate
96576 - * ZSTD_CStream per thread for parallel execution.
96577 - *
96578 - * Use ZSTD_compressStream() repetitively to consume input stream.
96579 - * The function will automatically update both `pos` fields.
96580 - * Note that it may not consume the entire input, in which case `pos < size`,
96581 - * and it's up to the caller to present again remaining data.
96582 - * It returns a hint for the preferred number of bytes to use as an input for
96583 - * the next function call.
96584 - *
96585 - * At any moment, it's possible to flush whatever data remains within internal
96586 - * buffer, using ZSTD_flushStream(). `output->pos` will be updated. There might
96587 - * still be some content left within the internal buffer if `output->size` is
96588 - * too small. It returns the number of bytes left in the internal buffer and
96589 - * must be called until it returns 0.
96590 - *
96591 - * ZSTD_endStream() instructs to finish a frame. It will perform a flush and
96592 - * write frame epilogue. The epilogue is required for decoders to consider a
96593 - * frame completed. Similar to ZSTD_flushStream(), it may not be able to flush
96594 - * the full content if `output->size` is too small. In which case, call again
96595 - * ZSTD_endStream() to complete the flush. It returns the number of bytes left
96596 - * in the internal buffer and must be called until it returns 0.
96597 - ******************************************************************************/
96598 +typedef ZSTD_CStream zstd_cstream;
96600  /**
96601 - * ZSTD_CStreamWorkspaceBound() - memory needed to initialize a ZSTD_CStream
96602 - * @cParams: The compression parameters to be used for compression.
96603 + * zstd_cstream_workspace_bound() - memory needed to initialize a zstd_cstream
96604 + * @cparams: The compression parameters to be used for compression.
96605   *
96606   * Return:   A lower bound on the size of the workspace that is passed to
96607 - *           ZSTD_initCStream() and ZSTD_initCStream_usingCDict().
96608 - */
96609 -size_t ZSTD_CStreamWorkspaceBound(ZSTD_compressionParameters cParams);
96611 -/**
96612 - * struct ZSTD_CStream - the zstd streaming compression context
96613 - */
96614 -typedef struct ZSTD_CStream_s ZSTD_CStream;
96616 -/*===== ZSTD_CStream management functions =====*/
96617 -/**
96618 - * ZSTD_initCStream() - initialize a zstd streaming compression context
96619 - * @params:         The zstd compression parameters.
96620 - * @pledgedSrcSize: If params.fParams.contentSizeFlag == 1 then the caller must
96621 - *                  pass the source size (zero means empty source). Otherwise,
96622 - *                  the caller may optionally pass the source size, or zero if
96623 - *                  unknown.
96624 - * @workspace:      The workspace to emplace the context into. It must outlive
96625 - *                  the returned context.
96626 - * @workspaceSize:  The size of workspace.
96627 - *                  Use ZSTD_CStreamWorkspaceBound(params.cParams) to determine
96628 - *                  how large the workspace must be.
96629 - *
96630 - * Return:          The zstd streaming compression context.
96631 + *           zstd_init_cstream().
96632   */
96633 -ZSTD_CStream *ZSTD_initCStream(ZSTD_parameters params,
96634 -       unsigned long long pledgedSrcSize, void *workspace,
96635 -       size_t workspaceSize);
96636 +size_t zstd_cstream_workspace_bound(const zstd_compression_parameters *cparams);
96638  /**
96639 - * ZSTD_initCStream_usingCDict() - initialize a streaming compression context
96640 - * @cdict:          The digested dictionary to use for compression.
96641 - * @pledgedSrcSize: Optionally the source size, or zero if unknown.
96642 - * @workspace:      The workspace to emplace the context into. It must outlive
96643 - *                  the returned context.
96644 - * @workspaceSize:  The size of workspace. Call ZSTD_CStreamWorkspaceBound()
96645 - *                  with the cParams used to initialize the cdict to determine
96646 - *                  how large the workspace must be.
96647 + * zstd_init_cstream() - initialize a zstd streaming compression context
96648 + * @parameters:       The zstd parameters to use for compression.
96649 + * @pledged_src_size: If params.fParams.contentSizeFlag == 1 then the caller
96650 + *                    must pass the source size (zero means empty source).
96651 + *                    Otherwise, the caller may optionally pass the source
96652 + *                    size, or zero if unknown.
96653 + * @workspace:        The workspace to emplace the context into. It must outlive
96654 + *                    the returned context.
96655 + * @workspace_size:   The size of workspace.
96656 + *                    Use zstd_cstream_workspace_bound(params->cparams) to
96657 + *                    determine how large the workspace must be.
96658   *
96659 - * Return:          The zstd streaming compression context.
96660 + * Return:            The zstd streaming compression context or NULL on error.
96661   */
96662 -ZSTD_CStream *ZSTD_initCStream_usingCDict(const ZSTD_CDict *cdict,
96663 -       unsigned long long pledgedSrcSize, void *workspace,
96664 -       size_t workspaceSize);
96665 +zstd_cstream *zstd_init_cstream(const zstd_parameters *parameters,
96666 +       unsigned long long pledged_src_size, void *workspace, size_t workspace_size);
96668 -/*===== Streaming compression functions =====*/
96669  /**
96670 - * ZSTD_resetCStream() - reset the context using parameters from creation
96671 - * @zcs:            The zstd streaming compression context to reset.
96672 - * @pledgedSrcSize: Optionally the source size, or zero if unknown.
96673 + * zstd_reset_cstream() - reset the context using parameters from creation
96674 + * @cstream:          The zstd streaming compression context to reset.
96675 + * @pledged_src_size: Optionally the source size, or zero if unknown.
96676   *
96677   * Resets the context using the parameters from creation. Skips dictionary
96678 - * loading, since it can be reused. If `pledgedSrcSize` is non-zero the frame
96679 + * loading, since it can be reused. If `pledged_src_size` is non-zero the frame
96680   * content size is always written into the frame header.
96681   *
96682 - * Return:          Zero or an error, which can be checked using ZSTD_isError().
96683 + * Return:            Zero or an error, which can be checked using
96684 + *                    zstd_is_error().
96685   */
96686 -size_t ZSTD_resetCStream(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize);
96687 +size_t zstd_reset_cstream(zstd_cstream *cstream,
96688 +       unsigned long long pledged_src_size);
96690  /**
96691 - * ZSTD_compressStream() - streaming compress some of input into output
96692 - * @zcs:    The zstd streaming compression context.
96693 - * @output: Destination buffer. `output->pos` is updated to indicate how much
96694 - *          compressed data was written.
96695 - * @input:  Source buffer. `input->pos` is updated to indicate how much data was
96696 - *          read. Note that it may not consume the entire input, in which case
96697 - *          `input->pos < input->size`, and it's up to the caller to present
96698 - *          remaining data again.
96699 + * zstd_compress_stream() - streaming compress some of input into output
96700 + * @cstream: The zstd streaming compression context.
96701 + * @output:  Destination buffer. `output->pos` is updated to indicate how much
96702 + *           compressed data was written.
96703 + * @input:   Source buffer. `input->pos` is updated to indicate how much data
96704 + *           was read. Note that it may not consume the entire input, in which
96705 + *           case `input->pos < input->size`, and it's up to the caller to
96706 + *           present remaining data again.
96707   *
96708   * The `input` and `output` buffers may be any size. Guaranteed to make some
96709   * forward progress if `input` and `output` are not empty.
96710   *
96711 - * Return:  A hint for the number of bytes to use as the input for the next
96712 - *          function call or an error, which can be checked using
96713 - *          ZSTD_isError().
96714 + * Return:   A hint for the number of bytes to use as the input for the next
96715 + *           function call or an error, which can be checked using
96716 + *           zstd_is_error().
96717   */
96718 -size_t ZSTD_compressStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output,
96719 -       ZSTD_inBuffer *input);
96720 +size_t zstd_compress_stream(zstd_cstream *cstream, zstd_out_buffer *output,
96721 +       zstd_in_buffer *input);
96723  /**
96724 - * ZSTD_flushStream() - flush internal buffers into output
96725 - * @zcs:    The zstd streaming compression context.
96726 - * @output: Destination buffer. `output->pos` is updated to indicate how much
96727 - *          compressed data was written.
96728 + * zstd_flush_stream() - flush internal buffers into output
96729 + * @cstream: The zstd streaming compression context.
96730 + * @output:  Destination buffer. `output->pos` is updated to indicate how much
96731 + *           compressed data was written.
96732   *
96733 - * ZSTD_flushStream() must be called until it returns 0, meaning all the data
96734 - * has been flushed. Since ZSTD_flushStream() causes a block to be ended,
96735 + * zstd_flush_stream() must be called until it returns 0, meaning all the data
96736 + * has been flushed. Since zstd_flush_stream() causes a block to be ended,
96737   * calling it too often will degrade the compression ratio.
96738   *
96739 - * Return:  The number of bytes still present within internal buffers or an
96740 - *          error, which can be checked using ZSTD_isError().
96741 + * Return:   The number of bytes still present within internal buffers or an
96742 + *           error, which can be checked using zstd_is_error().
96743   */
96744 -size_t ZSTD_flushStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output);
96745 -/**
96746 - * ZSTD_endStream() - flush internal buffers into output and end the frame
96747 - * @zcs:    The zstd streaming compression context.
96748 - * @output: Destination buffer. `output->pos` is updated to indicate how much
96749 - *          compressed data was written.
96750 - *
96751 - * ZSTD_endStream() must be called until it returns 0, meaning all the data has
96752 - * been flushed and the frame epilogue has been written.
96753 - *
96754 - * Return:  The number of bytes still present within internal buffers or an
96755 - *          error, which can be checked using ZSTD_isError().
96756 - */
96757 -size_t ZSTD_endStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output);
96758 +size_t zstd_flush_stream(zstd_cstream *cstream, zstd_out_buffer *output);
96760  /**
96761 - * ZSTD_CStreamInSize() - recommended size for the input buffer
96762 - *
96763 - * Return: The recommended size for the input buffer.
96764 - */
96765 -size_t ZSTD_CStreamInSize(void);
96766 -/**
96767 - * ZSTD_CStreamOutSize() - recommended size for the output buffer
96768 + * zstd_end_stream() - flush internal buffers into output and end the frame
96769 + * @cstream: The zstd streaming compression context.
96770 + * @output:  Destination buffer. `output->pos` is updated to indicate how much
96771 + *           compressed data was written.
96772   *
96773 - * When the output buffer is at least this large, it is guaranteed to be large
96774 - * enough to flush at least one complete compressed block.
96775 + * zstd_end_stream() must be called until it returns 0, meaning all the data has
96776 + * been flushed and the frame epilogue has been written.
96777   *
96778 - * Return: The recommended size for the output buffer.
96779 + * Return:   The number of bytes still present within internal buffers or an
96780 + *           error, which can be checked using zstd_is_error().
96781   */
96782 -size_t ZSTD_CStreamOutSize(void);
96783 +size_t zstd_end_stream(zstd_cstream *cstream, zstd_out_buffer *output);
96785 +/* ======   Streaming Decompression   ====== */
96788 -/*-*****************************************************************************
96789 - * Streaming decompression - HowTo
96790 - *
96791 - * A ZSTD_DStream object is required to track streaming operations.
96792 - * Use ZSTD_initDStream() to initialize a ZSTD_DStream object.
96793 - * ZSTD_DStream objects can be re-used multiple times.
96794 - *
96795 - * Use ZSTD_decompressStream() repetitively to consume your input.
96796 - * The function will update both `pos` fields.
96797 - * If `input->pos < input->size`, some input has not been consumed.
96798 - * It's up to the caller to present again remaining data.
96799 - * If `output->pos < output->size`, decoder has flushed everything it could.
96800 - * Returns 0 iff a frame is completely decoded and fully flushed.
96801 - * Otherwise it returns a suggested next input size that will never load more
96802 - * than the current frame.
96803 - ******************************************************************************/
96804 +typedef ZSTD_DStream zstd_dstream;
96806  /**
96807 - * ZSTD_DStreamWorkspaceBound() - memory needed to initialize a ZSTD_DStream
96808 - * @maxWindowSize: The maximum window size allowed for compressed frames.
96809 + * zstd_dstream_workspace_bound() - memory needed to initialize a zstd_dstream
96810 + * @max_window_size: The maximum window size allowed for compressed frames.
96811   *
96812 - * Return:         A lower bound on the size of the workspace that is passed to
96813 - *                 ZSTD_initDStream() and ZSTD_initDStream_usingDDict().
96814 + * Return:           A lower bound on the size of the workspace that is passed
96815 + *                   to zstd_init_dstream().
96816   */
96817 -size_t ZSTD_DStreamWorkspaceBound(size_t maxWindowSize);
96818 +size_t zstd_dstream_workspace_bound(size_t max_window_size);
96820  /**
96821 - * struct ZSTD_DStream - the zstd streaming decompression context
96822 - */
96823 -typedef struct ZSTD_DStream_s ZSTD_DStream;
96824 -/*===== ZSTD_DStream management functions =====*/
96825 -/**
96826 - * ZSTD_initDStream() - initialize a zstd streaming decompression context
96827 - * @maxWindowSize: The maximum window size allowed for compressed frames.
96828 - * @workspace:     The workspace to emplace the context into. It must outlive
96829 - *                 the returned context.
96830 - * @workspaceSize: The size of workspace.
96831 - *                 Use ZSTD_DStreamWorkspaceBound(maxWindowSize) to determine
96832 - *                 how large the workspace must be.
96833 - *
96834 - * Return:         The zstd streaming decompression context.
96835 - */
96836 -ZSTD_DStream *ZSTD_initDStream(size_t maxWindowSize, void *workspace,
96837 -       size_t workspaceSize);
96838 -/**
96839 - * ZSTD_initDStream_usingDDict() - initialize streaming decompression context
96840 - * @maxWindowSize: The maximum window size allowed for compressed frames.
96841 - * @ddict:         The digested dictionary to use for decompression.
96842 - * @workspace:     The workspace to emplace the context into. It must outlive
96843 - *                 the returned context.
96844 - * @workspaceSize: The size of workspace.
96845 - *                 Use ZSTD_DStreamWorkspaceBound(maxWindowSize) to determine
96846 - *                 how large the workspace must be.
96847 + * zstd_init_dstream() - initialize a zstd streaming decompression context
96848 + * @max_window_size: The maximum window size allowed for compressed frames.
96849 + * @workspace:       The workspace to emplace the context into. It must outlive
96850 + *                   the returned context.
96851 + * @workspace_size:  The size of workspace.
96852 + *                   Use zstd_dstream_workspace_bound(max_window_size) to
96853 + *                   determine how large the workspace must be.
96854   *
96855 - * Return:         The zstd streaming decompression context.
96856 + * Return:           The zstd streaming decompression context.
96857   */
96858 -ZSTD_DStream *ZSTD_initDStream_usingDDict(size_t maxWindowSize,
96859 -       const ZSTD_DDict *ddict, void *workspace, size_t workspaceSize);
96860 +zstd_dstream *zstd_init_dstream(size_t max_window_size, void *workspace,
96861 +       size_t workspace_size);
96863 -/*===== Streaming decompression functions =====*/
96864  /**
96865 - * ZSTD_resetDStream() - reset the context using parameters from creation
96866 - * @zds:   The zstd streaming decompression context to reset.
96867 + * zstd_reset_dstream() - reset the context using parameters from creation
96868 + * @dstream: The zstd streaming decompression context to reset.
96869   *
96870   * Resets the context using the parameters from creation. Skips dictionary
96871   * loading, since it can be reused.
96872   *
96873 - * Return: Zero or an error, which can be checked using ZSTD_isError().
96874 + * Return:   Zero or an error, which can be checked using zstd_is_error().
96875   */
96876 -size_t ZSTD_resetDStream(ZSTD_DStream *zds);
96877 +size_t zstd_reset_dstream(zstd_dstream *dstream);
96879  /**
96880 - * ZSTD_decompressStream() - streaming decompress some of input into output
96881 - * @zds:    The zstd streaming decompression context.
96882 - * @output: Destination buffer. `output.pos` is updated to indicate how much
96883 - *          decompressed data was written.
96884 - * @input:  Source buffer. `input.pos` is updated to indicate how much data was
96885 - *          read. Note that it may not consume the entire input, in which case
96886 - *          `input.pos < input.size`, and it's up to the caller to present
96887 - *          remaining data again.
96888 + * zstd_decompress_stream() - streaming decompress some of input into output
96889 + * @dstream: The zstd streaming decompression context.
96890 + * @output:  Destination buffer. `output.pos` is updated to indicate how much
96891 + *           decompressed data was written.
96892 + * @input:   Source buffer. `input.pos` is updated to indicate how much data was
96893 + *           read. Note that it may not consume the entire input, in which case
96894 + *           `input.pos < input.size`, and it's up to the caller to present
96895 + *           remaining data again.
96896   *
96897   * The `input` and `output` buffers may be any size. Guaranteed to make some
96898   * forward progress if `input` and `output` are not empty.
96899 - * ZSTD_decompressStream() will not consume the last byte of the frame until
96900 + * zstd_decompress_stream() will not consume the last byte of the frame until
96901   * the entire frame is flushed.
96902   *
96903 - * Return:  Returns 0 iff a frame is completely decoded and fully flushed.
96904 - *          Otherwise returns a hint for the number of bytes to use as the input
96905 - *          for the next function call or an error, which can be checked using
96906 - *          ZSTD_isError(). The size hint will never load more than the frame.
96907 + * Return:   Returns 0 iff a frame is completely decoded and fully flushed.
96908 + *           Otherwise returns a hint for the number of bytes to use as the
96909 + *           input for the next function call or an error, which can be checked
96910 + *           using zstd_is_error(). The size hint will never load more than the
96911 + *           frame.
96912   */
96913 -size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output,
96914 -       ZSTD_inBuffer *input);
96915 +size_t zstd_decompress_stream(zstd_dstream *dstream, zstd_out_buffer *output,
96916 +       zstd_in_buffer *input);
96918 -/**
96919 - * ZSTD_DStreamInSize() - recommended size for the input buffer
96920 - *
96921 - * Return: The recommended size for the input buffer.
96922 - */
96923 -size_t ZSTD_DStreamInSize(void);
96924 -/**
96925 - * ZSTD_DStreamOutSize() - recommended size for the output buffer
96926 - *
96927 - * When the output buffer is at least this large, it is guaranteed to be large
96928 - * enough to flush at least one complete decompressed block.
96929 - *
96930 - * Return: The recommended size for the output buffer.
96931 - */
96932 -size_t ZSTD_DStreamOutSize(void);
96935 -/* --- Constants ---*/
96936 -#define ZSTD_MAGICNUMBER            0xFD2FB528   /* >= v0.8.0 */
96937 -#define ZSTD_MAGIC_SKIPPABLE_START  0x184D2A50U
96939 -#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
96940 -#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
96942 -#define ZSTD_WINDOWLOG_MAX_32  27
96943 -#define ZSTD_WINDOWLOG_MAX_64  27
96944 -#define ZSTD_WINDOWLOG_MAX \
96945 -       ((unsigned int)(sizeof(size_t) == 4 \
96946 -               ? ZSTD_WINDOWLOG_MAX_32 \
96947 -               : ZSTD_WINDOWLOG_MAX_64))
96948 -#define ZSTD_WINDOWLOG_MIN 10
96949 -#define ZSTD_HASHLOG_MAX ZSTD_WINDOWLOG_MAX
96950 -#define ZSTD_HASHLOG_MIN        6
96951 -#define ZSTD_CHAINLOG_MAX     (ZSTD_WINDOWLOG_MAX+1)
96952 -#define ZSTD_CHAINLOG_MIN      ZSTD_HASHLOG_MIN
96953 -#define ZSTD_HASHLOG3_MAX      17
96954 -#define ZSTD_SEARCHLOG_MAX    (ZSTD_WINDOWLOG_MAX-1)
96955 -#define ZSTD_SEARCHLOG_MIN      1
96956 -/* only for ZSTD_fast, other strategies are limited to 6 */
96957 -#define ZSTD_SEARCHLENGTH_MAX   7
96958 -/* only for ZSTD_btopt, other strategies are limited to 4 */
96959 -#define ZSTD_SEARCHLENGTH_MIN   3
96960 -#define ZSTD_TARGETLENGTH_MIN   4
96961 -#define ZSTD_TARGETLENGTH_MAX 999
96963 -/* for static allocation */
96964 -#define ZSTD_FRAMEHEADERSIZE_MAX 18
96965 -#define ZSTD_FRAMEHEADERSIZE_MIN  6
96966 -#define ZSTD_frameHeaderSize_prefix 5
96967 -#define ZSTD_frameHeaderSize_min ZSTD_FRAMEHEADERSIZE_MIN
96968 -#define ZSTD_frameHeaderSize_max ZSTD_FRAMEHEADERSIZE_MAX
96969 -/* magic number + skippable frame length */
96970 -#define ZSTD_skippableHeaderSize 8
96973 -/*-*************************************
96974 - * Compressed size functions
96975 - **************************************/
96977 -/**
96978 - * ZSTD_findFrameCompressedSize() - returns the size of a compressed frame
96979 - * @src:     Source buffer. It should point to the start of a zstd encoded frame
96980 - *           or a skippable frame.
96981 - * @srcSize: The size of the source buffer. It must be at least as large as the
96982 - *           size of the frame.
96983 - *
96984 - * Return:   The compressed size of the frame pointed to by `src` or an error,
96985 - *           which can be check with ZSTD_isError().
96986 - *           Suitable to pass to ZSTD_decompress() or similar functions.
96987 - */
96988 -size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize);
96990 -/*-*************************************
96991 - * Decompressed size functions
96992 - **************************************/
96993 -/**
96994 - * ZSTD_getFrameContentSize() - returns the content size in a zstd frame header
96995 - * @src:     It should point to the start of a zstd encoded frame.
96996 - * @srcSize: The size of the source buffer. It must be at least as large as the
96997 - *           frame header. `ZSTD_frameHeaderSize_max` is always large enough.
96998 - *
96999 - * Return:   The frame content size stored in the frame header if known.
97000 - *           `ZSTD_CONTENTSIZE_UNKNOWN` if the content size isn't stored in the
97001 - *           frame header. `ZSTD_CONTENTSIZE_ERROR` on invalid input.
97002 - */
97003 -unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
97004 +/* ======   Frame Inspection Functions ====== */
97006  /**
97007 - * ZSTD_findDecompressedSize() - returns decompressed size of a series of frames
97008 - * @src:     It should point to the start of a series of zstd encoded and/or
97009 - *           skippable frames.
97010 - * @srcSize: The exact size of the series of frames.
97011 + * zstd_find_frame_compressed_size() - returns the size of a compressed frame
97012 + * @src:      Source buffer. It should point to the start of a zstd encoded
97013 + *            frame or a skippable frame.
97014 + * @src_size: The size of the source buffer. It must be at least as large as the
97015 + *            size of the frame.
97016   *
97017 - * If any zstd encoded frame in the series doesn't have the frame content size
97018 - * set, `ZSTD_CONTENTSIZE_UNKNOWN` is returned. But frame content size is always
97019 - * set when using ZSTD_compress(). The decompressed size can be very large.
97020 - * If the source is untrusted, the decompressed size could be wrong or
97021 - * intentionally modified. Always ensure the result fits within the
97022 - * application's authorized limits. ZSTD_findDecompressedSize() handles multiple
97023 - * frames, and so it must traverse the input to read each frame header. This is
97024 - * efficient as most of the data is skipped, however it does mean that all frame
97025 - * data must be present and valid.
97026 - *
97027 - * Return:   Decompressed size of all the data contained in the frames if known.
97028 - *           `ZSTD_CONTENTSIZE_UNKNOWN` if the decompressed size is unknown.
97029 - *           `ZSTD_CONTENTSIZE_ERROR` if an error occurred.
97030 - */
97031 -unsigned long long ZSTD_findDecompressedSize(const void *src, size_t srcSize);
97033 -/*-*************************************
97034 - * Advanced compression functions
97035 - **************************************/
97036 -/**
97037 - * ZSTD_checkCParams() - ensure parameter values remain within authorized range
97038 - * @cParams: The zstd compression parameters.
97039 - *
97040 - * Return:   Zero or an error, which can be checked using ZSTD_isError().
97041 + * Return:    The compressed size of the frame pointed to by `src` or an error,
97042 + *            which can be checked with zstd_is_error().
97043 + *            Suitable to pass to ZSTD_decompress() or similar functions.
97044   */
97045 -size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams);
97046 +size_t zstd_find_frame_compressed_size(const void *src, size_t src_size);
97048  /**
97049 - * ZSTD_adjustCParams() - optimize parameters for a given srcSize and dictSize
97050 - * @srcSize:  Optionally the estimated source size, or zero if unknown.
97051 - * @dictSize: Optionally the estimated dictionary size, or zero if unknown.
97052 - *
97053 - * Return:    The optimized parameters.
97054 - */
97055 -ZSTD_compressionParameters ZSTD_adjustCParams(
97056 -       ZSTD_compressionParameters cParams, unsigned long long srcSize,
97057 -       size_t dictSize);
97059 -/*--- Advanced decompression functions ---*/
97061 -/**
97062 - * ZSTD_isFrame() - returns true iff the buffer starts with a valid frame
97063 - * @buffer: The source buffer to check.
97064 - * @size:   The size of the source buffer, must be at least 4 bytes.
97065 - *
97066 - * Return: True iff the buffer starts with a zstd or skippable frame identifier.
97067 - */
97068 -unsigned int ZSTD_isFrame(const void *buffer, size_t size);
97070 -/**
97071 - * ZSTD_getDictID_fromDict() - returns the dictionary id stored in a dictionary
97072 - * @dict:     The dictionary buffer.
97073 - * @dictSize: The size of the dictionary buffer.
97074 - *
97075 - * Return:    The dictionary id stored within the dictionary or 0 if the
97076 - *            dictionary is not a zstd dictionary. If it returns 0 the
97077 - *            dictionary can still be loaded as a content-only dictionary.
97078 - */
97079 -unsigned int ZSTD_getDictID_fromDict(const void *dict, size_t dictSize);
97081 -/**
97082 - * ZSTD_getDictID_fromDDict() - returns the dictionary id stored in a ZSTD_DDict
97083 - * @ddict: The ddict to find the id of.
97084 - *
97085 - * Return: The dictionary id stored within `ddict` or 0 if the dictionary is not
97086 - *         a zstd dictionary. If it returns 0 `ddict` will be loaded as a
97087 - *         content-only dictionary.
97088 - */
97089 -unsigned int ZSTD_getDictID_fromDDict(const ZSTD_DDict *ddict);
97091 -/**
97092 - * ZSTD_getDictID_fromFrame() - returns the dictionary id stored in a zstd frame
97093 - * @src:     Source buffer. It must be a zstd encoded frame.
97094 - * @srcSize: The size of the source buffer. It must be at least as large as the
97095 - *           frame header. `ZSTD_frameHeaderSize_max` is always large enough.
97096 - *
97097 - * Return:   The dictionary id required to decompress the frame stored within
97098 - *           `src` or 0 if the dictionary id could not be decoded. It can return
97099 - *           0 if the frame does not require a dictionary, the dictionary id
97100 - *           wasn't stored in the frame, `src` is not a zstd frame, or `srcSize`
97101 - *           is too small.
97102 - */
97103 -unsigned int ZSTD_getDictID_fromFrame(const void *src, size_t srcSize);
97105 -/**
97106 - * struct ZSTD_frameParams - zstd frame parameters stored in the frame header
97107 - * @frameContentSize: The frame content size, or 0 if not present.
97108 + * struct zstd_frame_params - zstd frame parameters stored in the frame header
97109 + * @frameContentSize: The frame content size, or ZSTD_CONTENTSIZE_UNKNOWN if not
97110 + *                    present.
97111   * @windowSize:       The window size, or 0 if the frame is a skippable frame.
97112 + * @blockSizeMax:     The maximum block size.
97113 + * @frameType:        The frame type (zstd or skippable)
97114 + * @headerSize:       The size of the frame header.
97115   * @dictID:           The dictionary id, or 0 if not present.
97116   * @checksumFlag:     Whether a checksum was used.
97117 + *
97118 + * See zstd_lib.h.
97119   */
97120 -typedef struct {
97121 -       unsigned long long frameContentSize;
97122 -       unsigned int windowSize;
97123 -       unsigned int dictID;
97124 -       unsigned int checksumFlag;
97125 -} ZSTD_frameParams;
97126 +typedef ZSTD_frameHeader zstd_frame_header;
97128  /**
97129 - * ZSTD_getFrameParams() - extracts parameters from a zstd or skippable frame
97130 - * @fparamsPtr: On success the frame parameters are written here.
97131 - * @src:        The source buffer. It must point to a zstd or skippable frame.
97132 - * @srcSize:    The size of the source buffer. `ZSTD_frameHeaderSize_max` is
97133 - *              always large enough to succeed.
97134 + * zstd_get_frame_header() - extracts parameters from a zstd or skippable frame
97135 + * @params:   On success the frame parameters are written here.
97136 + * @src:      The source buffer. It must point to a zstd or skippable frame.
97137 + * @src_size: The size of the source buffer.
97138   *
97139 - * Return:      0 on success. If more data is required it returns how many bytes
97140 - *              must be provided to make forward progress. Otherwise it returns
97141 - *              an error, which can be checked using ZSTD_isError().
97142 + * Return:    0 on success. If more data is required it returns how many bytes
97143 + *            must be provided to make forward progress. Otherwise it returns
97144 + *            an error, which can be checked using zstd_is_error().
97145   */
97146 -size_t ZSTD_getFrameParams(ZSTD_frameParams *fparamsPtr, const void *src,
97147 -       size_t srcSize);
97149 -/*-*****************************************************************************
97150 - * Buffer-less and synchronous inner streaming functions
97151 - *
97152 - * This is an advanced API, giving full control over buffer management, for
97153 - * users which need direct control over memory.
97154 - * But it's also a complex one, with many restrictions (documented below).
97155 - * Prefer using normal streaming API for an easier experience
97156 - ******************************************************************************/
97158 -/*-*****************************************************************************
97159 - * Buffer-less streaming compression (synchronous mode)
97160 - *
97161 - * A ZSTD_CCtx object is required to track streaming operations.
97162 - * Use ZSTD_initCCtx() to initialize a context.
97163 - * ZSTD_CCtx object can be re-used multiple times within successive compression
97164 - * operations.
97165 - *
97166 - * Start by initializing a context.
97167 - * Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary
97168 - * compression,
97169 - * or ZSTD_compressBegin_advanced(), for finer parameter control.
97170 - * It's also possible to duplicate a reference context which has already been
97171 - * initialized, using ZSTD_copyCCtx()
97172 - *
97173 - * Then, consume your input using ZSTD_compressContinue().
97174 - * There are some important considerations to keep in mind when using this
97175 - * advanced function :
97176 - * - ZSTD_compressContinue() has no internal buffer. It uses externally provided
97177 - *   buffer only.
97178 - * - Interface is synchronous : input is consumed entirely and produce 1+
97179 - *   (or more) compressed blocks.
97180 - * - Caller must ensure there is enough space in `dst` to store compressed data
97181 - *   under worst case scenario. Worst case evaluation is provided by
97182 - *   ZSTD_compressBound().
97183 - *   ZSTD_compressContinue() doesn't guarantee recover after a failed
97184 - *   compression.
97185 - * - ZSTD_compressContinue() presumes prior input ***is still accessible and
97186 - *   unmodified*** (up to maximum distance size, see WindowLog).
97187 - *   It remembers all previous contiguous blocks, plus one separated memory
97188 - *   segment (which can itself consists of multiple contiguous blocks)
97189 - * - ZSTD_compressContinue() detects that prior input has been overwritten when
97190 - *   `src` buffer overlaps. In which case, it will "discard" the relevant memory
97191 - *   section from its history.
97192 - *
97193 - * Finish a frame with ZSTD_compressEnd(), which will write the last block(s)
97194 - * and optional checksum. It's possible to use srcSize==0, in which case, it
97195 - * will write a final empty block to end the frame. Without last block mark,
97196 - * frames will be considered unfinished (corrupted) by decoders.
97197 - *
97198 - * `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress some new
97199 - * frame.
97200 - ******************************************************************************/
97202 -/*=====   Buffer-less streaming compression functions  =====*/
97203 -size_t ZSTD_compressBegin(ZSTD_CCtx *cctx, int compressionLevel);
97204 -size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx *cctx, const void *dict,
97205 -       size_t dictSize, int compressionLevel);
97206 -size_t ZSTD_compressBegin_advanced(ZSTD_CCtx *cctx, const void *dict,
97207 -       size_t dictSize, ZSTD_parameters params,
97208 -       unsigned long long pledgedSrcSize);
97209 -size_t ZSTD_copyCCtx(ZSTD_CCtx *cctx, const ZSTD_CCtx *preparedCCtx,
97210 -       unsigned long long pledgedSrcSize);
97211 -size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx *cctx, const ZSTD_CDict *cdict,
97212 -       unsigned long long pledgedSrcSize);
97213 -size_t ZSTD_compressContinue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
97214 -       const void *src, size_t srcSize);
97215 -size_t ZSTD_compressEnd(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
97216 -       const void *src, size_t srcSize);
97220 -/*-*****************************************************************************
97221 - * Buffer-less streaming decompression (synchronous mode)
97222 - *
97223 - * A ZSTD_DCtx object is required to track streaming operations.
97224 - * Use ZSTD_initDCtx() to initialize a context.
97225 - * A ZSTD_DCtx object can be re-used multiple times.
97226 - *
97227 - * First typical operation is to retrieve frame parameters, using
97228 - * ZSTD_getFrameParams(). It fills a ZSTD_frameParams structure which provide
97229 - * important information to correctly decode the frame, such as the minimum
97230 - * rolling buffer size to allocate to decompress data (`windowSize`), and the
97231 - * dictionary ID used.
97232 - * Note: content size is optional, it may not be present. 0 means unknown.
97233 - * Note that these values could be wrong, either because of data malformation,
97234 - * or because an attacker is spoofing deliberate false information. As a
97235 - * consequence, check that values remain within valid application range,
97236 - * especially `windowSize`, before allocation. Each application can set its own
97237 - * limit, depending on local restrictions. For extended interoperability, it is
97238 - * recommended to support at least 8 MB.
97239 - * Frame parameters are extracted from the beginning of the compressed frame.
97240 - * Data fragment must be large enough to ensure successful decoding, typically
97241 - * `ZSTD_frameHeaderSize_max` bytes.
97242 - * Result: 0: successful decoding, the `ZSTD_frameParams` structure is filled.
97243 - *        >0: `srcSize` is too small, provide at least this many bytes.
97244 - *        errorCode, which can be tested using ZSTD_isError().
97245 - *
97246 - * Start decompression, with ZSTD_decompressBegin() or
97247 - * ZSTD_decompressBegin_usingDict(). Alternatively, you can copy a prepared
97248 - * context, using ZSTD_copyDCtx().
97249 - *
97250 - * Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue()
97251 - * alternatively.
97252 - * ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize'
97253 - * to ZSTD_decompressContinue().
97254 - * ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will
97255 - * fail.
97256 - *
97257 - * The result of ZSTD_decompressContinue() is the number of bytes regenerated
97258 - * within 'dst' (necessarily <= dstCapacity). It can be zero, which is not an
97259 - * error; it just means ZSTD_decompressContinue() has decoded some metadata
97260 - * item. It can also be an error code, which can be tested with ZSTD_isError().
97261 - *
97262 - * ZSTD_decompressContinue() needs previous data blocks during decompression, up
97263 - * to `windowSize`. They should preferably be located contiguously, prior to
97264 - * current block. Alternatively, a round buffer of sufficient size is also
97265 - * possible. Sufficient size is determined by frame parameters.
97266 - * ZSTD_decompressContinue() is very sensitive to contiguity, if 2 blocks don't
97267 - * follow each other, make sure that either the compressor breaks contiguity at
97268 - * the same place, or that previous contiguous segment is large enough to
97269 - * properly handle maximum back-reference.
97270 - *
97271 - * A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
97272 - * Context can then be reset to start a new decompression.
97273 - *
97274 - * Note: it's possible to know if next input to present is a header or a block,
97275 - * using ZSTD_nextInputType(). This information is not required to properly
97276 - * decode a frame.
97277 - *
97278 - * == Special case: skippable frames ==
97279 - *
97280 - * Skippable frames allow integration of user-defined data into a flow of
97281 - * concatenated frames. Skippable frames will be ignored (skipped) by a
97282 - * decompressor. The format of skippable frames is as follows:
97283 - * a) Skippable frame ID - 4 Bytes, Little endian format, any value from
97284 - *    0x184D2A50 to 0x184D2A5F
97285 - * b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
97286 - * c) Frame Content - any content (User Data) of length equal to Frame Size
97287 - * For skippable frames ZSTD_decompressContinue() always returns 0.
97288 - * For skippable frames ZSTD_getFrameParams() returns fparamsPtr->windowLog==0
97289 - * what means that a frame is skippable.
97290 - * Note: If fparamsPtr->frameContentSize==0, it is ambiguous: the frame might
97291 - *       actually be a zstd encoded frame with no content. For purposes of
97292 - *       decompression, it is valid in both cases to skip the frame using
97293 - *       ZSTD_findFrameCompressedSize() to find its size in bytes.
97294 - * It also returns frame size as fparamsPtr->frameContentSize.
97295 - ******************************************************************************/
97297 -/*=====   Buffer-less streaming decompression functions  =====*/
97298 -size_t ZSTD_decompressBegin(ZSTD_DCtx *dctx);
97299 -size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx *dctx, const void *dict,
97300 -       size_t dictSize);
97301 -void   ZSTD_copyDCtx(ZSTD_DCtx *dctx, const ZSTD_DCtx *preparedDCtx);
97302 -size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx *dctx);
97303 -size_t ZSTD_decompressContinue(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity,
97304 -       const void *src, size_t srcSize);
97305 -typedef enum {
97306 -       ZSTDnit_frameHeader,
97307 -       ZSTDnit_blockHeader,
97308 -       ZSTDnit_block,
97309 -       ZSTDnit_lastBlock,
97310 -       ZSTDnit_checksum,
97311 -       ZSTDnit_skippableFrame
97312 -} ZSTD_nextInputType_e;
97313 -ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx *dctx);
97315 -/*-*****************************************************************************
97316 - * Block functions
97317 - *
97318 - * Block functions produce and decode raw zstd blocks, without frame metadata.
97319 - * Frame metadata cost is typically ~18 bytes, which can be non-negligible for
97320 - * very small blocks (< 100 bytes). User will have to take in charge required
97321 - * information to regenerate data, such as compressed and content sizes.
97322 - *
97323 - * A few rules to respect:
97324 - * - Compressing and decompressing require a context structure
97325 - *   + Use ZSTD_initCCtx() and ZSTD_initDCtx()
97326 - * - It is necessary to init context before starting
97327 - *   + compression : ZSTD_compressBegin()
97328 - *   + decompression : ZSTD_decompressBegin()
97329 - *   + variants _usingDict() are also allowed
97330 - *   + copyCCtx() and copyDCtx() work too
97331 - * - Block size is limited, it must be <= ZSTD_getBlockSizeMax()
97332 - *   + If you need to compress more, cut data into multiple blocks
97333 - *   + Consider using the regular ZSTD_compress() instead, as frame metadata
97334 - *     costs become negligible when source size is large.
97335 - * - When a block is considered not compressible enough, ZSTD_compressBlock()
97336 - *   result will be zero. In which case, nothing is produced into `dst`.
97337 - *   + User must test for such outcome and deal directly with uncompressed data
97338 - *   + ZSTD_decompressBlock() doesn't accept uncompressed data as input!!!
97339 - *   + In case of multiple successive blocks, decoder must be informed of
97340 - *     uncompressed block existence to follow proper history. Use
97341 - *     ZSTD_insertBlock() in such a case.
97342 - ******************************************************************************/
97344 -/* Define for static allocation */
97345 -#define ZSTD_BLOCKSIZE_ABSOLUTEMAX (128 * 1024)
97346 -/*=====   Raw zstd block functions  =====*/
97347 -size_t ZSTD_getBlockSizeMax(ZSTD_CCtx *cctx);
97348 -size_t ZSTD_compressBlock(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity,
97349 -       const void *src, size_t srcSize);
97350 -size_t ZSTD_decompressBlock(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity,
97351 -       const void *src, size_t srcSize);
97352 -size_t ZSTD_insertBlock(ZSTD_DCtx *dctx, const void *blockStart,
97353 -       size_t blockSize);
97354 +size_t zstd_get_frame_header(zstd_frame_header *params, const void *src,
97355 +       size_t src_size);
97357 -#endif  /* ZSTD_H */
97358 +#endif  /* LINUX_ZSTD_H */
97359 diff --git a/include/linux/zstd_errors.h b/include/linux/zstd_errors.h
97360 new file mode 100644
97361 index 000000000000..ccb92064ef03
97362 --- /dev/null
97363 +++ b/include/linux/zstd_errors.h
97364 @@ -0,0 +1,77 @@
97366 + * Copyright (c) Yann Collet, Facebook, Inc.
97367 + * All rights reserved.
97368 + *
97369 + * This source code is licensed under both the BSD-style license (found in the
97370 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
97371 + * in the COPYING file in the root directory of this source tree).
97372 + * You may select, at your option, one of the above-listed licenses.
97373 + */
97375 +#ifndef ZSTD_ERRORS_H_398273423
97376 +#define ZSTD_ERRORS_H_398273423
97379 +/*===== dependency =====*/
97380 +#include <linux/types.h>   /* size_t */
97383 +/* =====   ZSTDERRORLIB_API : control library symbols visibility   ===== */
97384 +#define ZSTDERRORLIB_VISIBILITY
97385 +#define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY
97387 +/*-*********************************************
97388 + *  Error codes list
97389 + *-*********************************************
97390 + *  Error codes _values_ are pinned down since v1.3.1 only.
97391 + *  Therefore, don't rely on values if you may link to any version < v1.3.1.
97392 + *
97393 + *  Only values < 100 are considered stable.
97394 + *
97395 + *  note 1 : this API shall be used with static linking only.
97396 + *           dynamic linking is not yet officially supported.
97397 + *  note 2 : Prefer relying on the enum than on its value whenever possible
97398 + *           This is the only supported way to use the error list < v1.3.1
97399 + *  note 3 : ZSTD_isError() is always correct, whatever the library version.
97400 + **********************************************/
97401 +typedef enum {
97402 +  ZSTD_error_no_error = 0,
97403 +  ZSTD_error_GENERIC  = 1,
97404 +  ZSTD_error_prefix_unknown                = 10,
97405 +  ZSTD_error_version_unsupported           = 12,
97406 +  ZSTD_error_frameParameter_unsupported    = 14,
97407 +  ZSTD_error_frameParameter_windowTooLarge = 16,
97408 +  ZSTD_error_corruption_detected = 20,
97409 +  ZSTD_error_checksum_wrong      = 22,
97410 +  ZSTD_error_dictionary_corrupted      = 30,
97411 +  ZSTD_error_dictionary_wrong          = 32,
97412 +  ZSTD_error_dictionaryCreation_failed = 34,
97413 +  ZSTD_error_parameter_unsupported   = 40,
97414 +  ZSTD_error_parameter_outOfBound    = 42,
97415 +  ZSTD_error_tableLog_tooLarge       = 44,
97416 +  ZSTD_error_maxSymbolValue_tooLarge = 46,
97417 +  ZSTD_error_maxSymbolValue_tooSmall = 48,
97418 +  ZSTD_error_stage_wrong       = 60,
97419 +  ZSTD_error_init_missing      = 62,
97420 +  ZSTD_error_memory_allocation = 64,
97421 +  ZSTD_error_workSpace_tooSmall= 66,
97422 +  ZSTD_error_dstSize_tooSmall = 70,
97423 +  ZSTD_error_srcSize_wrong    = 72,
97424 +  ZSTD_error_dstBuffer_null   = 74,
97425 +  /* following error codes are __NOT STABLE__, they can be removed or changed in future versions */
97426 +  ZSTD_error_frameIndex_tooLarge = 100,
97427 +  ZSTD_error_seekableIO          = 102,
97428 +  ZSTD_error_dstBuffer_wrong     = 104,
97429 +  ZSTD_error_srcBuffer_wrong     = 105,
97430 +  ZSTD_error_maxCode = 120  /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */
97431 +} ZSTD_ErrorCode;
97433 +/*! ZSTD_getErrorCode() :
97434 +    convert a `size_t` function result into a `ZSTD_ErrorCode` enum type,
97435 +    which can be used to compare with enum list published above */
97436 +ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult);
97437 +ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code);   /**< Same as ZSTD_getErrorName, but using a `ZSTD_ErrorCode` enum argument */
97441 +#endif /* ZSTD_ERRORS_H_398273423 */
97442 diff --git a/include/linux/zstd_lib.h b/include/linux/zstd_lib.h
97443 new file mode 100644
97444 index 000000000000..d81779076217
97445 --- /dev/null
97446 +++ b/include/linux/zstd_lib.h
97447 @@ -0,0 +1,2431 @@
97449 + * Copyright (c) Yann Collet, Facebook, Inc.
97450 + * All rights reserved.
97451 + *
97452 + * This source code is licensed under both the BSD-style license (found in the
97453 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
97454 + * in the COPYING file in the root directory of this source tree).
97455 + * You may select, at your option, one of the above-listed licenses.
97456 + */
97458 +#ifndef ZSTD_H_235446
97459 +#define ZSTD_H_235446
97461 +/* ======   Dependency   ======*/
97462 +#include <linux/limits.h>   /* INT_MAX */
97463 +#include <linux/types.h>   /* size_t */
97466 +/* =====   ZSTDLIB_API : control library symbols visibility   ===== */
97467 +#define ZSTDLIB_VISIBILITY
97468 +#define ZSTDLIB_API ZSTDLIB_VISIBILITY
97471 +/*******************************************************************************
97472 +  Introduction
97474 +  zstd, short for Zstandard, is a fast lossless compression algorithm, targeting
97475 +  real-time compression scenarios at zlib-level and better compression ratios.
97476 +  The zstd compression library provides in-memory compression and decompression
97477 +  functions.
97479 +  The library supports regular compression levels from 1 up to ZSTD_maxCLevel(),
97480 +  which is currently 22. Levels >= 20, labeled `--ultra`, should be used with
97481 +  caution, as they require more memory. The library also offers negative
97482 +  compression levels, which extend the range of speed vs. ratio preferences.
97483 +  The lower the level, the faster the speed (at the cost of compression).
97485 +  Compression can be done in:
97486 +    - a single step (described as Simple API)
97487 +    - a single step, reusing a context (described as Explicit context)
97488 +    - unbounded multiple steps (described as Streaming compression)
97490 +  The compression ratio achievable on small data can be highly improved using
97491 +  a dictionary. Dictionary compression can be performed in:
97492 +    - a single step (described as Simple dictionary API)
97493 +    - a single step, reusing a dictionary (described as Bulk-processing
97494 +      dictionary API)
97496 +  Advanced experimental functions can be accessed using
97497 +  `#define ZSTD_STATIC_LINKING_ONLY` before including zstd.h.
97499 +  Advanced experimental APIs should never be used with a dynamically-linked
97500 +  library. They are not "stable"; their definitions or signatures may change in
97501 +  the future. Only static linking is allowed.
97502 +*******************************************************************************/
97504 +/*------   Version   ------*/
97505 +#define ZSTD_VERSION_MAJOR    1
97506 +#define ZSTD_VERSION_MINOR    4
97507 +#define ZSTD_VERSION_RELEASE  10
97508 +#define ZSTD_VERSION_NUMBER  (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
97510 +/*! ZSTD_versionNumber() :
97511 + *  Return runtime library version, the value is (MAJOR*100*100 + MINOR*100 + RELEASE). */
97512 +ZSTDLIB_API unsigned ZSTD_versionNumber(void);
97514 +#define ZSTD_LIB_VERSION ZSTD_VERSION_MAJOR.ZSTD_VERSION_MINOR.ZSTD_VERSION_RELEASE
97515 +#define ZSTD_QUOTE(str) #str
97516 +#define ZSTD_EXPAND_AND_QUOTE(str) ZSTD_QUOTE(str)
97517 +#define ZSTD_VERSION_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_LIB_VERSION)
97519 +/*! ZSTD_versionString() :
97520 + *  Return runtime library version, like "1.4.5". Requires v1.3.0+. */
97521 +ZSTDLIB_API const char* ZSTD_versionString(void);
97523 +/* *************************************
97524 + *  Default constant
97525 + ***************************************/
97526 +#ifndef ZSTD_CLEVEL_DEFAULT
97527 +#  define ZSTD_CLEVEL_DEFAULT 3
97528 +#endif
97530 +/* *************************************
97531 + *  Constants
97532 + ***************************************/
97534 +/* All magic numbers are supposed read/written to/from files/memory using little-endian convention */
97535 +#define ZSTD_MAGICNUMBER            0xFD2FB528    /* valid since v0.8.0 */
97536 +#define ZSTD_MAGIC_DICTIONARY       0xEC30A437    /* valid since v0.7.0 */
97537 +#define ZSTD_MAGIC_SKIPPABLE_START  0x184D2A50    /* all 16 values, from 0x184D2A50 to 0x184D2A5F, signal the beginning of a skippable frame */
97538 +#define ZSTD_MAGIC_SKIPPABLE_MASK   0xFFFFFFF0
97540 +#define ZSTD_BLOCKSIZELOG_MAX  17
97541 +#define ZSTD_BLOCKSIZE_MAX     (1<<ZSTD_BLOCKSIZELOG_MAX)
97545 +/***************************************
97546 +*  Simple API
97547 +***************************************/
97548 +/*! ZSTD_compress() :
97549 + *  Compresses `src` content as a single zstd compressed frame into already allocated `dst`.
97550 + *  Hint : compression runs faster if `dstCapacity` >=  `ZSTD_compressBound(srcSize)`.
97551 + *  @return : compressed size written into `dst` (<= `dstCapacity),
97552 + *            or an error code if it fails (which can be tested using ZSTD_isError()). */
97553 +ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity,
97554 +                            const void* src, size_t srcSize,
97555 +                                  int compressionLevel);
97557 +/*! ZSTD_decompress() :
97558 + *  `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.
97559 + *  `dstCapacity` is an upper bound of originalSize to regenerate.
97560 + *  If user cannot imply a maximum upper bound, it's better to use streaming mode to decompress data.
97561 + *  @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
97562 + *            or an errorCode if it fails (which can be tested using ZSTD_isError()). */
97563 +ZSTDLIB_API size_t ZSTD_decompress( void* dst, size_t dstCapacity,
97564 +                              const void* src, size_t compressedSize);
97566 +/*! ZSTD_getFrameContentSize() : requires v1.3.0+
97567 + *  `src` should point to the start of a ZSTD encoded frame.
97568 + *  `srcSize` must be at least as large as the frame header.
97569 + *            hint : any size >= `ZSTD_frameHeaderSize_max` is large enough.
97570 + *  @return : - decompressed size of `src` frame content, if known
97571 + *            - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
97572 + *            - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)
97573 + *   note 1 : a 0 return value means the frame is valid but "empty".
97574 + *   note 2 : decompressed size is an optional field, it may not be present, typically in streaming mode.
97575 + *            When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
97576 + *            In which case, it's necessary to use streaming mode to decompress data.
97577 + *            Optionally, application can rely on some implicit limit,
97578 + *            as ZSTD_decompress() only needs an upper bound of decompressed size.
97579 + *            (For example, data could be necessarily cut into blocks <= 16 KB).
97580 + *   note 3 : decompressed size is always present when compression is completed using single-pass functions,
97581 + *            such as ZSTD_compress(), ZSTD_compressCCtx() ZSTD_compress_usingDict() or ZSTD_compress_usingCDict().
97582 + *   note 4 : decompressed size can be very large (64-bits value),
97583 + *            potentially larger than what local system can handle as a single memory segment.
97584 + *            In which case, it's necessary to use streaming mode to decompress data.
97585 + *   note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified.
97586 + *            Always ensure return value fits within application's authorized limits.
97587 + *            Each application can set its own limits.
97588 + *   note 6 : This function replaces ZSTD_getDecompressedSize() */
97589 +#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
97590 +#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
97591 +ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
97593 +/*! ZSTD_getDecompressedSize() :
97594 + *  NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize().
97595 + *  Both functions work the same way, but ZSTD_getDecompressedSize() blends
97596 + *  "empty", "unknown" and "error" results to the same return value (0),
97597 + *  while ZSTD_getFrameContentSize() gives them separate return values.
97598 + * @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise. */
97599 +ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
97601 +/*! ZSTD_findFrameCompressedSize() :
97602 + * `src` should point to the start of a ZSTD frame or skippable frame.
97603 + * `srcSize` must be >= first frame size
97604 + * @return : the compressed size of the first frame starting at `src`,
97605 + *           suitable to pass as `srcSize` to `ZSTD_decompress` or similar,
97606 + *        or an error code if input is invalid */
97607 +ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize);
97610 +/*======  Helper functions  ======*/
97611 +#define ZSTD_COMPRESSBOUND(srcSize)   ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0))  /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
97612 +ZSTDLIB_API size_t      ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
97613 +ZSTDLIB_API unsigned    ZSTD_isError(size_t code);          /*!< tells if a `size_t` function result is an error code */
97614 +ZSTDLIB_API const char* ZSTD_getErrorName(size_t code);     /*!< provides readable string from an error code */
97615 +ZSTDLIB_API int         ZSTD_minCLevel(void);               /*!< minimum negative compression level allowed */
97616 +ZSTDLIB_API int         ZSTD_maxCLevel(void);               /*!< maximum compression level available */
97619 +/***************************************
97620 +*  Explicit context
97621 +***************************************/
97622 +/*= Compression context
97623 + *  When compressing many times,
97624 + *  it is recommended to allocate a context just once,
97625 + *  and re-use it for each successive compression operation.
97626 + *  This will make workload friendlier for system's memory.
97627 + *  Note : re-using context is just a speed / resource optimization.
97628 + *         It doesn't change the compression ratio, which remains identical.
97629 + *  Note 2 : In multi-threaded environments,
97630 + *         use one different context per thread for parallel execution.
97631 + */
97632 +typedef struct ZSTD_CCtx_s ZSTD_CCtx;
97633 +ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void);
97634 +ZSTDLIB_API size_t     ZSTD_freeCCtx(ZSTD_CCtx* cctx);  /* accept NULL pointer */
97636 +/*! ZSTD_compressCCtx() :
97637 + *  Same as ZSTD_compress(), using an explicit ZSTD_CCtx.
97638 + *  Important : in order to behave similarly to `ZSTD_compress()`,
97639 + *  this function compresses at requested compression level,
97640 + *  __ignoring any other parameter__ .
97641 + *  If any advanced parameter was set using the advanced API,
97642 + *  they will all be reset. Only `compressionLevel` remains.
97643 + */
97644 +ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
97645 +                                     void* dst, size_t dstCapacity,
97646 +                               const void* src, size_t srcSize,
97647 +                                     int compressionLevel);
97649 +/*= Decompression context
97650 + *  When decompressing many times,
97651 + *  it is recommended to allocate a context only once,
97652 + *  and re-use it for each successive compression operation.
97653 + *  This will make workload friendlier for system's memory.
97654 + *  Use one context per thread for parallel execution. */
97655 +typedef struct ZSTD_DCtx_s ZSTD_DCtx;
97656 +ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx(void);
97657 +ZSTDLIB_API size_t     ZSTD_freeDCtx(ZSTD_DCtx* dctx);  /* accept NULL pointer */
97659 +/*! ZSTD_decompressDCtx() :
97660 + *  Same as ZSTD_decompress(),
97661 + *  requires an allocated ZSTD_DCtx.
97662 + *  Compatible with sticky parameters.
97663 + */
97664 +ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx,
97665 +                                       void* dst, size_t dstCapacity,
97666 +                                 const void* src, size_t srcSize);
97669 +/***************************************
97670 +*  Advanced compression API
97671 +***************************************/
97673 +/* API design :
97674 + *   Parameters are pushed one by one into an existing context,
97675 + *   using ZSTD_CCtx_set*() functions.
97676 + *   Pushed parameters are sticky : they are valid for next compressed frame, and any subsequent frame.
97677 + *   "sticky" parameters are applicable to `ZSTD_compress2()` and `ZSTD_compressStream*()` !
97678 + *   __They do not apply to "simple" one-shot variants such as ZSTD_compressCCtx()__ .
97679 + *
97680 + *   It's possible to reset all parameters to "default" using ZSTD_CCtx_reset().
97681 + *
97682 + *   This API supercedes all other "advanced" API entry points in the experimental section.
97683 + *   In the future, we expect to remove from experimental API entry points which are redundant with this API.
97684 + */
97687 +/* Compression strategies, listed from fastest to strongest */
97688 +typedef enum { ZSTD_fast=1,
97689 +               ZSTD_dfast=2,
97690 +               ZSTD_greedy=3,
97691 +               ZSTD_lazy=4,
97692 +               ZSTD_lazy2=5,
97693 +               ZSTD_btlazy2=6,
97694 +               ZSTD_btopt=7,
97695 +               ZSTD_btultra=8,
97696 +               ZSTD_btultra2=9
97697 +               /* note : new strategies _might_ be added in the future.
97698 +                         Only the order (from fast to strong) is guaranteed */
97699 +} ZSTD_strategy;
97702 +typedef enum {
97704 +    /* compression parameters
97705 +     * Note: When compressing with a ZSTD_CDict these parameters are superseded
97706 +     * by the parameters used to construct the ZSTD_CDict.
97707 +     * See ZSTD_CCtx_refCDict() for more info (superseded-by-cdict). */
97708 +    ZSTD_c_compressionLevel=100, /* Set compression parameters according to pre-defined cLevel table.
97709 +                              * Note that exact compression parameters are dynamically determined,
97710 +                              * depending on both compression level and srcSize (when known).
97711 +                              * Default level is ZSTD_CLEVEL_DEFAULT==3.
97712 +                              * Special: value 0 means default, which is controlled by ZSTD_CLEVEL_DEFAULT.
97713 +                              * Note 1 : it's possible to pass a negative compression level.
97714 +                              * Note 2 : setting a level does not automatically set all other compression parameters
97715 +                              *   to default. Setting this will however eventually dynamically impact the compression
97716 +                              *   parameters which have not been manually set. The manually set
97717 +                              *   ones will 'stick'. */
97718 +    /* Advanced compression parameters :
97719 +     * It's possible to pin down compression parameters to some specific values.
97720 +     * In which case, these values are no longer dynamically selected by the compressor */
97721 +    ZSTD_c_windowLog=101,    /* Maximum allowed back-reference distance, expressed as power of 2.
97722 +                              * This will set a memory budget for streaming decompression,
97723 +                              * with larger values requiring more memory
97724 +                              * and typically compressing more.
97725 +                              * Must be clamped between ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX.
97726 +                              * Special: value 0 means "use default windowLog".
97727 +                              * Note: Using a windowLog greater than ZSTD_WINDOWLOG_LIMIT_DEFAULT
97728 +                              *       requires explicitly allowing such size at streaming decompression stage. */
97729 +    ZSTD_c_hashLog=102,      /* Size of the initial probe table, as a power of 2.
97730 +                              * Resulting memory usage is (1 << (hashLog+2)).
97731 +                              * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX.
97732 +                              * Larger tables improve compression ratio of strategies <= dFast,
97733 +                              * and improve speed of strategies > dFast.
97734 +                              * Special: value 0 means "use default hashLog". */
97735 +    ZSTD_c_chainLog=103,     /* Size of the multi-probe search table, as a power of 2.
97736 +                              * Resulting memory usage is (1 << (chainLog+2)).
97737 +                              * Must be clamped between ZSTD_CHAINLOG_MIN and ZSTD_CHAINLOG_MAX.
97738 +                              * Larger tables result in better and slower compression.
97739 +                              * This parameter is useless for "fast" strategy.
97740 +                              * It's still useful when using "dfast" strategy,
97741 +                              * in which case it defines a secondary probe table.
97742 +                              * Special: value 0 means "use default chainLog". */
97743 +    ZSTD_c_searchLog=104,    /* Number of search attempts, as a power of 2.
97744 +                              * More attempts result in better and slower compression.
97745 +                              * This parameter is useless for "fast" and "dFast" strategies.
97746 +                              * Special: value 0 means "use default searchLog". */
97747 +    ZSTD_c_minMatch=105,     /* Minimum size of searched matches.
97748 +                              * Note that Zstandard can still find matches of smaller size,
97749 +                              * it just tweaks its search algorithm to look for this size and larger.
97750 +                              * Larger values increase compression and decompression speed, but decrease ratio.
97751 +                              * Must be clamped between ZSTD_MINMATCH_MIN and ZSTD_MINMATCH_MAX.
97752 +                              * Note that currently, for all strategies < btopt, effective minimum is 4.
97753 +                              *                    , for all strategies > fast, effective maximum is 6.
97754 +                              * Special: value 0 means "use default minMatchLength". */
97755 +    ZSTD_c_targetLength=106, /* Impact of this field depends on strategy.
97756 +                              * For strategies btopt, btultra & btultra2:
97757 +                              *     Length of Match considered "good enough" to stop search.
97758 +                              *     Larger values make compression stronger, and slower.
97759 +                              * For strategy fast:
97760 +                              *     Distance between match sampling.
97761 +                              *     Larger values make compression faster, and weaker.
97762 +                              * Special: value 0 means "use default targetLength". */
97763 +    ZSTD_c_strategy=107,     /* See ZSTD_strategy enum definition.
97764 +                              * The higher the value of selected strategy, the more complex it is,
97765 +                              * resulting in stronger and slower compression.
97766 +                              * Special: value 0 means "use default strategy". */
97768 +    /* LDM mode parameters */
97769 +    ZSTD_c_enableLongDistanceMatching=160, /* Enable long distance matching.
97770 +                                     * This parameter is designed to improve compression ratio
97771 +                                     * for large inputs, by finding large matches at long distance.
97772 +                                     * It increases memory usage and window size.
97773 +                                     * Note: enabling this parameter increases default ZSTD_c_windowLog to 128 MB
97774 +                                     * except when expressly set to a different value.
97775 +                                     * Note: will be enabled by default if ZSTD_c_windowLog >= 128 MB and
97776 +                                     * compression strategy >= ZSTD_btopt (== compression level 16+) */
97777 +    ZSTD_c_ldmHashLog=161,   /* Size of the table for long distance matching, as a power of 2.
97778 +                              * Larger values increase memory usage and compression ratio,
97779 +                              * but decrease compression speed.
97780 +                              * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX
97781 +                              * default: windowlog - 7.
97782 +                              * Special: value 0 means "automatically determine hashlog". */
97783 +    ZSTD_c_ldmMinMatch=162,  /* Minimum match size for long distance matcher.
97784 +                              * Larger/too small values usually decrease compression ratio.
97785 +                              * Must be clamped between ZSTD_LDM_MINMATCH_MIN and ZSTD_LDM_MINMATCH_MAX.
97786 +                              * Special: value 0 means "use default value" (default: 64). */
97787 +    ZSTD_c_ldmBucketSizeLog=163, /* Log size of each bucket in the LDM hash table for collision resolution.
97788 +                              * Larger values improve collision resolution but decrease compression speed.
97789 +                              * The maximum value is ZSTD_LDM_BUCKETSIZELOG_MAX.
97790 +                              * Special: value 0 means "use default value" (default: 3). */
97791 +    ZSTD_c_ldmHashRateLog=164, /* Frequency of inserting/looking up entries into the LDM hash table.
97792 +                              * Must be clamped between 0 and (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN).
97793 +                              * Default is MAX(0, (windowLog - ldmHashLog)), optimizing hash table usage.
97794 +                              * Larger values improve compression speed.
97795 +                              * Deviating far from default value will likely result in a compression ratio decrease.
97796 +                              * Special: value 0 means "automatically determine hashRateLog". */
97798 +    /* frame parameters */
97799 +    ZSTD_c_contentSizeFlag=200, /* Content size will be written into frame header _whenever known_ (default:1)
97800 +                              * Content size must be known at the beginning of compression.
97801 +                              * This is automatically the case when using ZSTD_compress2(),
97802 +                              * For streaming scenarios, content size must be provided with ZSTD_CCtx_setPledgedSrcSize() */
97803 +    ZSTD_c_checksumFlag=201, /* A 32-bits checksum of content is written at end of frame (default:0) */
97804 +    ZSTD_c_dictIDFlag=202,   /* When applicable, dictionary's ID is written into frame header (default:1) */
97806 +    /* multi-threading parameters */
97807 +    /* These parameters are only active if multi-threading is enabled (compiled with build macro ZSTD_MULTITHREAD).
97808 +     * Otherwise, trying to set any other value than default (0) will be a no-op and return an error.
97809 +     * In a situation where it's unknown if the linked library supports multi-threading or not,
97810 +     * setting ZSTD_c_nbWorkers to any value >= 1 and consulting the return value provides a quick way to check this property.
97811 +     */
97812 +    ZSTD_c_nbWorkers=400,    /* Select how many threads will be spawned to compress in parallel.
97813 +                              * When nbWorkers >= 1, triggers asynchronous mode when invoking ZSTD_compressStream*() :
97814 +                              * ZSTD_compressStream*() consumes input and flush output if possible, but immediately gives back control to caller,
97815 +                              * while compression is performed in parallel, within worker thread(s).
97816 +                              * (note : a strong exception to this rule is when first invocation of ZSTD_compressStream2() sets ZSTD_e_end :
97817 +                              *  in which case, ZSTD_compressStream2() delegates to ZSTD_compress2(), which is always a blocking call).
97818 +                              * More workers improve speed, but also increase memory usage.
97819 +                              * Default value is `0`, aka "single-threaded mode" : no worker is spawned,
97820 +                              * compression is performed inside Caller's thread, and all invocations are blocking */
97821 +    ZSTD_c_jobSize=401,      /* Size of a compression job. This value is enforced only when nbWorkers >= 1.
97822 +                              * Each compression job is completed in parallel, so this value can indirectly impact the nb of active threads.
97823 +                              * 0 means default, which is dynamically determined based on compression parameters.
97824 +                              * Job size must be a minimum of overlap size, or 1 MB, whichever is largest.
97825 +                              * The minimum size is automatically and transparently enforced. */
97826 +    ZSTD_c_overlapLog=402,   /* Control the overlap size, as a fraction of window size.
97827 +                              * The overlap size is an amount of data reloaded from previous job at the beginning of a new job.
97828 +                              * It helps preserve compression ratio, while each job is compressed in parallel.
97829 +                              * This value is enforced only when nbWorkers >= 1.
97830 +                              * Larger values increase compression ratio, but decrease speed.
97831 +                              * Possible values range from 0 to 9 :
97832 +                              * - 0 means "default" : value will be determined by the library, depending on strategy
97833 +                              * - 1 means "no overlap"
97834 +                              * - 9 means "full overlap", using a full window size.
97835 +                              * Each intermediate rank increases/decreases load size by a factor 2 :
97836 +                              * 9: full window;  8: w/2;  7: w/4;  6: w/8;  5:w/16;  4: w/32;  3:w/64;  2:w/128;  1:no overlap;  0:default
97837 +                              * default value varies between 6 and 9, depending on strategy */
97839 +    /* note : additional experimental parameters are also available
97840 +     * within the experimental section of the API.
97841 +     * At the time of this writing, they include :
97842 +     * ZSTD_c_rsyncable
97843 +     * ZSTD_c_format
97844 +     * ZSTD_c_forceMaxWindow
97845 +     * ZSTD_c_forceAttachDict
97846 +     * ZSTD_c_literalCompressionMode
97847 +     * ZSTD_c_targetCBlockSize
97848 +     * ZSTD_c_srcSizeHint
97849 +     * ZSTD_c_enableDedicatedDictSearch
97850 +     * ZSTD_c_stableInBuffer
97851 +     * ZSTD_c_stableOutBuffer
97852 +     * ZSTD_c_blockDelimiters
97853 +     * ZSTD_c_validateSequences
97854 +     * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
97855 +     * note : never ever use experimentalParam? names directly;
97856 +     *        also, the enums values themselves are unstable and can still change.
97857 +     */
97858 +     ZSTD_c_experimentalParam1=500,
97859 +     ZSTD_c_experimentalParam2=10,
97860 +     ZSTD_c_experimentalParam3=1000,
97861 +     ZSTD_c_experimentalParam4=1001,
97862 +     ZSTD_c_experimentalParam5=1002,
97863 +     ZSTD_c_experimentalParam6=1003,
97864 +     ZSTD_c_experimentalParam7=1004,
97865 +     ZSTD_c_experimentalParam8=1005,
97866 +     ZSTD_c_experimentalParam9=1006,
97867 +     ZSTD_c_experimentalParam10=1007,
97868 +     ZSTD_c_experimentalParam11=1008,
97869 +     ZSTD_c_experimentalParam12=1009
97870 +} ZSTD_cParameter;
97872 +typedef struct {
97873 +    size_t error;
97874 +    int lowerBound;
97875 +    int upperBound;
97876 +} ZSTD_bounds;
97878 +/*! ZSTD_cParam_getBounds() :
97879 + *  All parameters must belong to an interval with lower and upper bounds,
97880 + *  otherwise they will either trigger an error or be automatically clamped.
97881 + * @return : a structure, ZSTD_bounds, which contains
97882 + *         - an error status field, which must be tested using ZSTD_isError()
97883 + *         - lower and upper bounds, both inclusive
97884 + */
97885 +ZSTDLIB_API ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter cParam);
97887 +/*! ZSTD_CCtx_setParameter() :
97888 + *  Set one compression parameter, selected by enum ZSTD_cParameter.
97889 + *  All parameters have valid bounds. Bounds can be queried using ZSTD_cParam_getBounds().
97890 + *  Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).
97891 + *  Setting a parameter is generally only possible during frame initialization (before starting compression).
97892 + *  Exception : when using multi-threading mode (nbWorkers >= 1),
97893 + *              the following parameters can be updated _during_ compression (within same frame):
97894 + *              => compressionLevel, hashLog, chainLog, searchLog, minMatch, targetLength and strategy.
97895 + *              new parameters will be active for next job only (after a flush()).
97896 + * @return : an error code (which can be tested using ZSTD_isError()).
97897 + */
97898 +ZSTDLIB_API size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value);
97900 +/*! ZSTD_CCtx_setPledgedSrcSize() :
97901 + *  Total input data size to be compressed as a single frame.
97902 + *  Value will be written in frame header, unless if explicitly forbidden using ZSTD_c_contentSizeFlag.
97903 + *  This value will also be controlled at end of frame, and trigger an error if not respected.
97904 + * @result : 0, or an error code (which can be tested with ZSTD_isError()).
97905 + *  Note 1 : pledgedSrcSize==0 actually means zero, aka an empty frame.
97906 + *           In order to mean "unknown content size", pass constant ZSTD_CONTENTSIZE_UNKNOWN.
97907 + *           ZSTD_CONTENTSIZE_UNKNOWN is default value for any new frame.
97908 + *  Note 2 : pledgedSrcSize is only valid once, for the next frame.
97909 + *           It's discarded at the end of the frame, and replaced by ZSTD_CONTENTSIZE_UNKNOWN.
97910 + *  Note 3 : Whenever all input data is provided and consumed in a single round,
97911 + *           for example with ZSTD_compress2(),
97912 + *           or invoking immediately ZSTD_compressStream2(,,,ZSTD_e_end),
97913 + *           this value is automatically overridden by srcSize instead.
97914 + */
97915 +ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize);
97917 +typedef enum {
97918 +    ZSTD_reset_session_only = 1,
97919 +    ZSTD_reset_parameters = 2,
97920 +    ZSTD_reset_session_and_parameters = 3
97921 +} ZSTD_ResetDirective;
97923 +/*! ZSTD_CCtx_reset() :
97924 + *  There are 2 different things that can be reset, independently or jointly :
97925 + *  - The session : will stop compressing current frame, and make CCtx ready to start a new one.
97926 + *                  Useful after an error, or to interrupt any ongoing compression.
97927 + *                  Any internal data not yet flushed is cancelled.
97928 + *                  Compression parameters and dictionary remain unchanged.
97929 + *                  They will be used to compress next frame.
97930 + *                  Resetting session never fails.
97931 + *  - The parameters : changes all parameters back to "default".
97932 + *                  This removes any reference to any dictionary too.
97933 + *                  Parameters can only be changed between 2 sessions (i.e. no compression is currently ongoing)
97934 + *                  otherwise the reset fails, and function returns an error value (which can be tested using ZSTD_isError())
97935 + *  - Both : similar to resetting the session, followed by resetting parameters.
97936 + */
97937 +ZSTDLIB_API size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset);
97939 +/*! ZSTD_compress2() :
97940 + *  Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API.
97941 + *  ZSTD_compress2() always starts a new frame.
97942 + *  Should cctx hold data from a previously unfinished frame, everything about it is forgotten.
97943 + *  - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
97944 + *  - The function is always blocking, returns when compression is completed.
97945 + *  Hint : compression runs faster if `dstCapacity` >=  `ZSTD_compressBound(srcSize)`.
97946 + * @return : compressed size written into `dst` (<= `dstCapacity`),
97947 + *           or an error code if it fails (which can be tested using ZSTD_isError()).
97948 + */
97949 +ZSTDLIB_API size_t ZSTD_compress2( ZSTD_CCtx* cctx,
97950 +                                   void* dst, size_t dstCapacity,
97951 +                             const void* src, size_t srcSize);
97954 +/***************************************
97955 +*  Advanced decompression API
97956 +***************************************/
97958 +/* The advanced API pushes parameters one by one into an existing DCtx context.
97959 + * Parameters are sticky, and remain valid for all following frames
97960 + * using the same DCtx context.
97961 + * It's possible to reset parameters to default values using ZSTD_DCtx_reset().
97962 + * Note : This API is compatible with existing ZSTD_decompressDCtx() and ZSTD_decompressStream().
97963 + *        Therefore, no new decompression function is necessary.
97964 + */
97966 +typedef enum {
97968 +    ZSTD_d_windowLogMax=100, /* Select a size limit (in power of 2) beyond which
97969 +                              * the streaming API will refuse to allocate memory buffer
97970 +                              * in order to protect the host from unreasonable memory requirements.
97971 +                              * This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode.
97972 +                              * By default, a decompression context accepts window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT).
97973 +                              * Special: value 0 means "use default maximum windowLog". */
97975 +    /* note : additional experimental parameters are also available
97976 +     * within the experimental section of the API.
97977 +     * At the time of this writing, they include :
97978 +     * ZSTD_d_format
97979 +     * ZSTD_d_stableOutBuffer
97980 +     * ZSTD_d_forceIgnoreChecksum
97981 +     * ZSTD_d_refMultipleDDicts
97982 +     * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
97983 +     * note : never ever use experimentalParam? names directly
97984 +     */
97985 +     ZSTD_d_experimentalParam1=1000,
97986 +     ZSTD_d_experimentalParam2=1001,
97987 +     ZSTD_d_experimentalParam3=1002,
97988 +     ZSTD_d_experimentalParam4=1003
97990 +} ZSTD_dParameter;
97992 +/*! ZSTD_dParam_getBounds() :
97993 + *  All parameters must belong to an interval with lower and upper bounds,
97994 + *  otherwise they will either trigger an error or be automatically clamped.
97995 + * @return : a structure, ZSTD_bounds, which contains
97996 + *         - an error status field, which must be tested using ZSTD_isError()
97997 + *         - both lower and upper bounds, inclusive
97998 + */
97999 +ZSTDLIB_API ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam);
98001 +/*! ZSTD_DCtx_setParameter() :
98002 + *  Set one compression parameter, selected by enum ZSTD_dParameter.
98003 + *  All parameters have valid bounds. Bounds can be queried using ZSTD_dParam_getBounds().
98004 + *  Providing a value beyond bound will either clamp it, or trigger an error (depending on parameter).
98005 + *  Setting a parameter is only possible during frame initialization (before starting decompression).
98006 + * @return : 0, or an error code (which can be tested using ZSTD_isError()).
98007 + */
98008 +ZSTDLIB_API size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int value);
98010 +/*! ZSTD_DCtx_reset() :
98011 + *  Return a DCtx to clean state.
98012 + *  Session and parameters can be reset jointly or separately.
98013 + *  Parameters can only be reset when no active frame is being decompressed.
98014 + * @return : 0, or an error code, which can be tested with ZSTD_isError()
98015 + */
98016 +ZSTDLIB_API size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset);
98019 +/****************************
98020 +*  Streaming
98021 +****************************/
98023 +typedef struct ZSTD_inBuffer_s {
98024 +  const void* src;    /**< start of input buffer */
98025 +  size_t size;        /**< size of input buffer */
98026 +  size_t pos;         /**< position where reading stopped. Will be updated. Necessarily 0 <= pos <= size */
98027 +} ZSTD_inBuffer;
98029 +typedef struct ZSTD_outBuffer_s {
98030 +  void*  dst;         /**< start of output buffer */
98031 +  size_t size;        /**< size of output buffer */
98032 +  size_t pos;         /**< position where writing stopped. Will be updated. Necessarily 0 <= pos <= size */
98033 +} ZSTD_outBuffer;
98037 +/*-***********************************************************************
98038 +*  Streaming compression - HowTo
98040 +*  A ZSTD_CStream object is required to track streaming operation.
98041 +*  Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources.
98042 +*  ZSTD_CStream objects can be reused multiple times on consecutive compression operations.
98043 +*  It is recommended to re-use ZSTD_CStream since it will play nicer with system's memory, by re-using already allocated memory.
98045 +*  For parallel execution, use one separate ZSTD_CStream per thread.
98047 +*  note : since v1.3.0, ZSTD_CStream and ZSTD_CCtx are the same thing.
98049 +*  Parameters are sticky : when starting a new compression on the same context,
98050 +*  it will re-use the same sticky parameters as previous compression session.
98051 +*  When in doubt, it's recommended to fully initialize the context before usage.
98052 +*  Use ZSTD_CCtx_reset() to reset the context and ZSTD_CCtx_setParameter(),
98053 +*  ZSTD_CCtx_setPledgedSrcSize(), or ZSTD_CCtx_loadDictionary() and friends to
98054 +*  set more specific parameters, the pledged source size, or load a dictionary.
98056 +*  Use ZSTD_compressStream2() with ZSTD_e_continue as many times as necessary to
98057 +*  consume input stream. The function will automatically update both `pos`
98058 +*  fields within `input` and `output`.
98059 +*  Note that the function may not consume the entire input, for example, because
98060 +*  the output buffer is already full, in which case `input.pos < input.size`.
98061 +*  The caller must check if input has been entirely consumed.
98062 +*  If not, the caller must make some room to receive more compressed data,
98063 +*  and then present again remaining input data.
98064 +*  note: ZSTD_e_continue is guaranteed to make some forward progress when called,
98065 +*        but doesn't guarantee maximal forward progress. This is especially relevant
98066 +*        when compressing with multiple threads. The call won't block if it can
98067 +*        consume some input, but if it can't it will wait for some, but not all,
98068 +*        output to be flushed.
98069 +* @return : provides a minimum amount of data remaining to be flushed from internal buffers
98070 +*           or an error code, which can be tested using ZSTD_isError().
98072 +*  At any moment, it's possible to flush whatever data might remain stuck within internal buffer,
98073 +*  using ZSTD_compressStream2() with ZSTD_e_flush. `output->pos` will be updated.
98074 +*  Note that, if `output->size` is too small, a single invocation with ZSTD_e_flush might not be enough (return code > 0).
98075 +*  In which case, make some room to receive more compressed data, and call again ZSTD_compressStream2() with ZSTD_e_flush.
98076 +*  You must continue calling ZSTD_compressStream2() with ZSTD_e_flush until it returns 0, at which point you can change the
98077 +*  operation.
98078 +*  note: ZSTD_e_flush will flush as much output as possible, meaning when compressing with multiple threads, it will
98079 +*        block until the flush is complete or the output buffer is full.
98080 +*  @return : 0 if internal buffers are entirely flushed,
98081 +*            >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
98082 +*            or an error code, which can be tested using ZSTD_isError().
98084 +*  Calling ZSTD_compressStream2() with ZSTD_e_end instructs to finish a frame.
98085 +*  It will perform a flush and write frame epilogue.
98086 +*  The epilogue is required for decoders to consider a frame completed.
98087 +*  flush operation is the same, and follows same rules as calling ZSTD_compressStream2() with ZSTD_e_flush.
98088 +*  You must continue calling ZSTD_compressStream2() with ZSTD_e_end until it returns 0, at which point you are free to
98089 +*  start a new frame.
98090 +*  note: ZSTD_e_end will flush as much output as possible, meaning when compressing with multiple threads, it will
98091 +*        block until the flush is complete or the output buffer is full.
98092 +*  @return : 0 if frame fully completed and fully flushed,
98093 +*            >0 if some data still present within internal buffer (the value is minimal estimation of remaining size),
98094 +*            or an error code, which can be tested using ZSTD_isError().
98096 +* *******************************************************************/
98098 +typedef ZSTD_CCtx ZSTD_CStream;  /**< CCtx and CStream are now effectively same object (>= v1.3.0) */
98099 +                                 /* Continue to distinguish them for compatibility with older versions <= v1.2.0 */
98100 +/*===== ZSTD_CStream management functions =====*/
98101 +ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream(void);
98102 +ZSTDLIB_API size_t ZSTD_freeCStream(ZSTD_CStream* zcs);  /* accept NULL pointer */
98104 +/*===== Streaming compression functions =====*/
98105 +typedef enum {
98106 +    ZSTD_e_continue=0, /* collect more data, encoder decides when to output compressed result, for optimal compression ratio */
98107 +    ZSTD_e_flush=1,    /* flush any data provided so far,
98108 +                        * it creates (at least) one new block, that can be decoded immediately on reception;
98109 +                        * frame will continue: any future data can still reference previously compressed data, improving compression.
98110 +                        * note : multithreaded compression will block to flush as much output as possible. */
98111 +    ZSTD_e_end=2       /* flush any remaining data _and_ close current frame.
98112 +                        * note that frame is only closed after compressed data is fully flushed (return value == 0).
98113 +                        * After that point, any additional data starts a new frame.
98114 +                        * note : each frame is independent (does not reference any content from previous frame).
98115 +                        * note : multithreaded compression will block to flush as much output as possible. */
98116 +} ZSTD_EndDirective;
98118 +/*! ZSTD_compressStream2() :
98119 + *  Behaves about the same as ZSTD_compressStream, with additional control on end directive.
98120 + *  - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
98121 + *  - Compression parameters cannot be changed once compression is started (save a list of exceptions in multi-threading mode)
98122 + *  - output->pos must be <= dstCapacity, input->pos must be <= srcSize
98123 + *  - output->pos and input->pos will be updated. They are guaranteed to remain below their respective limit.
98124 + *  - endOp must be a valid directive
98125 + *  - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller.
98126 + *  - When nbWorkers>=1, function is non-blocking : it copies a portion of input, distributes jobs to internal worker threads, flush to output whatever is available,
98127 + *                                                  and then immediately returns, just indicating that there is some data remaining to be flushed.
98128 + *                                                  The function nonetheless guarantees forward progress : it will return only after it reads or write at least 1+ byte.
98129 + *  - Exception : if the first call requests a ZSTD_e_end directive and provides enough dstCapacity, the function delegates to ZSTD_compress2() which is always blocking.
98130 + *  - @return provides a minimum amount of data remaining to be flushed from internal buffers
98131 + *            or an error code, which can be tested using ZSTD_isError().
98132 + *            if @return != 0, flush is not fully completed, there is still some data left within internal buffers.
98133 + *            This is useful for ZSTD_e_flush, since in this case more flushes are necessary to empty all buffers.
98134 + *            For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed.
98135 + *  - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0),
98136 + *            only ZSTD_e_end or ZSTD_e_flush operations are allowed.
98137 + *            Before starting a new compression job, or changing compression parameters,
98138 + *            it is required to fully flush internal buffers.
98139 + */
98140 +ZSTDLIB_API size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
98141 +                                         ZSTD_outBuffer* output,
98142 +                                         ZSTD_inBuffer* input,
98143 +                                         ZSTD_EndDirective endOp);
98146 +/* These buffer sizes are softly recommended.
98147 + * They are not required : ZSTD_compressStream*() happily accepts any buffer size, for both input and output.
98148 + * Respecting the recommended size just makes it a bit easier for ZSTD_compressStream*(),
98149 + * reducing the amount of memory shuffling and buffering, resulting in minor performance savings.
98150 + *
98151 + * However, note that these recommendations are from the perspective of a C caller program.
98152 + * If the streaming interface is invoked from some other language,
98153 + * especially managed ones such as Java or Go, through a foreign function interface such as jni or cgo,
98154 + * a major performance rule is to reduce crossing such interface to an absolute minimum.
98155 + * It's not rare that performance ends being spent more into the interface, rather than compression itself.
98156 + * In which cases, prefer using large buffers, as large as practical,
98157 + * for both input and output, to reduce the nb of roundtrips.
98158 + */
98159 +ZSTDLIB_API size_t ZSTD_CStreamInSize(void);    /**< recommended size for input buffer */
98160 +ZSTDLIB_API size_t ZSTD_CStreamOutSize(void);   /**< recommended size for output buffer. Guarantee to successfully flush at least one complete compressed block. */
98163 +/* *****************************************************************************
98164 + * This following is a legacy streaming API.
98165 + * It can be replaced by ZSTD_CCtx_reset() and ZSTD_compressStream2().
98166 + * It is redundant, but remains fully supported.
98167 + * Advanced parameters and dictionary compression can only be used through the
98168 + * new API.
98169 + ******************************************************************************/
98171 +/*!
98172 + * Equivalent to:
98173 + *
98174 + *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
98175 + *     ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
98176 + *     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
98177 + */
98178 +ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel);
98179 +/*!
98180 + * Alternative for ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue).
98181 + * NOTE: The return value is different. ZSTD_compressStream() returns a hint for
98182 + * the next read size (if non-zero and not an error). ZSTD_compressStream2()
98183 + * returns the minimum nb of bytes left to flush (if non-zero and not an error).
98184 + */
98185 +ZSTDLIB_API size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
98186 +/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_flush). */
98187 +ZSTDLIB_API size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
98188 +/*! Equivalent to ZSTD_compressStream2(zcs, output, &emptyInput, ZSTD_e_end). */
98189 +ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
98192 +/*-***************************************************************************
98193 +*  Streaming decompression - HowTo
98195 +*  A ZSTD_DStream object is required to track streaming operations.
98196 +*  Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources.
98197 +*  ZSTD_DStream objects can be re-used multiple times.
98199 +*  Use ZSTD_initDStream() to start a new decompression operation.
98200 +* @return : recommended first input size
98201 +*  Alternatively, use advanced API to set specific properties.
98203 +*  Use ZSTD_decompressStream() repetitively to consume your input.
98204 +*  The function will update both `pos` fields.
98205 +*  If `input.pos < input.size`, some input has not been consumed.
98206 +*  It's up to the caller to present again remaining data.
98207 +*  The function tries to flush all data decoded immediately, respecting output buffer size.
98208 +*  If `output.pos < output.size`, decoder has flushed everything it could.
98209 +*  But if `output.pos == output.size`, there might be some data left within internal buffers.
98210 +*  In which case, call ZSTD_decompressStream() again to flush whatever remains in the buffer.
98211 +*  Note : with no additional input provided, amount of data flushed is necessarily <= ZSTD_BLOCKSIZE_MAX.
98212 +* @return : 0 when a frame is completely decoded and fully flushed,
98213 +*        or an error code, which can be tested using ZSTD_isError(),
98214 +*        or any other value > 0, which means there is still some decoding or flushing to do to complete current frame :
98215 +*                                the return value is a suggested next input size (just a hint for better latency)
98216 +*                                that will never request more than the remaining frame size.
98217 +* *******************************************************************************/
98219 +typedef ZSTD_DCtx ZSTD_DStream;  /**< DCtx and DStream are now effectively same object (>= v1.3.0) */
98220 +                                 /* For compatibility with versions <= v1.2.0, prefer differentiating them. */
98221 +/*===== ZSTD_DStream management functions =====*/
98222 +ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream(void);
98223 +ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds);  /* accept NULL pointer */
98225 +/*===== Streaming decompression functions =====*/
98227 +/* This function is redundant with the advanced API and equivalent to:
98228 + *
98229 + *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
98230 + *     ZSTD_DCtx_refDDict(zds, NULL);
98231 + */
98232 +ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds);
98234 +ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
98236 +ZSTDLIB_API size_t ZSTD_DStreamInSize(void);    /*!< recommended size for input buffer */
98237 +ZSTDLIB_API size_t ZSTD_DStreamOutSize(void);   /*!< recommended size for output buffer. Guarantee to successfully flush at least one complete block in all circumstances. */
98240 +/**************************
98241 +*  Simple dictionary API
98242 +***************************/
98243 +/*! ZSTD_compress_usingDict() :
98244 + *  Compression at an explicit compression level using a Dictionary.
98245 + *  A dictionary can be any arbitrary data segment (also called a prefix),
98246 + *  or a buffer with specified information (see dictBuilder/zdict.h).
98247 + *  Note : This function loads the dictionary, resulting in significant startup delay.
98248 + *         It's intended for a dictionary used only once.
98249 + *  Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used. */
98250 +ZSTDLIB_API size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx,
98251 +                                           void* dst, size_t dstCapacity,
98252 +                                     const void* src, size_t srcSize,
98253 +                                     const void* dict,size_t dictSize,
98254 +                                           int compressionLevel);
98256 +/*! ZSTD_decompress_usingDict() :
98257 + *  Decompression using a known Dictionary.
98258 + *  Dictionary must be identical to the one used during compression.
98259 + *  Note : This function loads the dictionary, resulting in significant startup delay.
98260 + *         It's intended for a dictionary used only once.
98261 + *  Note : When `dict == NULL || dictSize < 8` no dictionary is used. */
98262 +ZSTDLIB_API size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
98263 +                                             void* dst, size_t dstCapacity,
98264 +                                       const void* src, size_t srcSize,
98265 +                                       const void* dict,size_t dictSize);
98268 +/***********************************
98269 + *  Bulk processing dictionary API
98270 + **********************************/
98271 +typedef struct ZSTD_CDict_s ZSTD_CDict;
98273 +/*! ZSTD_createCDict() :
98274 + *  When compressing multiple messages or blocks using the same dictionary,
98275 + *  it's recommended to digest the dictionary only once, since it's a costly operation.
98276 + *  ZSTD_createCDict() will create a state from digesting a dictionary.
98277 + *  The resulting state can be used for future compression operations with very limited startup cost.
98278 + *  ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
98279 + * @dictBuffer can be released after ZSTD_CDict creation, because its content is copied within CDict.
98280 + *  Note 1 : Consider experimental function `ZSTD_createCDict_byReference()` if you prefer to not duplicate @dictBuffer content.
98281 + *  Note 2 : A ZSTD_CDict can be created from an empty @dictBuffer,
98282 + *      in which case the only thing that it transports is the @compressionLevel.
98283 + *      This can be useful in a pipeline featuring ZSTD_compress_usingCDict() exclusively,
98284 + *      expecting a ZSTD_CDict parameter with any data, including those without a known dictionary. */
98285 +ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize,
98286 +                                         int compressionLevel);
98288 +/*! ZSTD_freeCDict() :
98289 + *  Function frees memory allocated by ZSTD_createCDict().
98290 + *  If a NULL pointer is passed, no operation is performed. */
98291 +ZSTDLIB_API size_t      ZSTD_freeCDict(ZSTD_CDict* CDict);
98293 +/*! ZSTD_compress_usingCDict() :
98294 + *  Compression using a digested Dictionary.
98295 + *  Recommended when same dictionary is used multiple times.
98296 + *  Note : compression level is _decided at dictionary creation time_,
98297 + *     and frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no) */
98298 +ZSTDLIB_API size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
98299 +                                            void* dst, size_t dstCapacity,
98300 +                                      const void* src, size_t srcSize,
98301 +                                      const ZSTD_CDict* cdict);
98304 +typedef struct ZSTD_DDict_s ZSTD_DDict;
98306 +/*! ZSTD_createDDict() :
98307 + *  Create a digested dictionary, ready to start decompression operation without startup delay.
98308 + *  dictBuffer can be released after DDict creation, as its content is copied inside DDict. */
98309 +ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize);
98311 +/*! ZSTD_freeDDict() :
98312 + *  Function frees memory allocated with ZSTD_createDDict()
98313 + *  If a NULL pointer is passed, no operation is performed. */
98314 +ZSTDLIB_API size_t      ZSTD_freeDDict(ZSTD_DDict* ddict);
98316 +/*! ZSTD_decompress_usingDDict() :
98317 + *  Decompression using a digested Dictionary.
98318 + *  Recommended when same dictionary is used multiple times. */
98319 +ZSTDLIB_API size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
98320 +                                              void* dst, size_t dstCapacity,
98321 +                                        const void* src, size_t srcSize,
98322 +                                        const ZSTD_DDict* ddict);
98325 +/********************************
98326 + *  Dictionary helper functions
98327 + *******************************/
98329 +/*! ZSTD_getDictID_fromDict() :
98330 + *  Provides the dictID stored within dictionary.
98331 + *  if @return == 0, the dictionary is not conformant with Zstandard specification.
98332 + *  It can still be loaded, but as a content-only dictionary. */
98333 +ZSTDLIB_API unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);
98335 +/*! ZSTD_getDictID_fromDDict() :
98336 + *  Provides the dictID of the dictionary loaded into `ddict`.
98337 + *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
98338 + *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
98339 +ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
98341 +/*! ZSTD_getDictID_fromFrame() :
98342 + *  Provides the dictID required to decompress the frame stored within `src`.
98343 + *  If @return == 0, the dictID could not be decoded.
98344 + *  This could be for one of the following reasons :
98345 + *  - The frame does not require a dictionary to be decoded (most common case).
98346 + *  - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden information.
98347 + *    Note : this use case also happens when using a non-conformant dictionary.
98348 + *  - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
98349 + *  - This is not a Zstandard frame.
98350 + *  When identifying the exact failure cause, it's possible to use ZSTD_getFrameHeader(), which will provide a more precise error code. */
98351 +ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
98354 +/*******************************************************************************
98355 + * Advanced dictionary and prefix API
98356 + *
98357 + * This API allows dictionaries to be used with ZSTD_compress2(),
98358 + * ZSTD_compressStream2(), and ZSTD_decompress(). Dictionaries are sticky, and
98359 + * only reset when the context is reset with ZSTD_reset_parameters or
98360 + * ZSTD_reset_session_and_parameters. Prefixes are single-use.
98361 + ******************************************************************************/
98364 +/*! ZSTD_CCtx_loadDictionary() :
98365 + *  Create an internal CDict from `dict` buffer.
98366 + *  Decompression will have to use same dictionary.
98367 + * @result : 0, or an error code (which can be tested with ZSTD_isError()).
98368 + *  Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary,
98369 + *           meaning "return to no-dictionary mode".
98370 + *  Note 1 : Dictionary is sticky, it will be used for all future compressed frames.
98371 + *           To return to "no-dictionary" situation, load a NULL dictionary (or reset parameters).
98372 + *  Note 2 : Loading a dictionary involves building tables.
98373 + *           It's also a CPU consuming operation, with non-negligible impact on latency.
98374 + *           Tables are dependent on compression parameters, and for this reason,
98375 + *           compression parameters can no longer be changed after loading a dictionary.
98376 + *  Note 3 :`dict` content will be copied internally.
98377 + *           Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead.
98378 + *           In such a case, dictionary buffer must outlive its users.
98379 + *  Note 4 : Use ZSTD_CCtx_loadDictionary_advanced()
98380 + *           to precisely select how dictionary content must be interpreted. */
98381 +ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
98383 +/*! ZSTD_CCtx_refCDict() :
98384 + *  Reference a prepared dictionary, to be used for all next compressed frames.
98385 + *  Note that compression parameters are enforced from within CDict,
98386 + *  and supersede any compression parameter previously set within CCtx.
98387 + *  The parameters ignored are labelled as "superseded-by-cdict" in the ZSTD_cParameter enum docs.
98388 + *  The ignored parameters will be used again if the CCtx is returned to no-dictionary mode.
98389 + *  The dictionary will remain valid for future compressed frames using same CCtx.
98390 + * @result : 0, or an error code (which can be tested with ZSTD_isError()).
98391 + *  Special : Referencing a NULL CDict means "return to no-dictionary mode".
98392 + *  Note 1 : Currently, only one dictionary can be managed.
98393 + *           Referencing a new dictionary effectively "discards" any previous one.
98394 + *  Note 2 : CDict is just referenced, its lifetime must outlive its usage within CCtx. */
98395 +ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
98397 +/*! ZSTD_CCtx_refPrefix() :
98398 + *  Reference a prefix (single-usage dictionary) for next compressed frame.
98399 + *  A prefix is **only used once**. Tables are discarded at end of frame (ZSTD_e_end).
98400 + *  Decompression will need same prefix to properly regenerate data.
98401 + *  Compressing with a prefix is similar in outcome as performing a diff and compressing it,
98402 + *  but performs much faster, especially during decompression (compression speed is tunable with compression level).
98403 + * @result : 0, or an error code (which can be tested with ZSTD_isError()).
98404 + *  Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary
98405 + *  Note 1 : Prefix buffer is referenced. It **must** outlive compression.
98406 + *           Its content must remain unmodified during compression.
98407 + *  Note 2 : If the intention is to diff some large src data blob with some prior version of itself,
98408 + *           ensure that the window size is large enough to contain the entire source.
98409 + *           See ZSTD_c_windowLog.
98410 + *  Note 3 : Referencing a prefix involves building tables, which are dependent on compression parameters.
98411 + *           It's a CPU consuming operation, with non-negligible impact on latency.
98412 + *           If there is a need to use the same prefix multiple times, consider loadDictionary instead.
98413 + *  Note 4 : By default, the prefix is interpreted as raw content (ZSTD_dct_rawContent).
98414 + *           Use experimental ZSTD_CCtx_refPrefix_advanced() to alter dictionary interpretation. */
98415 +ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx,
98416 +                                 const void* prefix, size_t prefixSize);
98418 +/*! ZSTD_DCtx_loadDictionary() :
98419 + *  Create an internal DDict from dict buffer,
98420 + *  to be used to decompress next frames.
98421 + *  The dictionary remains valid for all future frames, until explicitly invalidated.
98422 + * @result : 0, or an error code (which can be tested with ZSTD_isError()).
98423 + *  Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,
98424 + *            meaning "return to no-dictionary mode".
98425 + *  Note 1 : Loading a dictionary involves building tables,
98426 + *           which has a non-negligible impact on CPU usage and latency.
98427 + *           It's recommended to "load once, use many times", to amortize the cost
98428 + *  Note 2 :`dict` content will be copied internally, so `dict` can be released after loading.
98429 + *           Use ZSTD_DCtx_loadDictionary_byReference() to reference dictionary content instead.
98430 + *  Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to take control of
98431 + *           how dictionary content is loaded and interpreted.
98432 + */
98433 +ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
98435 +/*! ZSTD_DCtx_refDDict() :
98436 + *  Reference a prepared dictionary, to be used to decompress next frames.
98437 + *  The dictionary remains active for decompression of future frames using same DCtx.
98438 + *
98439 + *  If called with ZSTD_d_refMultipleDDicts enabled, repeated calls of this function
98440 + *  will store the DDict references in a table, and the DDict used for decompression
98441 + *  will be determined at decompression time, as per the dict ID in the frame.
98442 + *  The memory for the table is allocated on the first call to refDDict, and can be
98443 + *  freed with ZSTD_freeDCtx().
98444 + *
98445 + * @result : 0, or an error code (which can be tested with ZSTD_isError()).
98446 + *  Note 1 : Currently, only one dictionary can be managed.
98447 + *           Referencing a new dictionary effectively "discards" any previous one.
98448 + *  Special: referencing a NULL DDict means "return to no-dictionary mode".
98449 + *  Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx.
98450 + */
98451 +ZSTDLIB_API size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
98453 +/*! ZSTD_DCtx_refPrefix() :
98454 + *  Reference a prefix (single-usage dictionary) to decompress next frame.
98455 + *  This is the reverse operation of ZSTD_CCtx_refPrefix(),
98456 + *  and must use the same prefix as the one used during compression.
98457 + *  Prefix is **only used once**. Reference is discarded at end of frame.
98458 + *  End of frame is reached when ZSTD_decompressStream() returns 0.
98459 + * @result : 0, or an error code (which can be tested with ZSTD_isError()).
98460 + *  Note 1 : Adding any prefix (including NULL) invalidates any previously set prefix or dictionary
98461 + *  Note 2 : Prefix buffer is referenced. It **must** outlive decompression.
98462 + *           Prefix buffer must remain unmodified up to the end of frame,
98463 + *           reached when ZSTD_decompressStream() returns 0.
98464 + *  Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent).
98465 + *           Use ZSTD_CCtx_refPrefix_advanced() to alter dictMode (Experimental section)
98466 + *  Note 4 : Referencing a raw content prefix has almost no cpu nor memory cost.
98467 + *           A full dictionary is more costly, as it requires building tables.
98468 + */
98469 +ZSTDLIB_API size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx,
98470 +                                 const void* prefix, size_t prefixSize);
98472 +/* ===   Memory management   === */
98474 +/*! ZSTD_sizeof_*() :
98475 + *  These functions give the _current_ memory usage of selected object.
98476 + *  Note that object memory usage can evolve (increase or decrease) over time. */
98477 +ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
98478 +ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
98479 +ZSTDLIB_API size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs);
98480 +ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
98481 +ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
98482 +ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
98484 +#endif  /* ZSTD_H_235446 */
98487 +/* **************************************************************************************
98488 + *   ADVANCED AND EXPERIMENTAL FUNCTIONS
98489 + ****************************************************************************************
98490 + * The definitions in the following section are considered experimental.
98491 + * They are provided for advanced scenarios.
98492 + * They should never be used with a dynamic library, as prototypes may change in the future.
98493 + * Use them only in association with static linking.
98494 + * ***************************************************************************************/
98496 +#if !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
98497 +#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY
98499 +/****************************************************************************************
98500 + *   experimental API (static linking only)
98501 + ****************************************************************************************
98502 + * The following symbols and constants
98503 + * are not planned to join "stable API" status in the near future.
98504 + * They can still change in future versions.
98505 + * Some of them are planned to remain in the static_only section indefinitely.
98506 + * Some of them might be removed in the future (especially when redundant with existing stable functions)
98507 + * ***************************************************************************************/
98509 +#define ZSTD_FRAMEHEADERSIZE_PREFIX(format) ((format) == ZSTD_f_zstd1 ? 5 : 1)   /* minimum input size required to query frame header size */
98510 +#define ZSTD_FRAMEHEADERSIZE_MIN(format)    ((format) == ZSTD_f_zstd1 ? 6 : 2)
98511 +#define ZSTD_FRAMEHEADERSIZE_MAX   18   /* can be useful for static allocation */
98512 +#define ZSTD_SKIPPABLEHEADERSIZE    8
98514 +/* compression parameter bounds */
98515 +#define ZSTD_WINDOWLOG_MAX_32    30
98516 +#define ZSTD_WINDOWLOG_MAX_64    31
98517 +#define ZSTD_WINDOWLOG_MAX     ((int)(sizeof(size_t) == 4 ? ZSTD_WINDOWLOG_MAX_32 : ZSTD_WINDOWLOG_MAX_64))
98518 +#define ZSTD_WINDOWLOG_MIN       10
98519 +#define ZSTD_HASHLOG_MAX       ((ZSTD_WINDOWLOG_MAX < 30) ? ZSTD_WINDOWLOG_MAX : 30)
98520 +#define ZSTD_HASHLOG_MIN          6
98521 +#define ZSTD_CHAINLOG_MAX_32     29
98522 +#define ZSTD_CHAINLOG_MAX_64     30
98523 +#define ZSTD_CHAINLOG_MAX      ((int)(sizeof(size_t) == 4 ? ZSTD_CHAINLOG_MAX_32 : ZSTD_CHAINLOG_MAX_64))
98524 +#define ZSTD_CHAINLOG_MIN        ZSTD_HASHLOG_MIN
98525 +#define ZSTD_SEARCHLOG_MAX      (ZSTD_WINDOWLOG_MAX-1)
98526 +#define ZSTD_SEARCHLOG_MIN        1
98527 +#define ZSTD_MINMATCH_MAX         7   /* only for ZSTD_fast, other strategies are limited to 6 */
98528 +#define ZSTD_MINMATCH_MIN         3   /* only for ZSTD_btopt+, faster strategies are limited to 4 */
98529 +#define ZSTD_TARGETLENGTH_MAX    ZSTD_BLOCKSIZE_MAX
98530 +#define ZSTD_TARGETLENGTH_MIN     0   /* note : comparing this constant to an unsigned results in a tautological test */
98531 +#define ZSTD_STRATEGY_MIN        ZSTD_fast
98532 +#define ZSTD_STRATEGY_MAX        ZSTD_btultra2
98535 +#define ZSTD_OVERLAPLOG_MIN       0
98536 +#define ZSTD_OVERLAPLOG_MAX       9
98538 +#define ZSTD_WINDOWLOG_LIMIT_DEFAULT 27   /* by default, the streaming decoder will refuse any frame
98539 +                                           * requiring larger than (1<<ZSTD_WINDOWLOG_LIMIT_DEFAULT) window size,
98540 +                                           * to preserve host's memory from unreasonable requirements.
98541 +                                           * This limit can be overridden using ZSTD_DCtx_setParameter(,ZSTD_d_windowLogMax,).
98542 +                                           * The limit does not apply for one-pass decoders (such as ZSTD_decompress()), since no additional memory is allocated */
98545 +/* LDM parameter bounds */
98546 +#define ZSTD_LDM_HASHLOG_MIN      ZSTD_HASHLOG_MIN
98547 +#define ZSTD_LDM_HASHLOG_MAX      ZSTD_HASHLOG_MAX
98548 +#define ZSTD_LDM_MINMATCH_MIN        4
98549 +#define ZSTD_LDM_MINMATCH_MAX     4096
98550 +#define ZSTD_LDM_BUCKETSIZELOG_MIN   1
98551 +#define ZSTD_LDM_BUCKETSIZELOG_MAX   8
98552 +#define ZSTD_LDM_HASHRATELOG_MIN     0
98553 +#define ZSTD_LDM_HASHRATELOG_MAX (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN)
98555 +/* Advanced parameter bounds */
98556 +#define ZSTD_TARGETCBLOCKSIZE_MIN   64
98557 +#define ZSTD_TARGETCBLOCKSIZE_MAX   ZSTD_BLOCKSIZE_MAX
98558 +#define ZSTD_SRCSIZEHINT_MIN        0
98559 +#define ZSTD_SRCSIZEHINT_MAX        INT_MAX
98561 +/* internal */
98562 +#define ZSTD_HASHLOG3_MAX           17
98565 +/* ---  Advanced types  --- */
98567 +typedef struct ZSTD_CCtx_params_s ZSTD_CCtx_params;
98569 +typedef struct {
98570 +    unsigned int offset;      /* The offset of the match. (NOT the same as the offset code)
98571 +                               * If offset == 0 and matchLength == 0, this sequence represents the last
98572 +                               * literals in the block of litLength size.
98573 +                               */
98575 +    unsigned int litLength;   /* Literal length of the sequence. */
98576 +    unsigned int matchLength; /* Match length of the sequence. */
98578 +                              /* Note: Users of this API may provide a sequence with matchLength == litLength == offset == 0.
98579 +                               * In this case, we will treat the sequence as a marker for a block boundary.
98580 +                               */
98582 +    unsigned int rep;         /* Represents which repeat offset is represented by the field 'offset'.
98583 +                               * Ranges from [0, 3].
98584 +                               *
98585 +                               * Repeat offsets are essentially previous offsets from previous sequences sorted in
98586 +                               * recency order. For more detail, see doc/zstd_compression_format.md
98587 +                               *
98588 +                               * If rep == 0, then 'offset' does not contain a repeat offset.
98589 +                               * If rep > 0:
98590 +                               *  If litLength != 0:
98591 +                               *      rep == 1 --> offset == repeat_offset_1
98592 +                               *      rep == 2 --> offset == repeat_offset_2
98593 +                               *      rep == 3 --> offset == repeat_offset_3
98594 +                               *  If litLength == 0:
98595 +                               *      rep == 1 --> offset == repeat_offset_2
98596 +                               *      rep == 2 --> offset == repeat_offset_3
98597 +                               *      rep == 3 --> offset == repeat_offset_1 - 1
98598 +                               *
98599 +                               * Note: This field is optional. ZSTD_generateSequences() will calculate the value of
98600 +                               * 'rep', but repeat offsets do not necessarily need to be calculated from an external
98601 +                               * sequence provider's perspective. For example, ZSTD_compressSequences() does not
98602 +                               * use this 'rep' field at all (as of now).
98603 +                               */
98604 +} ZSTD_Sequence;
98606 +typedef struct {
98607 +    unsigned windowLog;       /**< largest match distance : larger == more compression, more memory needed during decompression */
98608 +    unsigned chainLog;        /**< fully searched segment : larger == more compression, slower, more memory (useless for fast) */
98609 +    unsigned hashLog;         /**< dispatch table : larger == faster, more memory */
98610 +    unsigned searchLog;       /**< nb of searches : larger == more compression, slower */
98611 +    unsigned minMatch;        /**< match length searched : larger == faster decompression, sometimes less compression */
98612 +    unsigned targetLength;    /**< acceptable match size for optimal parser (only) : larger == more compression, slower */
98613 +    ZSTD_strategy strategy;   /**< see ZSTD_strategy definition above */
98614 +} ZSTD_compressionParameters;
98616 +typedef struct {
98617 +    int contentSizeFlag; /**< 1: content size will be in frame header (when known) */
98618 +    int checksumFlag;    /**< 1: generate a 32-bits checksum using XXH64 algorithm at end of frame, for error detection */
98619 +    int noDictIDFlag;    /**< 1: no dictID will be saved into frame header (dictID is only useful for dictionary compression) */
98620 +} ZSTD_frameParameters;
98622 +typedef struct {
98623 +    ZSTD_compressionParameters cParams;
98624 +    ZSTD_frameParameters fParams;
98625 +} ZSTD_parameters;
98627 +typedef enum {
98628 +    ZSTD_dct_auto = 0,       /* dictionary is "full" when starting with ZSTD_MAGIC_DICTIONARY, otherwise it is "rawContent" */
98629 +    ZSTD_dct_rawContent = 1, /* ensures dictionary is always loaded as rawContent, even if it starts with ZSTD_MAGIC_DICTIONARY */
98630 +    ZSTD_dct_fullDict = 2    /* refuses to load a dictionary if it does not respect Zstandard's specification, starting with ZSTD_MAGIC_DICTIONARY */
98631 +} ZSTD_dictContentType_e;
98633 +typedef enum {
98634 +    ZSTD_dlm_byCopy = 0,  /**< Copy dictionary content internally */
98635 +    ZSTD_dlm_byRef = 1    /**< Reference dictionary content -- the dictionary buffer must outlive its users. */
98636 +} ZSTD_dictLoadMethod_e;
98638 +typedef enum {
98639 +    ZSTD_f_zstd1 = 0,           /* zstd frame format, specified in zstd_compression_format.md (default) */
98640 +    ZSTD_f_zstd1_magicless = 1  /* Variant of zstd frame format, without initial 4-bytes magic number.
98641 +                                 * Useful to save 4 bytes per generated frame.
98642 +                                 * Decoder cannot recognise automatically this format, requiring this instruction. */
98643 +} ZSTD_format_e;
98645 +typedef enum {
98646 +    /* Note: this enum controls ZSTD_d_forceIgnoreChecksum */
98647 +    ZSTD_d_validateChecksum = 0,
98648 +    ZSTD_d_ignoreChecksum = 1
98649 +} ZSTD_forceIgnoreChecksum_e;
98651 +typedef enum {
98652 +    /* Note: this enum controls ZSTD_d_refMultipleDDicts */
98653 +    ZSTD_rmd_refSingleDDict = 0,
98654 +    ZSTD_rmd_refMultipleDDicts = 1
98655 +} ZSTD_refMultipleDDicts_e;
98657 +typedef enum {
98658 +    /* Note: this enum and the behavior it controls are effectively internal
98659 +     * implementation details of the compressor. They are expected to continue
98660 +     * to evolve and should be considered only in the context of extremely
98661 +     * advanced performance tuning.
98662 +     *
98663 +     * Zstd currently supports the use of a CDict in three ways:
98664 +     *
98665 +     * - The contents of the CDict can be copied into the working context. This
98666 +     *   means that the compression can search both the dictionary and input
98667 +     *   while operating on a single set of internal tables. This makes
98668 +     *   the compression faster per-byte of input. However, the initial copy of
98669 +     *   the CDict's tables incurs a fixed cost at the beginning of the
98670 +     *   compression. For small compressions (< 8 KB), that copy can dominate
98671 +     *   the cost of the compression.
98672 +     *
98673 +     * - The CDict's tables can be used in-place. In this model, compression is
98674 +     *   slower per input byte, because the compressor has to search two sets of
98675 +     *   tables. However, this model incurs no start-up cost (as long as the
98676 +     *   working context's tables can be reused). For small inputs, this can be
98677 +     *   faster than copying the CDict's tables.
98678 +     *
98679 +     * - The CDict's tables are not used at all, and instead we use the working
98680 +     *   context alone to reload the dictionary and use params based on the source
98681 +     *   size. See ZSTD_compress_insertDictionary() and ZSTD_compress_usingDict().
98682 +     *   This method is effective when the dictionary sizes are very small relative
98683 +     *   to the input size, and the input size is fairly large to begin with.
98684 +     *
98685 +     * Zstd has a simple internal heuristic that selects which strategy to use
98686 +     * at the beginning of a compression. However, if experimentation shows that
98687 +     * Zstd is making poor choices, it is possible to override that choice with
98688 +     * this enum.
98689 +     */
98690 +    ZSTD_dictDefaultAttach = 0, /* Use the default heuristic. */
98691 +    ZSTD_dictForceAttach   = 1, /* Never copy the dictionary. */
98692 +    ZSTD_dictForceCopy     = 2, /* Always copy the dictionary. */
98693 +    ZSTD_dictForceLoad     = 3  /* Always reload the dictionary */
98694 +} ZSTD_dictAttachPref_e;
98696 +typedef enum {
98697 +  ZSTD_lcm_auto = 0,          /**< Automatically determine the compression mode based on the compression level.
98698 +                               *   Negative compression levels will be uncompressed, and positive compression
98699 +                               *   levels will be compressed. */
98700 +  ZSTD_lcm_huffman = 1,       /**< Always attempt Huffman compression. Uncompressed literals will still be
98701 +                               *   emitted if Huffman compression is not profitable. */
98702 +  ZSTD_lcm_uncompressed = 2   /**< Always emit uncompressed literals. */
98703 +} ZSTD_literalCompressionMode_e;
98706 +/***************************************
98707 +*  Frame size functions
98708 +***************************************/
98710 +/*! ZSTD_findDecompressedSize() :
98711 + *  `src` should point to the start of a series of ZSTD encoded and/or skippable frames
98712 + *  `srcSize` must be the _exact_ size of this series
98713 + *       (i.e. there should be a frame boundary at `src + srcSize`)
98714 + *  @return : - decompressed size of all data in all successive frames
98715 + *            - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN
98716 + *            - if an error occurred: ZSTD_CONTENTSIZE_ERROR
98717 + *
98718 + *   note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode.
98719 + *            When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
98720 + *            In which case, it's necessary to use streaming mode to decompress data.
98721 + *   note 2 : decompressed size is always present when compression is done with ZSTD_compress()
98722 + *   note 3 : decompressed size can be very large (64-bits value),
98723 + *            potentially larger than what local system can handle as a single memory segment.
98724 + *            In which case, it's necessary to use streaming mode to decompress data.
98725 + *   note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.
98726 + *            Always ensure result fits within application's authorized limits.
98727 + *            Each application can set its own limits.
98728 + *   note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to
98729 + *            read each contained frame header.  This is fast as most of the data is skipped,
98730 + *            however it does mean that all frame data must be present and valid. */
98731 +ZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
98733 +/*! ZSTD_decompressBound() :
98734 + *  `src` should point to the start of a series of ZSTD encoded and/or skippable frames
98735 + *  `srcSize` must be the _exact_ size of this series
98736 + *       (i.e. there should be a frame boundary at `src + srcSize`)
98737 + *  @return : - upper-bound for the decompressed size of all data in all successive frames
98738 + *            - if an error occurred: ZSTD_CONTENTSIZE_ERROR
98739 + *
98740 + *  note 1  : an error can occur if `src` contains an invalid or incorrectly formatted frame.
98741 + *  note 2  : the upper-bound is exact when the decompressed size field is available in every ZSTD encoded frame of `src`.
98742 + *            in this case, `ZSTD_findDecompressedSize` and `ZSTD_decompressBound` return the same value.
98743 + *  note 3  : when the decompressed size field isn't available, the upper-bound for that frame is calculated by:
98744 + *              upper-bound = # blocks * min(128 KB, Window_Size)
98745 + */
98746 +ZSTDLIB_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize);
98748 +/*! ZSTD_frameHeaderSize() :
98749 + *  srcSize must be >= ZSTD_FRAMEHEADERSIZE_PREFIX.
98750 + * @return : size of the Frame Header,
98751 + *           or an error code (if srcSize is too small) */
98752 +ZSTDLIB_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
98754 +typedef enum {
98755 +  ZSTD_sf_noBlockDelimiters = 0,         /* Representation of ZSTD_Sequence has no block delimiters, sequences only */
98756 +  ZSTD_sf_explicitBlockDelimiters = 1    /* Representation of ZSTD_Sequence contains explicit block delimiters */
98757 +} ZSTD_sequenceFormat_e;
98759 +/*! ZSTD_generateSequences() :
98760 + * Generate sequences using ZSTD_compress2, given a source buffer.
98761 + *
98762 + * Each block will end with a dummy sequence
98763 + * with offset == 0, matchLength == 0, and litLength == length of last literals.
98764 + * litLength may be == 0, and if so, then the sequence of (of: 0 ml: 0 ll: 0)
98765 + * simply acts as a block delimiter.
98766 + *
98767 + * zc can be used to insert custom compression params.
98768 + * This function invokes ZSTD_compress2
98769 + *
98770 + * The output of this function can be fed into ZSTD_compressSequences() with CCtx
98771 + * setting of ZSTD_c_blockDelimiters as ZSTD_sf_explicitBlockDelimiters
98772 + * @return : number of sequences generated
98773 + */
98775 +ZSTDLIB_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
98776 +                                          size_t outSeqsSize, const void* src, size_t srcSize);
98778 +/*! ZSTD_mergeBlockDelimiters() :
98779 + * Given an array of ZSTD_Sequence, remove all sequences that represent block delimiters/last literals
98780 + * by merging them into the literals of the next sequence.
98781 + *
98782 + * As such, the final generated result has no explicit representation of block boundaries,
98783 + * and the final last literals segment is not represented in the sequences.
98784 + *
98785 + * The output of this function can be fed into ZSTD_compressSequences() with CCtx
98786 + * setting of ZSTD_c_blockDelimiters as ZSTD_sf_noBlockDelimiters
98787 + * @return : number of sequences left after merging
98788 + */
98789 +ZSTDLIB_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize);
98791 +/*! ZSTD_compressSequences() :
98792 + * Compress an array of ZSTD_Sequence, generated from the original source buffer, into dst.
98793 + * If a dictionary is included, then the cctx should reference the dict. (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.)
98794 + * The entire source is compressed into a single frame.
98795 + *
98796 + * The compression behavior changes based on cctx params. In particular:
98797 + *    If ZSTD_c_blockDelimiters == ZSTD_sf_noBlockDelimiters, the array of ZSTD_Sequence is expected to contain
98798 + *    no block delimiters (defined in ZSTD_Sequence). Block boundaries are roughly determined based on
98799 + *    the block size derived from the cctx, and sequences may be split. This is the default setting.
98800 + *
98801 + *    If ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, the array of ZSTD_Sequence is expected to contain
98802 + *    block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided.
98803 + *
98804 + *    If ZSTD_c_validateSequences == 0, this function will blindly accept the sequences provided. Invalid sequences cause undefined
98805 + *    behavior. If ZSTD_c_validateSequences == 1, then if a sequence is invalid (see doc/zstd_compression_format.md for
98806 + *    specifics regarding offset/matchlength requirements) then the function will bail out and return an error.
98807 + *
98808 + *    In addition to the two adjustable experimental params, there are other important cctx params.
98809 + *    - ZSTD_c_minMatch MUST be set as less than or equal to the smallest match generated by the match finder. It has a minimum value of ZSTD_MINMATCH_MIN.
98810 + *    - ZSTD_c_compressionLevel accordingly adjusts the strength of the entropy coder, as it would in typical compression.
98811 + *    - ZSTD_c_windowLog affects offset validation: this function will return an error at higher debug levels if a provided offset
98812 + *      is larger than what the spec allows for a given window log and dictionary (if present). See: doc/zstd_compression_format.md
98813 + *
98814 + * Note: Repcodes are, as of now, always re-calculated within this function, so ZSTD_Sequence::rep is unused.
98815 + * Note 2: Once we integrate ability to ingest repcodes, the explicit block delims mode must respect those repcodes exactly,
98816 + *         and cannot emit an RLE block that disagrees with the repcode history
98817 + * @return : final compressed size or a ZSTD error.
98818 + */
98819 +ZSTDLIB_API size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstSize,
98820 +                                  const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
98821 +                                  const void* src, size_t srcSize);
98824 +/*! ZSTD_writeSkippableFrame() :
98825 + * Generates a zstd skippable frame containing data given by src, and writes it to dst buffer.
98826 + *
98827 + * Skippable frames begin with a 4-byte magic number. There are 16 possible choices of magic number,
98828 + * ranging from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15.
98829 + * As such, the parameter magicVariant controls the exact skippable frame magic number variant used, so
98830 + * the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant.
98831 + *
98832 + * Returns an error if destination buffer is not large enough, if the source size is not representable
98833 + * with a 4-byte unsigned int, or if the parameter magicVariant is greater than 15 (and therefore invalid).
98834 + *
98835 + * @return : number of bytes written or a ZSTD error.
98836 + */
98837 +ZSTDLIB_API size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
98838 +                                            const void* src, size_t srcSize, unsigned magicVariant);
98841 +/***************************************
98842 +*  Memory management
98843 +***************************************/
98845 +/*! ZSTD_estimate*() :
98846 + *  These functions make it possible to estimate memory usage
98847 + *  of a future {D,C}Ctx, before its creation.
98848 + *
98849 + *  ZSTD_estimateCCtxSize() will provide a memory budget large enough
98850 + *  for any compression level up to selected one.
98851 + *  Note : Unlike ZSTD_estimateCStreamSize*(), this estimate
98852 + *         does not include space for a window buffer.
98853 + *         Therefore, the estimation is only guaranteed for single-shot compressions, not streaming.
98854 + *  The estimate will assume the input may be arbitrarily large,
98855 + *  which is the worst case.
98856 + *
98857 + *  When srcSize can be bound by a known and rather "small" value,
98858 + *  this fact can be used to provide a tighter estimation
98859 + *  because the CCtx compression context will need less memory.
98860 + *  This tighter estimation can be provided by more advanced functions
98861 + *  ZSTD_estimateCCtxSize_usingCParams(), which can be used in tandem with ZSTD_getCParams(),
98862 + *  and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter().
98863 + *  Both can be used to estimate memory using custom compression parameters and arbitrary srcSize limits.
98864 + *
98865 + *  Note 2 : only single-threaded compression is supported.
98866 + *  ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.
98867 + */
98868 +ZSTDLIB_API size_t ZSTD_estimateCCtxSize(int compressionLevel);
98869 +ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
98870 +ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);
98871 +ZSTDLIB_API size_t ZSTD_estimateDCtxSize(void);
98873 +/*! ZSTD_estimateCStreamSize() :
98874 + *  ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to selected one.
98875 + *  It will also consider src size to be arbitrarily "large", which is worst case.
98876 + *  If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation.
98877 + *  ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
98878 + *  ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1.
98879 + *  Note : CStream size estimation is only correct for single-threaded compression.
98880 + *  ZSTD_DStream memory budget depends on window Size.
98881 + *  This information can be passed manually, using ZSTD_estimateDStreamSize,
98882 + *  or deducted from a valid frame Header, using ZSTD_estimateDStreamSize_fromFrame();
98883 + *  Note : if streaming is init with function ZSTD_init?Stream_usingDict(),
98884 + *         an internal ?Dict will be created, which additional size is not estimated here.
98885 + *         In this case, get total size by adding ZSTD_estimate?DictSize */
98886 +ZSTDLIB_API size_t ZSTD_estimateCStreamSize(int compressionLevel);
98887 +ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams);
98888 +ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params);
98889 +ZSTDLIB_API size_t ZSTD_estimateDStreamSize(size_t windowSize);
98890 +ZSTDLIB_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize);
98892 +/*! ZSTD_estimate?DictSize() :
98893 + *  ZSTD_estimateCDictSize() will bet that src size is relatively "small", and content is copied, like ZSTD_createCDict().
98894 + *  ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced().
98895 + *  Note : dictionaries created by reference (`ZSTD_dlm_byRef`) are logically smaller.
98896 + */
98897 +ZSTDLIB_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel);
98898 +ZSTDLIB_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod);
98899 +ZSTDLIB_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod);
98901 +/*! ZSTD_initStatic*() :
98902 + *  Initialize an object using a pre-allocated fixed-size buffer.
98903 + *  workspace: The memory area to emplace the object into.
98904 + *             Provided pointer *must be 8-bytes aligned*.
98905 + *             Buffer must outlive object.
98906 + *  workspaceSize: Use ZSTD_estimate*Size() to determine
98907 + *                 how large workspace must be to support target scenario.
98908 + * @return : pointer to object (same address as workspace, just different type),
98909 + *           or NULL if error (size too small, incorrect alignment, etc.)
98910 + *  Note : zstd will never resize nor malloc() when using a static buffer.
98911 + *         If the object requires more memory than available,
98912 + *         zstd will just error out (typically ZSTD_error_memory_allocation).
98913 + *  Note 2 : there is no corresponding "free" function.
98914 + *           Since workspace is allocated externally, it must be freed externally too.
98915 + *  Note 3 : cParams : use ZSTD_getCParams() to convert a compression level
98916 + *           into its associated cParams.
98917 + *  Limitation 1 : currently not compatible with internal dictionary creation, triggered by
98918 + *                 ZSTD_CCtx_loadDictionary(), ZSTD_initCStream_usingDict() or ZSTD_initDStream_usingDict().
98919 + *  Limitation 2 : static cctx currently not compatible with multi-threading.
98920 + *  Limitation 3 : static dctx is incompatible with legacy support.
98921 + */
98922 +ZSTDLIB_API ZSTD_CCtx*    ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize);
98923 +ZSTDLIB_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize);    /**< same as ZSTD_initStaticCCtx() */
98925 +ZSTDLIB_API ZSTD_DCtx*    ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize);
98926 +ZSTDLIB_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize);    /**< same as ZSTD_initStaticDCtx() */
98928 +ZSTDLIB_API const ZSTD_CDict* ZSTD_initStaticCDict(
98929 +                                        void* workspace, size_t workspaceSize,
98930 +                                        const void* dict, size_t dictSize,
98931 +                                        ZSTD_dictLoadMethod_e dictLoadMethod,
98932 +                                        ZSTD_dictContentType_e dictContentType,
98933 +                                        ZSTD_compressionParameters cParams);
98935 +ZSTDLIB_API const ZSTD_DDict* ZSTD_initStaticDDict(
98936 +                                        void* workspace, size_t workspaceSize,
98937 +                                        const void* dict, size_t dictSize,
98938 +                                        ZSTD_dictLoadMethod_e dictLoadMethod,
98939 +                                        ZSTD_dictContentType_e dictContentType);
98942 +/*! Custom memory allocation :
98943 + *  These prototypes make it possible to pass your own allocation/free functions.
98944 + *  ZSTD_customMem is provided at creation time, using ZSTD_create*_advanced() variants listed below.
98945 + *  All allocation/free operations will be completed using these custom variants instead of regular <stdlib.h> ones.
98946 + */
98947 +typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size);
98948 +typedef void  (*ZSTD_freeFunction) (void* opaque, void* address);
98949 +typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem;
98950 +static
98951 +__attribute__((__unused__))
98952 +ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL };  /**< this constant defers to stdlib's functions */
98954 +ZSTDLIB_API ZSTD_CCtx*    ZSTD_createCCtx_advanced(ZSTD_customMem customMem);
98955 +ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
98956 +ZSTDLIB_API ZSTD_DCtx*    ZSTD_createDCtx_advanced(ZSTD_customMem customMem);
98957 +ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);
98959 +ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize,
98960 +                                                  ZSTD_dictLoadMethod_e dictLoadMethod,
98961 +                                                  ZSTD_dictContentType_e dictContentType,
98962 +                                                  ZSTD_compressionParameters cParams,
98963 +                                                  ZSTD_customMem customMem);
98965 +/* ! Thread pool :
98966 + * These prototypes make it possible to share a thread pool among multiple compression contexts.
98967 + * This can limit resources for applications with multiple threads where each one uses
98968 + * a threaded compression mode (via ZSTD_c_nbWorkers parameter).
98969 + * ZSTD_createThreadPool creates a new thread pool with a given number of threads.
98970 + * Note that the lifetime of such pool must exist while being used.
98971 + * ZSTD_CCtx_refThreadPool assigns a thread pool to a context (use NULL argument value
98972 + * to use an internal thread pool).
98973 + * ZSTD_freeThreadPool frees a thread pool, accepts NULL pointer.
98974 + */
98975 +typedef struct POOL_ctx_s ZSTD_threadPool;
98976 +ZSTDLIB_API ZSTD_threadPool* ZSTD_createThreadPool(size_t numThreads);
98977 +ZSTDLIB_API void ZSTD_freeThreadPool (ZSTD_threadPool* pool);  /* accept NULL pointer */
98978 +ZSTDLIB_API size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool);
98982 + * This API is temporary and is expected to change or disappear in the future!
98983 + */
98984 +ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced2(
98985 +    const void* dict, size_t dictSize,
98986 +    ZSTD_dictLoadMethod_e dictLoadMethod,
98987 +    ZSTD_dictContentType_e dictContentType,
98988 +    const ZSTD_CCtx_params* cctxParams,
98989 +    ZSTD_customMem customMem);
98991 +ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(
98992 +    const void* dict, size_t dictSize,
98993 +    ZSTD_dictLoadMethod_e dictLoadMethod,
98994 +    ZSTD_dictContentType_e dictContentType,
98995 +    ZSTD_customMem customMem);
98998 +/***************************************
98999 +*  Advanced compression functions
99000 +***************************************/
99002 +/*! ZSTD_createCDict_byReference() :
99003 + *  Create a digested dictionary for compression
99004 + *  Dictionary content is just referenced, not duplicated.
99005 + *  As a consequence, `dictBuffer` **must** outlive CDict,
99006 + *  and its content must remain unmodified throughout the lifetime of CDict.
99007 + *  note: equivalent to ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef */
99008 +ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
99010 +/*! ZSTD_getDictID_fromCDict() :
99011 + *  Provides the dictID of the dictionary loaded into `cdict`.
99012 + *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
99013 + *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
99014 +ZSTDLIB_API unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict);
99016 +/*! ZSTD_getCParams() :
99017 + * @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize.
99018 + * `estimatedSrcSize` value is optional, select 0 if not known */
99019 +ZSTDLIB_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
99021 +/*! ZSTD_getParams() :
99022 + *  same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`.
99023 + *  All fields of `ZSTD_frameParameters` are set to default : contentSize=1, checksum=0, noDictID=0 */
99024 +ZSTDLIB_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
99026 +/*! ZSTD_checkCParams() :
99027 + *  Ensure param values remain within authorized range.
99028 + * @return 0 on success, or an error code (can be checked with ZSTD_isError()) */
99029 +ZSTDLIB_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
99031 +/*! ZSTD_adjustCParams() :
99032 + *  optimize params for a given `srcSize` and `dictSize`.
99033 + * `srcSize` can be unknown, in which case use ZSTD_CONTENTSIZE_UNKNOWN.
99034 + * `dictSize` must be `0` when there is no dictionary.
99035 + *  cPar can be invalid : all parameters will be clamped within valid range in the @return struct.
99036 + *  This function never fails (wide contract) */
99037 +ZSTDLIB_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);
99039 +/*! ZSTD_compress_advanced() :
99040 + *  Note : this function is now DEPRECATED.
99041 + *         It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters.
99042 + *  This prototype will be marked as deprecated and generate compilation warning on reaching v1.5.x */
99043 +ZSTDLIB_API size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx,
99044 +                                          void* dst, size_t dstCapacity,
99045 +                                    const void* src, size_t srcSize,
99046 +                                    const void* dict,size_t dictSize,
99047 +                                          ZSTD_parameters params);
99049 +/*! ZSTD_compress_usingCDict_advanced() :
99050 + *  Note : this function is now REDUNDANT.
99051 + *         It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_loadDictionary() and other parameter setters.
99052 + *  This prototype will be marked as deprecated and generate compilation warning in some future version */
99053 +ZSTDLIB_API size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
99054 +                                              void* dst, size_t dstCapacity,
99055 +                                        const void* src, size_t srcSize,
99056 +                                        const ZSTD_CDict* cdict,
99057 +                                              ZSTD_frameParameters fParams);
99060 +/*! ZSTD_CCtx_loadDictionary_byReference() :
99061 + *  Same as ZSTD_CCtx_loadDictionary(), but dictionary content is referenced, instead of being copied into CCtx.
99062 + *  It saves some memory, but also requires that `dict` outlives its usage within `cctx` */
99063 +ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
99065 +/*! ZSTD_CCtx_loadDictionary_advanced() :
99066 + *  Same as ZSTD_CCtx_loadDictionary(), but gives finer control over
99067 + *  how to load the dictionary (by copy ? by reference ?)
99068 + *  and how to interpret it (automatic ? force raw mode ? full mode only ?) */
99069 +ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
99071 +/*! ZSTD_CCtx_refPrefix_advanced() :
99072 + *  Same as ZSTD_CCtx_refPrefix(), but gives finer control over
99073 + *  how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */
99074 +ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
99076 +/* ===   experimental parameters   === */
99077 +/* these parameters can be used with ZSTD_setParameter()
99078 + * they are not guaranteed to remain supported in the future */
99080 + /* Enables rsyncable mode,
99081 +  * which makes compressed files more rsync friendly
99082 +  * by adding periodic synchronization points to the compressed data.
99083 +  * The target average block size is ZSTD_c_jobSize / 2.
99084 +  * It's possible to modify the job size to increase or decrease
99085 +  * the granularity of the synchronization point.
99086 +  * Once the jobSize is smaller than the window size,
99087 +  * it will result in compression ratio degradation.
99088 +  * NOTE 1: rsyncable mode only works when multithreading is enabled.
99089 +  * NOTE 2: rsyncable performs poorly in combination with long range mode,
99090 +  * since it will decrease the effectiveness of synchronization points,
99091 +  * though mileage may vary.
99092 +  * NOTE 3: Rsyncable mode limits maximum compression speed to ~400 MB/s.
99093 +  * If the selected compression level is already running significantly slower,
99094 +  * the overall speed won't be significantly impacted.
99095 +  */
99096 + #define ZSTD_c_rsyncable ZSTD_c_experimentalParam1
99098 +/* Select a compression format.
99099 + * The value must be of type ZSTD_format_e.
99100 + * See ZSTD_format_e enum definition for details */
99101 +#define ZSTD_c_format ZSTD_c_experimentalParam2
99103 +/* Force back-reference distances to remain < windowSize,
99104 + * even when referencing into Dictionary content (default:0) */
99105 +#define ZSTD_c_forceMaxWindow ZSTD_c_experimentalParam3
99107 +/* Controls whether the contents of a CDict
99108 + * are used in place, or copied into the working context.
99109 + * Accepts values from the ZSTD_dictAttachPref_e enum.
99110 + * See the comments on that enum for an explanation of the feature. */
99111 +#define ZSTD_c_forceAttachDict ZSTD_c_experimentalParam4
99113 +/* Controls how the literals are compressed (default is auto).
99114 + * The value must be of type ZSTD_literalCompressionMode_e.
99115 + * See ZSTD_literalCompressionMode_t enum definition for details.
99116 + */
99117 +#define ZSTD_c_literalCompressionMode ZSTD_c_experimentalParam5
99119 +/* Tries to fit compressed block size to be around targetCBlockSize.
99120 + * No target when targetCBlockSize == 0.
99121 + * There is no guarantee on compressed block size (default:0) */
99122 +#define ZSTD_c_targetCBlockSize ZSTD_c_experimentalParam6
99124 +/* User's best guess of source size.
99125 + * Hint is not valid when srcSizeHint == 0.
99126 + * There is no guarantee that hint is close to actual source size,
99127 + * but compression ratio may regress significantly if guess considerably underestimates */
99128 +#define ZSTD_c_srcSizeHint ZSTD_c_experimentalParam7
99130 +/* Controls whether the new and experimental "dedicated dictionary search
99131 + * structure" can be used. This feature is still rough around the edges, be
99132 + * prepared for surprising behavior!
99133 + *
99134 + * How to use it:
99135 + *
99136 + * When using a CDict, whether to use this feature or not is controlled at
99137 + * CDict creation, and it must be set in a CCtxParams set passed into that
99138 + * construction (via ZSTD_createCDict_advanced2()). A compression will then
99139 + * use the feature or not based on how the CDict was constructed; the value of
99140 + * this param, set in the CCtx, will have no effect.
99141 + *
99142 + * However, when a dictionary buffer is passed into a CCtx, such as via
99143 + * ZSTD_CCtx_loadDictionary(), this param can be set on the CCtx to control
99144 + * whether the CDict that is created internally can use the feature or not.
99145 + *
99146 + * What it does:
99147 + *
99148 + * Normally, the internal data structures of the CDict are analogous to what
99149 + * would be stored in a CCtx after compressing the contents of a dictionary.
99150 + * To an approximation, a compression using a dictionary can then use those
99151 + * data structures to simply continue what is effectively a streaming
99152 + * compression where the simulated compression of the dictionary left off.
99153 + * Which is to say, the search structures in the CDict are normally the same
99154 + * format as in the CCtx.
99155 + *
99156 + * It is possible to do better, since the CDict is not like a CCtx: the search
99157 + * structures are written once during CDict creation, and then are only read
99158 + * after that, while the search structures in the CCtx are both read and
99159 + * written as the compression goes along. This means we can choose a search
99160 + * structure for the dictionary that is read-optimized.
99161 + *
99162 + * This feature enables the use of that different structure.
99163 + *
99164 + * Note that some of the members of the ZSTD_compressionParameters struct have
99165 + * different semantics and constraints in the dedicated search structure. It is
99166 + * highly recommended that you simply set a compression level in the CCtxParams
99167 + * you pass into the CDict creation call, and avoid messing with the cParams
99168 + * directly.
99169 + *
99170 + * Effects:
99171 + *
99172 + * This will only have any effect when the selected ZSTD_strategy
99173 + * implementation supports this feature. Currently, that's limited to
99174 + * ZSTD_greedy, ZSTD_lazy, and ZSTD_lazy2.
99175 + *
99176 + * Note that this means that the CDict tables can no longer be copied into the
99177 + * CCtx, so the dict attachment mode ZSTD_dictForceCopy will no longer be
99178 + * useable. The dictionary can only be attached or reloaded.
99179 + *
99180 + * In general, you should expect compression to be faster--sometimes very much
99181 + * so--and CDict creation to be slightly slower. Eventually, we will probably
99182 + * make this mode the default.
99183 + */
99184 +#define ZSTD_c_enableDedicatedDictSearch ZSTD_c_experimentalParam8
99186 +/* ZSTD_c_stableInBuffer
99187 + * Experimental parameter.
99188 + * Default is 0 == disabled. Set to 1 to enable.
99189 + *
99190 + * Tells the compressor that the ZSTD_inBuffer will ALWAYS be the same
99191 + * between calls, except for the modifications that zstd makes to pos (the
99192 + * caller must not modify pos). This is checked by the compressor, and
99193 + * compression will fail if it ever changes. This means the only flush
99194 + * mode that makes sense is ZSTD_e_end, so zstd will error if ZSTD_e_end
99195 + * is not used. The data in the ZSTD_inBuffer in the range [src, src + pos)
99196 + * MUST not be modified during compression or you will get data corruption.
99197 + *
99198 + * When this flag is enabled zstd won't allocate an input window buffer,
99199 + * because the user guarantees it can reference the ZSTD_inBuffer until
99200 + * the frame is complete. But, it will still allocate an output buffer
99201 + * large enough to fit a block (see ZSTD_c_stableOutBuffer). This will also
99202 + * avoid the memcpy() from the input buffer to the input window buffer.
99203 + *
99204 + * NOTE: ZSTD_compressStream2() will error if ZSTD_e_end is not used.
99205 + * That means this flag cannot be used with ZSTD_compressStream().
99206 + *
99207 + * NOTE: So long as the ZSTD_inBuffer always points to valid memory, using
99208 + * this flag is ALWAYS memory safe, and will never access out-of-bounds
99209 + * memory. However, compression WILL fail if you violate the preconditions.
99210 + *
99211 + * WARNING: The data in the ZSTD_inBuffer in the range [dst, dst + pos) MUST
99212 + * not be modified during compression or you will get data corruption. This
99213 + * is because zstd needs to reference data in the ZSTD_inBuffer to find
99214 + * matches. Normally zstd maintains its own window buffer for this purpose,
99215 + * but passing this flag tells zstd to use the user provided buffer.
99216 + */
99217 +#define ZSTD_c_stableInBuffer ZSTD_c_experimentalParam9
99219 +/* ZSTD_c_stableOutBuffer
99220 + * Experimental parameter.
99221 + * Default is 0 == disabled. Set to 1 to enable.
99222 + *
99223 + * Tells the compressor that the ZSTD_outBuffer will not be resized between
99224 + * calls. Specifically: (out.size - out.pos) will never grow. This gives the
99225 + * compressor the freedom to say: If the compressed data doesn't fit in the
99226 + * output buffer then return ZSTD_error_dstSizeTooSmall. This allows us to
99227 + * always decompress directly into the output buffer, instead of decompressing
99228 + * into an internal buffer and copying to the output buffer.
99229 + *
99230 + * When this flag is enabled zstd won't allocate an output buffer, because
99231 + * it can write directly to the ZSTD_outBuffer. It will still allocate the
99232 + * input window buffer (see ZSTD_c_stableInBuffer).
99233 + *
99234 + * Zstd will check that (out.size - out.pos) never grows and return an error
99235 + * if it does. While not strictly necessary, this should prevent surprises.
99236 + */
99237 +#define ZSTD_c_stableOutBuffer ZSTD_c_experimentalParam10
99239 +/* ZSTD_c_blockDelimiters
99240 + * Default is 0 == ZSTD_sf_noBlockDelimiters.
99241 + *
99242 + * For use with sequence compression API: ZSTD_compressSequences().
99243 + *
99244 + * Designates whether or not the given array of ZSTD_Sequence contains block delimiters
99245 + * and last literals, which are defined as sequences with offset == 0 and matchLength == 0.
99246 + * See the definition of ZSTD_Sequence for more specifics.
99247 + */
99248 +#define ZSTD_c_blockDelimiters ZSTD_c_experimentalParam11
99250 +/* ZSTD_c_validateSequences
99251 + * Default is 0 == disabled. Set to 1 to enable sequence validation.
99252 + *
99253 + * For use with sequence compression API: ZSTD_compressSequences().
99254 + * Designates whether or not we validate sequences provided to ZSTD_compressSequences()
99255 + * during function execution.
99256 + *
99257 + * Without validation, providing a sequence that does not conform to the zstd spec will cause
99258 + * undefined behavior, and may produce a corrupted block.
99259 + *
99260 + * With validation enabled, if a sequence is invalid (see doc/zstd_compression_format.md for
99261 + * specifics regarding offset/matchlength requirements) then the function will bail out and
99262 + * return an error.
99263 + *
99264 + */
99265 +#define ZSTD_c_validateSequences ZSTD_c_experimentalParam12
99267 +/*! ZSTD_CCtx_getParameter() :
99268 + *  Get the requested compression parameter value, selected by enum ZSTD_cParameter,
99269 + *  and store it into int* value.
99270 + * @return : 0, or an error code (which can be tested with ZSTD_isError()).
99271 + */
99272 +ZSTDLIB_API size_t ZSTD_CCtx_getParameter(const ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value);
99275 +/*! ZSTD_CCtx_params :
99276 + *  Quick howto :
99277 + *  - ZSTD_createCCtxParams() : Create a ZSTD_CCtx_params structure
99278 + *  - ZSTD_CCtxParams_setParameter() : Push parameters one by one into
99279 + *                                     an existing ZSTD_CCtx_params structure.
99280 + *                                     This is similar to
99281 + *                                     ZSTD_CCtx_setParameter().
99282 + *  - ZSTD_CCtx_setParametersUsingCCtxParams() : Apply parameters to
99283 + *                                    an existing CCtx.
99284 + *                                    These parameters will be applied to
99285 + *                                    all subsequent frames.
99286 + *  - ZSTD_compressStream2() : Do compression using the CCtx.
99287 + *  - ZSTD_freeCCtxParams() : Free the memory, accept NULL pointer.
99288 + *
99289 + *  This can be used with ZSTD_estimateCCtxSize_advanced_usingCCtxParams()
99290 + *  for static allocation of CCtx for single-threaded compression.
99291 + */
99292 +ZSTDLIB_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void);
99293 +ZSTDLIB_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params);  /* accept NULL pointer */
99295 +/*! ZSTD_CCtxParams_reset() :
99296 + *  Reset params to default values.
99297 + */
99298 +ZSTDLIB_API size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params);
99300 +/*! ZSTD_CCtxParams_init() :
99301 + *  Initializes the compression parameters of cctxParams according to
99302 + *  compression level. All other parameters are reset to their default values.
99303 + */
99304 +ZSTDLIB_API size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel);
99306 +/*! ZSTD_CCtxParams_init_advanced() :
99307 + *  Initializes the compression and frame parameters of cctxParams according to
99308 + *  params. All other parameters are reset to their default values.
99309 + */
99310 +ZSTDLIB_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params);
99312 +/*! ZSTD_CCtxParams_setParameter() :
99313 + *  Similar to ZSTD_CCtx_setParameter.
99314 + *  Set one compression parameter, selected by enum ZSTD_cParameter.
99315 + *  Parameters must be applied to a ZSTD_CCtx using
99316 + *  ZSTD_CCtx_setParametersUsingCCtxParams().
99317 + * @result : a code representing success or failure (which can be tested with
99318 + *           ZSTD_isError()).
99319 + */
99320 +ZSTDLIB_API size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int value);
99322 +/*! ZSTD_CCtxParams_getParameter() :
99323 + * Similar to ZSTD_CCtx_getParameter.
99324 + * Get the requested value of one compression parameter, selected by enum ZSTD_cParameter.
99325 + * @result : 0, or an error code (which can be tested with ZSTD_isError()).
99326 + */
99327 +ZSTDLIB_API size_t ZSTD_CCtxParams_getParameter(const ZSTD_CCtx_params* params, ZSTD_cParameter param, int* value);
99329 +/*! ZSTD_CCtx_setParametersUsingCCtxParams() :
99330 + *  Apply a set of ZSTD_CCtx_params to the compression context.
99331 + *  This can be done even after compression is started,
99332 + *    if nbWorkers==0, this will have no impact until a new compression is started.
99333 + *    if nbWorkers>=1, new parameters will be picked up at next job,
99334 + *       with a few restrictions (windowLog, pledgedSrcSize, nbWorkers, jobSize, and overlapLog are not updated).
99335 + */
99336 +ZSTDLIB_API size_t ZSTD_CCtx_setParametersUsingCCtxParams(
99337 +        ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params);
99339 +/*! ZSTD_compressStream2_simpleArgs() :
99340 + *  Same as ZSTD_compressStream2(),
99341 + *  but using only integral types as arguments.
99342 + *  This variant might be helpful for binders from dynamic languages
99343 + *  which have troubles handling structures containing memory pointers.
99344 + */
99345 +ZSTDLIB_API size_t ZSTD_compressStream2_simpleArgs (
99346 +                            ZSTD_CCtx* cctx,
99347 +                            void* dst, size_t dstCapacity, size_t* dstPos,
99348 +                      const void* src, size_t srcSize, size_t* srcPos,
99349 +                            ZSTD_EndDirective endOp);
99352 +/***************************************
99353 +*  Advanced decompression functions
99354 +***************************************/
99356 +/*! ZSTD_isFrame() :
99357 + *  Tells if the content of `buffer` starts with a valid Frame Identifier.
99358 + *  Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
99359 + *  Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
99360 + *  Note 3 : Skippable Frame Identifiers are considered valid. */
99361 +ZSTDLIB_API unsigned ZSTD_isFrame(const void* buffer, size_t size);
99363 +/*! ZSTD_createDDict_byReference() :
99364 + *  Create a digested dictionary, ready to start decompression operation without startup delay.
99365 + *  Dictionary content is referenced, and therefore stays in dictBuffer.
99366 + *  It is important that dictBuffer outlives DDict,
99367 + *  it must remain read accessible throughout the lifetime of DDict */
99368 +ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize);
99370 +/*! ZSTD_DCtx_loadDictionary_byReference() :
99371 + *  Same as ZSTD_DCtx_loadDictionary(),
99372 + *  but references `dict` content instead of copying it into `dctx`.
99373 + *  This saves memory if `dict` remains around.
99374 + *  However, it's imperative that `dict` remains accessible (and unmodified) while being used, so it must outlive decompression. */
99375 +ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
99377 +/*! ZSTD_DCtx_loadDictionary_advanced() :
99378 + *  Same as ZSTD_DCtx_loadDictionary(),
99379 + *  but gives direct control over
99380 + *  how to load the dictionary (by copy ? by reference ?)
99381 + *  and how to interpret it (automatic ? force raw mode ? full mode only ?). */
99382 +ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
99384 +/*! ZSTD_DCtx_refPrefix_advanced() :
99385 + *  Same as ZSTD_DCtx_refPrefix(), but gives finer control over
99386 + *  how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */
99387 +ZSTDLIB_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
99389 +/*! ZSTD_DCtx_setMaxWindowSize() :
99390 + *  Refuses allocating internal buffers for frames requiring a window size larger than provided limit.
99391 + *  This protects a decoder context from reserving too much memory for itself (potential attack scenario).
99392 + *  This parameter is only useful in streaming mode, since no internal buffer is allocated in single-pass mode.
99393 + *  By default, a decompression context accepts all window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT)
99394 + * @return : 0, or an error code (which can be tested using ZSTD_isError()).
99395 + */
99396 +ZSTDLIB_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize);
99398 +/*! ZSTD_DCtx_getParameter() :
99399 + *  Get the requested decompression parameter value, selected by enum ZSTD_dParameter,
99400 + *  and store it into int* value.
99401 + * @return : 0, or an error code (which can be tested with ZSTD_isError()).
99402 + */
99403 +ZSTDLIB_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value);
99405 +/* ZSTD_d_format
99406 + * experimental parameter,
99407 + * allowing selection between ZSTD_format_e input compression formats
99408 + */
99409 +#define ZSTD_d_format ZSTD_d_experimentalParam1
99410 +/* ZSTD_d_stableOutBuffer
99411 + * Experimental parameter.
99412 + * Default is 0 == disabled. Set to 1 to enable.
99413 + *
99414 + * Tells the decompressor that the ZSTD_outBuffer will ALWAYS be the same
99415 + * between calls, except for the modifications that zstd makes to pos (the
99416 + * caller must not modify pos). This is checked by the decompressor, and
99417 + * decompression will fail if it ever changes. Therefore the ZSTD_outBuffer
99418 + * MUST be large enough to fit the entire decompressed frame. This will be
99419 + * checked when the frame content size is known. The data in the ZSTD_outBuffer
99420 + * in the range [dst, dst + pos) MUST not be modified during decompression
99421 + * or you will get data corruption.
99422 + *
99423 + * When this flag is enabled zstd won't allocate an output buffer, because
99424 + * it can write directly to the ZSTD_outBuffer, but it will still allocate
99425 + * an input buffer large enough to fit any compressed block. This will also
99426 + * avoid the memcpy() from the internal output buffer to the ZSTD_outBuffer.
99427 + * If you need to avoid the input buffer allocation use the buffer-less
99428 + * streaming API.
99429 + *
99430 + * NOTE: So long as the ZSTD_outBuffer always points to valid memory, using
99431 + * this flag is ALWAYS memory safe, and will never access out-of-bounds
99432 + * memory. However, decompression WILL fail if you violate the preconditions.
99433 + *
99434 + * WARNING: The data in the ZSTD_outBuffer in the range [dst, dst + pos) MUST
99435 + * not be modified during decompression or you will get data corruption. This
99436 + * is because zstd needs to reference data in the ZSTD_outBuffer to regenerate
99437 + * matches. Normally zstd maintains its own buffer for this purpose, but passing
99438 + * this flag tells zstd to use the user provided buffer.
99439 + */
99440 +#define ZSTD_d_stableOutBuffer ZSTD_d_experimentalParam2
99442 +/* ZSTD_d_forceIgnoreChecksum
99443 + * Experimental parameter.
99444 + * Default is 0 == disabled. Set to 1 to enable
99445 + *
99446 + * Tells the decompressor to skip checksum validation during decompression, regardless
99447 + * of whether checksumming was specified during compression. This offers some
99448 + * slight performance benefits, and may be useful for debugging.
99449 + * Param has values of type ZSTD_forceIgnoreChecksum_e
99450 + */
99451 +#define ZSTD_d_forceIgnoreChecksum ZSTD_d_experimentalParam3
99453 +/* ZSTD_d_refMultipleDDicts
99454 + * Experimental parameter.
99455 + * Default is 0 == disabled. Set to 1 to enable
99456 + *
99457 + * If enabled and dctx is allocated on the heap, then additional memory will be allocated
99458 + * to store references to multiple ZSTD_DDict. That is, multiple calls of ZSTD_refDDict()
99459 + * using a given ZSTD_DCtx, rather than overwriting the previous DDict reference, will instead
99460 + * store all references. At decompression time, the appropriate dictID is selected
99461 + * from the set of DDicts based on the dictID in the frame.
99462 + *
99463 + * Usage is simply calling ZSTD_refDDict() on multiple dict buffers.
99464 + *
99465 + * Param has values of byte ZSTD_refMultipleDDicts_e
99466 + *
99467 + * WARNING: Enabling this parameter and calling ZSTD_DCtx_refDDict(), will trigger memory
99468 + * allocation for the hash table. ZSTD_freeDCtx() also frees this memory.
99469 + * Memory is allocated as per ZSTD_DCtx::customMem.
99470 + *
99471 + * Although this function allocates memory for the table, the user is still responsible for
99472 + * memory management of the underlying ZSTD_DDict* themselves.
99473 + */
99474 +#define ZSTD_d_refMultipleDDicts ZSTD_d_experimentalParam4
99477 +/*! ZSTD_DCtx_setFormat() :
99478 + *  Instruct the decoder context about what kind of data to decode next.
99479 + *  This instruction is mandatory to decode data without a fully-formed header,
99480 + *  such as ZSTD_f_zstd1_magicless for example.
99481 + * @return : 0, or an error code (which can be tested using ZSTD_isError()). */
99482 +ZSTDLIB_API size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format);
99484 +/*! ZSTD_decompressStream_simpleArgs() :
99485 + *  Same as ZSTD_decompressStream(),
99486 + *  but using only integral types as arguments.
99487 + *  This can be helpful for binders from dynamic languages
99488 + *  which have troubles handling structures containing memory pointers.
99489 + */
99490 +ZSTDLIB_API size_t ZSTD_decompressStream_simpleArgs (
99491 +                            ZSTD_DCtx* dctx,
99492 +                            void* dst, size_t dstCapacity, size_t* dstPos,
99493 +                      const void* src, size_t srcSize, size_t* srcPos);
99496 +/********************************************************************
99497 +*  Advanced streaming functions
99498 +*  Warning : most of these functions are now redundant with the Advanced API.
99499 +*  Once Advanced API reaches "stable" status,
99500 +*  redundant functions will be deprecated, and then at some point removed.
99501 +********************************************************************/
99503 +/*=====   Advanced Streaming compression functions  =====*/
99505 +/*! ZSTD_initCStream_srcSize() :
99506 + * This function is deprecated, and equivalent to:
99507 + *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
99508 + *     ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
99509 + *     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
99510 + *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
99511 + *
99512 + * pledgedSrcSize must be correct. If it is not known at init time, use
99513 + * ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs,
99514 + * "0" also disables frame content size field. It may be enabled in the future.
99515 + * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
99516 + */
99517 +ZSTDLIB_API size_t
99518 +ZSTD_initCStream_srcSize(ZSTD_CStream* zcs,
99519 +                         int compressionLevel,
99520 +                         unsigned long long pledgedSrcSize);
99522 +/*! ZSTD_initCStream_usingDict() :
99523 + * This function is deprecated, and is equivalent to:
99524 + *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
99525 + *     ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
99526 + *     ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
99527 + *
99528 + * Creates an internal CDict (incompatible with static CCtx), except if
99529 + * dict == NULL or dictSize < 8, in which case no dict is used.
99530 + * Note: dict is loaded with ZSTD_dct_auto (treated as a full zstd dictionary if
99531 + * it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy.
99532 + * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
99533 + */
99534 +ZSTDLIB_API size_t
99535 +ZSTD_initCStream_usingDict(ZSTD_CStream* zcs,
99536 +                     const void* dict, size_t dictSize,
99537 +                           int compressionLevel);
99539 +/*! ZSTD_initCStream_advanced() :
99540 + * This function is deprecated, and is approximately equivalent to:
99541 + *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
99542 + *     // Pseudocode: Set each zstd parameter and leave the rest as-is.
99543 + *     for ((param, value) : params) {
99544 + *         ZSTD_CCtx_setParameter(zcs, param, value);
99545 + *     }
99546 + *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
99547 + *     ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
99548 + *
99549 + * dict is loaded with ZSTD_dct_auto and ZSTD_dlm_byCopy.
99550 + * pledgedSrcSize must be correct.
99551 + * If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
99552 + * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
99553 + */
99554 +ZSTDLIB_API size_t
99555 +ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
99556 +                    const void* dict, size_t dictSize,
99557 +                          ZSTD_parameters params,
99558 +                          unsigned long long pledgedSrcSize);
99560 +/*! ZSTD_initCStream_usingCDict() :
99561 + * This function is deprecated, and equivalent to:
99562 + *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
99563 + *     ZSTD_CCtx_refCDict(zcs, cdict);
99564 + *
99565 + * note : cdict will just be referenced, and must outlive compression session
99566 + * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
99567 + */
99568 +ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
99570 +/*! ZSTD_initCStream_usingCDict_advanced() :
99571 + *   This function is DEPRECATED, and is approximately equivalent to:
99572 + *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
99573 + *     // Pseudocode: Set each zstd frame parameter and leave the rest as-is.
99574 + *     for ((fParam, value) : fParams) {
99575 + *         ZSTD_CCtx_setParameter(zcs, fParam, value);
99576 + *     }
99577 + *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
99578 + *     ZSTD_CCtx_refCDict(zcs, cdict);
99579 + *
99580 + * same as ZSTD_initCStream_usingCDict(), with control over frame parameters.
99581 + * pledgedSrcSize must be correct. If srcSize is not known at init time, use
99582 + * value ZSTD_CONTENTSIZE_UNKNOWN.
99583 + * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
99584 + */
99585 +ZSTDLIB_API size_t
99586 +ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
99587 +                               const ZSTD_CDict* cdict,
99588 +                                     ZSTD_frameParameters fParams,
99589 +                                     unsigned long long pledgedSrcSize);
99591 +/*! ZSTD_resetCStream() :
99592 + * This function is deprecated, and is equivalent to:
99593 + *     ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
99594 + *     ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
99595 + *
99596 + *  start a new frame, using same parameters from previous frame.
99597 + *  This is typically useful to skip dictionary loading stage, since it will re-use it in-place.
99598 + *  Note that zcs must be init at least once before using ZSTD_resetCStream().
99599 + *  If pledgedSrcSize is not known at reset time, use macro ZSTD_CONTENTSIZE_UNKNOWN.
99600 + *  If pledgedSrcSize > 0, its value must be correct, as it will be written in header, and controlled at the end.
99601 + *  For the time being, pledgedSrcSize==0 is interpreted as "srcSize unknown" for compatibility with older programs,
99602 + *  but it will change to mean "empty" in future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead.
99603 + * @return : 0, or an error code (which can be tested using ZSTD_isError())
99604 + *  Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
99605 + */
99606 +ZSTDLIB_API size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
99609 +typedef struct {
99610 +    unsigned long long ingested;   /* nb input bytes read and buffered */
99611 +    unsigned long long consumed;   /* nb input bytes actually compressed */
99612 +    unsigned long long produced;   /* nb of compressed bytes generated and buffered */
99613 +    unsigned long long flushed;    /* nb of compressed bytes flushed : not provided; can be tracked from caller side */
99614 +    unsigned currentJobID;         /* MT only : latest started job nb */
99615 +    unsigned nbActiveWorkers;      /* MT only : nb of workers actively compressing at probe time */
99616 +} ZSTD_frameProgression;
99618 +/* ZSTD_getFrameProgression() :
99619 + * tells how much data has been ingested (read from input)
99620 + * consumed (input actually compressed) and produced (output) for current frame.
99621 + * Note : (ingested - consumed) is amount of input data buffered internally, not yet compressed.
99622 + * Aggregates progression inside active worker threads.
99623 + */
99624 +ZSTDLIB_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx);
99626 +/*! ZSTD_toFlushNow() :
99627 + *  Tell how many bytes are ready to be flushed immediately.
99628 + *  Useful for multithreading scenarios (nbWorkers >= 1).
99629 + *  Probe the oldest active job, defined as oldest job not yet entirely flushed,
99630 + *  and check its output buffer.
99631 + * @return : amount of data stored in oldest job and ready to be flushed immediately.
99632 + *  if @return == 0, it means either :
99633 + *  + there is no active job (could be checked with ZSTD_frameProgression()), or
99634 + *  + oldest job is still actively compressing data,
99635 + *    but everything it has produced has also been flushed so far,
99636 + *    therefore flush speed is limited by production speed of oldest job
99637 + *    irrespective of the speed of concurrent (and newer) jobs.
99638 + */
99639 +ZSTDLIB_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx);
99642 +/*=====   Advanced Streaming decompression functions  =====*/
99644 +/*!
99645 + * This function is deprecated, and is equivalent to:
99646 + *
99647 + *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
99648 + *     ZSTD_DCtx_loadDictionary(zds, dict, dictSize);
99649 + *
99650 + * note: no dictionary will be used if dict == NULL or dictSize < 8
99651 + * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
99652 + */
99653 +ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
99655 +/*!
99656 + * This function is deprecated, and is equivalent to:
99657 + *
99658 + *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
99659 + *     ZSTD_DCtx_refDDict(zds, ddict);
99660 + *
99661 + * note : ddict is referenced, it must outlive decompression session
99662 + * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
99663 + */
99664 +ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);
99666 +/*!
99667 + * This function is deprecated, and is equivalent to:
99668 + *
99669 + *     ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
99670 + *
99671 + * re-use decompression parameters from previous init; saves dictionary loading
99672 + * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
99673 + */
99674 +ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
99677 +/*********************************************************************
99678 +*  Buffer-less and synchronous inner streaming functions
99680 +*  This is an advanced API, giving full control over buffer management, for users which need direct control over memory.
99681 +*  But it's also a complex one, with several restrictions, documented below.
99682 +*  Prefer normal streaming API for an easier experience.
99683 +********************************************************************* */
99685 +/**
99686 +  Buffer-less streaming compression (synchronous mode)
99688 +  A ZSTD_CCtx object is required to track streaming operations.
99689 +  Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource.
99690 +  ZSTD_CCtx object can be re-used multiple times within successive compression operations.
99692 +  Start by initializing a context.
99693 +  Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression,
99694 +  or ZSTD_compressBegin_advanced(), for finer parameter control.
99695 +  It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx()
99697 +  Then, consume your input using ZSTD_compressContinue().
99698 +  There are some important considerations to keep in mind when using this advanced function :
99699 +  - ZSTD_compressContinue() has no internal buffer. It uses externally provided buffers only.
99700 +  - Interface is synchronous : input is consumed entirely and produces 1+ compressed blocks.
99701 +  - Caller must ensure there is enough space in `dst` to store compressed data under worst case scenario.
99702 +    Worst case evaluation is provided by ZSTD_compressBound().
99703 +    ZSTD_compressContinue() doesn't guarantee recover after a failed compression.
99704 +  - ZSTD_compressContinue() presumes prior input ***is still accessible and unmodified*** (up to maximum distance size, see WindowLog).
99705 +    It remembers all previous contiguous blocks, plus one separated memory segment (which can itself consist of multiple contiguous blocks)
99706 +  - ZSTD_compressContinue() detects that prior input has been overwritten when `src` buffer overlaps.
99707 +    In which case, it will "discard" the relevant memory section from its history.
99709 +  Finish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum.
99710 +  It's possible to use srcSize==0, in which case, it will write a final empty block to end the frame.
99711 +  Without last block mark, frames are considered unfinished (hence corrupted) by compliant decoders.
99713 +  `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again.
99716 +/*=====   Buffer-less streaming compression functions  =====*/
99717 +ZSTDLIB_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
99718 +ZSTDLIB_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
99719 +ZSTDLIB_API size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */
99720 +ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */
99721 +ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize);   /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */
99722 +ZSTDLIB_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**<  note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */
99724 +ZSTDLIB_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
99725 +ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
99728 +/**
99729 +  Buffer-less streaming decompression (synchronous mode)
99731 +  A ZSTD_DCtx object is required to track streaming operations.
99732 +  Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.
99733 +  A ZSTD_DCtx object can be re-used multiple times.
99735 +  First typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader().
99736 +  Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough.
99737 +  Data fragment must be large enough to ensure successful decoding.
99738 + `ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.
99739 +  @result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
99740 +           >0 : `srcSize` is too small, please provide at least @result bytes on next attempt.
99741 +           errorCode, which can be tested using ZSTD_isError().
99743 +  It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,
99744 +  such as the dictionary ID, content size, or maximum back-reference distance (`windowSize`).
99745 +  Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information.
99746 +  As a consequence, check that values remain within valid application range.
99747 +  For example, do not allocate memory blindly, check that `windowSize` is within expectation.
99748 +  Each application can set its own limits, depending on local restrictions.
99749 +  For extended interoperability, it is recommended to support `windowSize` of at least 8 MB.
99751 +  ZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize` bytes.
99752 +  ZSTD_decompressContinue() is very sensitive to contiguity,
99753 +  if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place,
99754 +  or that previous contiguous segment is large enough to properly handle maximum back-reference distance.
99755 +  There are multiple ways to guarantee this condition.
99757 +  The most memory efficient way is to use a round buffer of sufficient size.
99758 +  Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(),
99759 +  which can @return an error code if required value is too large for current system (in 32-bits mode).
99760 +  In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to previous one,
99761 +  up to the moment there is not enough room left in the buffer to guarantee decoding another full block,
99762 +  which maximum size is provided in `ZSTD_frameHeader` structure, field `blockSizeMax`.
99763 +  At which point, decoding can resume from the beginning of the buffer.
99764 +  Note that already decoded data stored in the buffer should be flushed before being overwritten.
99766 +  There are alternatives possible, for example using two or more buffers of size `windowSize` each, though they consume more memory.
99768 +  Finally, if you control the compression process, you can also ignore all buffer size rules,
99769 +  as long as the encoder and decoder progress in "lock-step",
99770 +  aka use exactly the same buffer sizes, break contiguity at the same place, etc.
99772 +  Once buffers are setup, start decompression, with ZSTD_decompressBegin().
99773 +  If decompression requires a dictionary, use ZSTD_decompressBegin_usingDict() or ZSTD_decompressBegin_usingDDict().
99775 +  Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternatively.
99776 +  ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
99777 +  ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.
99779 + @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
99780 +  It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item.
99781 +  It can also be an error code, which can be tested with ZSTD_isError().
99783 +  A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
99784 +  Context can then be reset to start a new decompression.
99786 +  Note : it's possible to know if next input to present is a header or a block, using ZSTD_nextInputType().
99787 +  This information is not required to properly decode a frame.
99789 +  == Special case : skippable frames ==
99791 +  Skippable frames allow integration of user-defined data into a flow of concatenated frames.
99792 +  Skippable frames will be ignored (skipped) by decompressor.
99793 +  The format of skippable frames is as follows :
99794 +  a) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F
99795 +  b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
99796 +  c) Frame Content - any content (User Data) of length equal to Frame Size
99797 +  For skippable frames ZSTD_getFrameHeader() returns zfhPtr->frameType==ZSTD_skippableFrame.
99798 +  For skippable frames ZSTD_decompressContinue() always returns 0 : it only skips the content.
99801 +/*=====   Buffer-less streaming decompression functions  =====*/
99802 +typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e;
99803 +typedef struct {
99804 +    unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */
99805 +    unsigned long long windowSize;       /* can be very large, up to <= frameContentSize */
99806 +    unsigned blockSizeMax;
99807 +    ZSTD_frameType_e frameType;          /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */
99808 +    unsigned headerSize;
99809 +    unsigned dictID;
99810 +    unsigned checksumFlag;
99811 +} ZSTD_frameHeader;
99813 +/*! ZSTD_getFrameHeader() :
99814 + *  decode Frame Header, or requires larger `srcSize`.
99815 + * @return : 0, `zfhPtr` is correctly filled,
99816 + *          >0, `srcSize` is too small, value is wanted `srcSize` amount,
99817 + *           or an error code, which can be tested using ZSTD_isError() */
99818 +ZSTDLIB_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize);   /**< doesn't consume input */
99819 +/*! ZSTD_getFrameHeader_advanced() :
99820 + *  same as ZSTD_getFrameHeader(),
99821 + *  with added capability to select a format (like ZSTD_f_zstd1_magicless) */
99822 +ZSTDLIB_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format);
99823 +ZSTDLIB_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize);  /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */
99825 +ZSTDLIB_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx);
99826 +ZSTDLIB_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
99827 +ZSTDLIB_API size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
99829 +ZSTDLIB_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
99830 +ZSTDLIB_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
99832 +/* misc */
99833 +ZSTDLIB_API void   ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
99834 +typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e;
99835 +ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
99840 +/* ============================ */
99841 +/**       Block level API       */
99842 +/* ============================ */
99844 +/*!
99845 +    Block functions produce and decode raw zstd blocks, without frame metadata.
99846 +    Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes).
99847 +    But users will have to take in charge needed metadata to regenerate data, such as compressed and content sizes.
99849 +    A few rules to respect :
99850 +    - Compressing and decompressing require a context structure
99851 +      + Use ZSTD_createCCtx() and ZSTD_createDCtx()
99852 +    - It is necessary to init context before starting
99853 +      + compression : any ZSTD_compressBegin*() variant, including with dictionary
99854 +      + decompression : any ZSTD_decompressBegin*() variant, including with dictionary
99855 +      + copyCCtx() and copyDCtx() can be used too
99856 +    - Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB
99857 +      + If input is larger than a block size, it's necessary to split input data into multiple blocks
99858 +      + For inputs larger than a single block, consider using regular ZSTD_compress() instead.
99859 +        Frame metadata is not that costly, and quickly becomes negligible as source size grows larger than a block.
99860 +    - When a block is considered not compressible enough, ZSTD_compressBlock() result will be 0 (zero) !
99861 +      ===> In which case, nothing is produced into `dst` !
99862 +      + User __must__ test for such outcome and deal directly with uncompressed data
99863 +      + A block cannot be declared incompressible if ZSTD_compressBlock() return value was != 0.
99864 +        Doing so would mess up with statistics history, leading to potential data corruption.
99865 +      + ZSTD_decompressBlock() _doesn't accept uncompressed data as input_ !!
99866 +      + In case of multiple successive blocks, should some of them be uncompressed,
99867 +        decoder must be informed of their existence in order to follow proper history.
99868 +        Use ZSTD_insertBlock() for such a case.
99871 +/*=====   Raw zstd block functions  =====*/
99872 +ZSTDLIB_API size_t ZSTD_getBlockSize   (const ZSTD_CCtx* cctx);
99873 +ZSTDLIB_API size_t ZSTD_compressBlock  (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
99874 +ZSTDLIB_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
99875 +ZSTDLIB_API size_t ZSTD_insertBlock    (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize);  /**< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */
99878 +#endif   /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */
99879 diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
99880 index 167ca8c8424f..2fe4019b749f 100644
99881 --- a/include/media/v4l2-ctrls.h
99882 +++ b/include/media/v4l2-ctrls.h
99883 @@ -301,12 +301,14 @@ struct v4l2_ctrl {
99884   *             the control has been applied. This prevents applying controls
99885   *             from a cluster with multiple controls twice (when the first
99886   *             control of a cluster is applied, they all are).
99887 - * @req:       If set, this refers to another request that sets this control.
99888 + * @valid_p_req: If set, then p_req contains the control value for the request.
99889   * @p_req:     If the control handler containing this control reference
99890   *             is bound to a media request, then this points to the
99891 - *             value of the control that should be applied when the request
99892 + *             value of the control that must be applied when the request
99893   *             is executed, or to the value of the control at the time
99894 - *             that the request was completed.
99895 + *             that the request was completed. If @valid_p_req is false,
99896 + *             then this control was never set for this request and the
99897 + *             control will not be updated when this request is applied.
99898   *
99899   * Each control handler has a list of these refs. The list_head is used to
99900   * keep a sorted-by-control-ID list of all controls, while the next pointer
99901 @@ -319,7 +321,7 @@ struct v4l2_ctrl_ref {
99902         struct v4l2_ctrl_helper *helper;
99903         bool from_other_dev;
99904         bool req_done;
99905 -       struct v4l2_ctrl_ref *req;
99906 +       bool valid_p_req;
99907         union v4l2_ctrl_ptr p_req;
99908  };
99910 @@ -346,7 +348,7 @@ struct v4l2_ctrl_ref {
99911   * @error:     The error code of the first failed control addition.
99912   * @request_is_queued: True if the request was queued.
99913   * @requests:  List to keep track of open control handler request objects.
99914 - *             For the parent control handler (@req_obj.req == NULL) this
99915 + *             For the parent control handler (@req_obj.ops == NULL) this
99916   *             is the list header. When the parent control handler is
99917   *             removed, it has to unbind and put all these requests since
99918   *             they refer to the parent.
99919 diff --git a/include/net/addrconf.h b/include/net/addrconf.h
99920 index 18f783dcd55f..78ea3e332688 100644
99921 --- a/include/net/addrconf.h
99922 +++ b/include/net/addrconf.h
99923 @@ -233,7 +233,6 @@ void ipv6_mc_unmap(struct inet6_dev *idev);
99924  void ipv6_mc_remap(struct inet6_dev *idev);
99925  void ipv6_mc_init_dev(struct inet6_dev *idev);
99926  void ipv6_mc_destroy_dev(struct inet6_dev *idev);
99927 -int ipv6_mc_check_icmpv6(struct sk_buff *skb);
99928  int ipv6_mc_check_mld(struct sk_buff *skb);
99929  void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp);
99931 diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
99932 index ebdd4afe30d2..ca4ac6603b9a 100644
99933 --- a/include/net/bluetooth/hci_core.h
99934 +++ b/include/net/bluetooth/hci_core.h
99935 @@ -704,6 +704,7 @@ struct hci_chan {
99936         struct sk_buff_head data_q;
99937         unsigned int    sent;
99938         __u8            state;
99939 +       bool            amp;
99940  };
99942  struct hci_conn_params {
99943 diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
99944 index 3c8c59471bc1..2cdc5a0709fe 100644
99945 --- a/include/net/inet_connection_sock.h
99946 +++ b/include/net/inet_connection_sock.h
99947 @@ -134,8 +134,9 @@ struct inet_connection_sock {
99948         u32                       icsk_probes_tstamp;
99949         u32                       icsk_user_timeout;
99951 -       u64                       icsk_ca_priv[104 / sizeof(u64)];
99952 -#define ICSK_CA_PRIV_SIZE      (13 * sizeof(u64))
99953 +/* XXX inflated by temporary internal debugging info */
99954 +#define ICSK_CA_PRIV_SIZE      (216)
99955 +       u64                       icsk_ca_priv[ICSK_CA_PRIV_SIZE / sizeof(u64)];
99956  };
99958  #define ICSK_TIME_RETRANS      1       /* Retransmit timer */
99959 diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h
99960 index 1d34fe154fe0..434a6158852f 100644
99961 --- a/include/net/netfilter/nf_tables_offload.h
99962 +++ b/include/net/netfilter/nf_tables_offload.h
99963 @@ -4,11 +4,16 @@
99964  #include <net/flow_offload.h>
99965  #include <net/netfilter/nf_tables.h>
99967 +enum nft_offload_reg_flags {
99968 +       NFT_OFFLOAD_F_NETWORK2HOST      = (1 << 0),
99971  struct nft_offload_reg {
99972         u32             key;
99973         u32             len;
99974         u32             base_offset;
99975         u32             offset;
99976 +       u32             flags;
99977         struct nft_data data;
99978         struct nft_data mask;
99979  };
99980 @@ -45,6 +50,7 @@ struct nft_flow_key {
99981         struct flow_dissector_key_ports                 tp;
99982         struct flow_dissector_key_ip                    ip;
99983         struct flow_dissector_key_vlan                  vlan;
99984 +       struct flow_dissector_key_vlan                  cvlan;
99985         struct flow_dissector_key_eth_addrs             eth_addrs;
99986         struct flow_dissector_key_meta                  meta;
99987  } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
99988 @@ -71,13 +77,17 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net, const struct nft_rul
99989  void nft_flow_rule_destroy(struct nft_flow_rule *flow);
99990  int nft_flow_rule_offload_commit(struct net *net);
99992 -#define NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg)                \
99993 +#define NFT_OFFLOAD_MATCH_FLAGS(__key, __base, __field, __len, __reg, __flags) \
99994         (__reg)->base_offset    =                                       \
99995                 offsetof(struct nft_flow_key, __base);                  \
99996         (__reg)->offset         =                                       \
99997                 offsetof(struct nft_flow_key, __base.__field);          \
99998         (__reg)->len            = __len;                                \
99999         (__reg)->key            = __key;                                \
100000 +       (__reg)->flags          = __flags;
100002 +#define NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg)                \
100003 +       NFT_OFFLOAD_MATCH_FLAGS(__key, __base, __field, __len, __reg, 0)
100005  #define NFT_OFFLOAD_MATCH_EXACT(__key, __base, __field, __len, __reg)  \
100006         NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg)         \
100007 diff --git a/include/net/page_pool.h b/include/net/page_pool.h
100008 index b5b195305346..e05744b9a1bc 100644
100009 --- a/include/net/page_pool.h
100010 +++ b/include/net/page_pool.h
100011 @@ -198,7 +198,17 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
100013  static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
100015 -       return page->dma_addr;
100016 +       dma_addr_t ret = page->dma_addr[0];
100017 +       if (sizeof(dma_addr_t) > sizeof(unsigned long))
100018 +               ret |= (dma_addr_t)page->dma_addr[1] << 16 << 16;
100019 +       return ret;
100022 +static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
100024 +       page->dma_addr[0] = addr;
100025 +       if (sizeof(dma_addr_t) > sizeof(unsigned long))
100026 +               page->dma_addr[1] = upper_32_bits(addr);
100029  static inline bool is_page_pool_compiled_in(void)
100030 diff --git a/include/net/tcp.h b/include/net/tcp.h
100031 index 963cd86d12dd..5a86fa1d2ff1 100644
100032 --- a/include/net/tcp.h
100033 +++ b/include/net/tcp.h
100034 @@ -799,6 +799,11 @@ static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
100035         return max_t(s64, t1 - t0, 0);
100038 +static inline u32 tcp_stamp32_us_delta(u32 t1, u32 t0)
100040 +       return max_t(s32, t1 - t0, 0);
100043  static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
100045         return tcp_ns_to_ts(skb->skb_mstamp_ns);
100046 @@ -866,16 +871,22 @@ struct tcp_skb_cb {
100047         __u32           ack_seq;        /* Sequence number ACK'd        */
100048         union {
100049                 struct {
100050 +#define TCPCB_DELIVERED_CE_MASK ((1U<<20) - 1)
100051                         /* There is space for up to 24 bytes */
100052 -                       __u32 in_flight:30,/* Bytes in flight at transmit */
100053 -                             is_app_limited:1, /* cwnd not fully used? */
100054 -                             unused:1;
100055 +                       __u32 is_app_limited:1, /* cwnd not fully used? */
100056 +                             delivered_ce:20,
100057 +                             unused:11;
100058                         /* pkts S/ACKed so far upon tx of skb, incl retrans: */
100059                         __u32 delivered;
100060                         /* start of send pipeline phase */
100061 -                       u64 first_tx_mstamp;
100062 +                       u32 first_tx_mstamp;
100063                         /* when we reached the "delivered" count */
100064 -                       u64 delivered_mstamp;
100065 +                       u32 delivered_mstamp;
100066 +#define TCPCB_IN_FLIGHT_BITS 20
100067 +#define TCPCB_IN_FLIGHT_MAX ((1U << TCPCB_IN_FLIGHT_BITS) - 1)
100068 +                       u32 in_flight:20,   /* packets in flight at transmit */
100069 +                           unused2:12;
100070 +                       u32 lost;       /* packets lost so far upon tx of skb */
100071                 } tx;   /* only used for outgoing skbs */
100072                 union {
100073                         struct inet_skb_parm    h4;
100074 @@ -1025,7 +1036,11 @@ enum tcp_ca_ack_event_flags {
100075  #define TCP_CONG_NON_RESTRICTED 0x1
100076  /* Requires ECN/ECT set on all packets */
100077  #define TCP_CONG_NEEDS_ECN     0x2
100078 -#define TCP_CONG_MASK  (TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN)
100079 +/* Wants notification of CE events (CA_EVENT_ECN_IS_CE, CA_EVENT_ECN_NO_CE). */
100080 +#define TCP_CONG_WANTS_CE_EVENTS       0x4
100081 +#define TCP_CONG_MASK  (TCP_CONG_NON_RESTRICTED | \
100082 +                        TCP_CONG_NEEDS_ECN | \
100083 +                        TCP_CONG_WANTS_CE_EVENTS)
100085  union tcp_cc_info;
100087 @@ -1045,8 +1060,13 @@ struct ack_sample {
100088   */
100089  struct rate_sample {
100090         u64  prior_mstamp; /* starting timestamp for interval */
100091 +       u32  prior_lost;        /* tp->lost at "prior_mstamp" */
100092         u32  prior_delivered;   /* tp->delivered at "prior_mstamp" */
100093 +       u32  prior_delivered_ce;/* tp->delivered_ce at "prior_mstamp" */
100094 +       u32 tx_in_flight;       /* packets in flight at starting timestamp */
100095 +       s32  lost;              /* number of packets lost over interval */
100096         s32  delivered;         /* number of packets delivered over interval */
100097 +       s32  delivered_ce;      /* packets delivered w/ CE mark over interval */
100098         long interval_us;       /* time for tp->delivered to incr "delivered" */
100099         u32 snd_interval_us;    /* snd interval for delivered packets */
100100         u32 rcv_interval_us;    /* rcv interval for delivered packets */
100101 @@ -1057,6 +1077,7 @@ struct rate_sample {
100102         bool is_app_limited;    /* is sample from packet with bubble in pipe? */
100103         bool is_retrans;        /* is sample from retransmission? */
100104         bool is_ack_delayed;    /* is this (likely) a delayed ACK? */
100105 +       bool is_ece;            /* did this ACK have ECN marked? */
100108  struct tcp_congestion_ops {
100109 @@ -1083,10 +1104,12 @@ struct tcp_congestion_ops {
100110         u32  (*undo_cwnd)(struct sock *sk);
100111         /* hook for packet ack accounting (optional) */
100112         void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
100113 -       /* override sysctl_tcp_min_tso_segs */
100114 -       u32 (*min_tso_segs)(struct sock *sk);
100115 +       /* pick target number of segments per TSO/GSO skb (optional): */
100116 +       u32 (*tso_segs)(struct sock *sk, unsigned int mss_now);
100117         /* returns the multiplier used in tcp_sndbuf_expand (optional) */
100118         u32 (*sndbuf_expand)(struct sock *sk);
100119 +       /* react to a specific lost skb (optional) */
100120 +       void (*skb_marked_lost)(struct sock *sk, const struct sk_buff *skb);
100121         /* call when packets are delivered to update cwnd and pacing rate,
100122          * after all the ca_state processing. (optional)
100123          */
100124 @@ -1132,6 +1155,14 @@ static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
100126  #endif
100128 +static inline bool tcp_ca_wants_ce_events(const struct sock *sk)
100130 +       const struct inet_connection_sock *icsk = inet_csk(sk);
100132 +       return icsk->icsk_ca_ops->flags & (TCP_CONG_NEEDS_ECN |
100133 +                                          TCP_CONG_WANTS_CE_EVENTS);
100136  static inline bool tcp_ca_needs_ecn(const struct sock *sk)
100138         const struct inet_connection_sock *icsk = inet_csk(sk);
100139 @@ -1157,6 +1188,7 @@ static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
100142  /* From tcp_rate.c */
100143 +void tcp_set_tx_in_flight(struct sock *sk, struct sk_buff *skb);
100144  void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
100145  void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
100146                             struct rate_sample *rs);
100147 diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
100148 index 2568cb0627ec..fac8e89aed81 100644
100149 --- a/include/scsi/libfcoe.h
100150 +++ b/include/scsi/libfcoe.h
100151 @@ -249,7 +249,7 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *, struct fc_lport *,
100152                          struct fc_frame *);
100154  /* libfcoe funcs */
100155 -u64 fcoe_wwn_from_mac(unsigned char mac[], unsigned int, unsigned int);
100156 +u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN], unsigned int, unsigned int);
100157  int fcoe_libfc_config(struct fc_lport *, struct fcoe_ctlr *,
100158                       const struct libfc_function_template *, int init_fcp);
100159  u32 fcoe_fc_crc(struct fc_frame *fp);
100160 diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
100161 index 036eb1f5c133..2f01314de73a 100644
100162 --- a/include/trace/events/sunrpc.h
100163 +++ b/include/trace/events/sunrpc.h
100164 @@ -1141,7 +1141,6 @@ DECLARE_EVENT_CLASS(xprt_writelock_event,
100166  DEFINE_WRITELOCK_EVENT(reserve_xprt);
100167  DEFINE_WRITELOCK_EVENT(release_xprt);
100168 -DEFINE_WRITELOCK_EVENT(transmit_queued);
100170  DECLARE_EVENT_CLASS(xprt_cong_event,
100171         TP_PROTO(
100172 diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
100173 index ce58cff99b66..2778da551846 100644
100174 --- a/include/uapi/asm-generic/unistd.h
100175 +++ b/include/uapi/asm-generic/unistd.h
100176 @@ -864,8 +864,20 @@ __SC_COMP(__NR_epoll_pwait2, sys_epoll_pwait2, compat_sys_epoll_pwait2)
100177  #define __NR_mount_setattr 442
100178  __SYSCALL(__NR_mount_setattr, sys_mount_setattr)
100180 +#define __NR_futex_wait 443
100181 +__SYSCALL(__NR_futex_wait, sys_futex_wait)
100183 +#define __NR_futex_wake 444
100184 +__SYSCALL(__NR_futex_wake, sys_futex_wake)
100186 +#define __NR_futex_waitv 445
100187 +__SC_COMP(__NR_futex_waitv, sys_futex_waitv, compat_sys_futex_waitv)
100189 +#define __NR_futex_requeue 446
100190 +__SC_COMP(__NR_futex_requeue, sys_futex_requeue, compat_sys_futex_requeue)
100192  #undef __NR_syscalls
100193 -#define __NR_syscalls 443
100194 +#define __NR_syscalls 447
100197   * 32 bit systems traditionally used different
100198 diff --git a/include/uapi/linux/futex.h b/include/uapi/linux/futex.h
100199 index a89eb0accd5e..afc3245e5728 100644
100200 --- a/include/uapi/linux/futex.h
100201 +++ b/include/uapi/linux/futex.h
100202 @@ -21,6 +21,7 @@
100203  #define FUTEX_WAKE_BITSET      10
100204  #define FUTEX_WAIT_REQUEUE_PI  11
100205  #define FUTEX_CMP_REQUEUE_PI   12
100206 +#define FUTEX_WAIT_MULTIPLE    31
100208  #define FUTEX_PRIVATE_FLAG     128
100209  #define FUTEX_CLOCK_REALTIME   256
100210 @@ -40,6 +41,39 @@
100211                                          FUTEX_PRIVATE_FLAG)
100212  #define FUTEX_CMP_REQUEUE_PI_PRIVATE   (FUTEX_CMP_REQUEUE_PI | \
100213                                          FUTEX_PRIVATE_FLAG)
100214 +#define FUTEX_WAIT_MULTIPLE_PRIVATE    (FUTEX_WAIT_MULTIPLE | \
100215 +                                        FUTEX_PRIVATE_FLAG)
100217 +/* Size argument to futex2 syscall */
100218 +#define FUTEX_32       2
100220 +#define FUTEX_SIZE_MASK        0x3
100222 +#define FUTEX_SHARED_FLAG 8
100224 +#define FUTEX_WAITV_MAX 128
100227 + * struct futex_waitv - A waiter for vectorized wait
100228 + * @uaddr: User address to wait on
100229 + * @val:   Expected value at uaddr
100230 + * @flags: Flags for this waiter
100231 + */
100232 +struct futex_waitv {
100233 +       void __user *uaddr;
100234 +       unsigned int val;
100235 +       unsigned int flags;
100239 + * struct futex_requeue - Define an address and its flags for requeue operation
100240 + * @uaddr: User address of one of the requeue arguments
100241 + * @flags: Flags for this address
100242 + */
100243 +struct futex_requeue {
100244 +       void __user *uaddr;
100245 +       unsigned int flags;
100249   * Support for robust futexes: the kernel cleans up held futexes at
100250 @@ -150,4 +184,21 @@ struct robust_list_head {
100251    (((op & 0xf) << 28) | ((cmp & 0xf) << 24)            \
100252     | ((oparg & 0xfff) << 12) | (cmparg & 0xfff))
100255 + * Maximum number of multiple futexes to wait for
100256 + */
100257 +#define FUTEX_MULTIPLE_MAX_COUNT       128
100260 + * struct futex_wait_block - Block of futexes to be waited for
100261 + * @uaddr:     User address of the futex
100262 + * @val:       Futex value expected by userspace
100263 + * @bitset:    Bitset for the optional bitmasked wakeup
100264 + */
100265 +struct futex_wait_block {
100266 +       __u32 __user *uaddr;
100267 +       __u32 val;
100268 +       __u32 bitset;
100271  #endif /* _UAPI_LINUX_FUTEX_H */
100272 diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
100273 index 20ee93f0f876..96d52dd9c48a 100644
100274 --- a/include/uapi/linux/inet_diag.h
100275 +++ b/include/uapi/linux/inet_diag.h
100276 @@ -231,9 +231,42 @@ struct tcp_bbr_info {
100277         __u32   bbr_cwnd_gain;          /* cwnd gain shifted left 8 bits */
100280 +/* Phase as reported in netlink/ss stats. */
100281 +enum tcp_bbr2_phase {
100282 +       BBR2_PHASE_INVALID              = 0,
100283 +       BBR2_PHASE_STARTUP              = 1,
100284 +       BBR2_PHASE_DRAIN                = 2,
100285 +       BBR2_PHASE_PROBE_RTT            = 3,
100286 +       BBR2_PHASE_PROBE_BW_UP          = 4,
100287 +       BBR2_PHASE_PROBE_BW_DOWN        = 5,
100288 +       BBR2_PHASE_PROBE_BW_CRUISE      = 6,
100289 +       BBR2_PHASE_PROBE_BW_REFILL      = 7
100292 +struct tcp_bbr2_info {
100293 +       /* u64 bw: bandwidth (app throughput) estimate in Byte per sec: */
100294 +       __u32   bbr_bw_lsb;             /* lower 32 bits of bw */
100295 +       __u32   bbr_bw_msb;             /* upper 32 bits of bw */
100296 +       __u32   bbr_min_rtt;            /* min-filtered RTT in uSec */
100297 +       __u32   bbr_pacing_gain;        /* pacing gain shifted left 8 bits */
100298 +       __u32   bbr_cwnd_gain;          /* cwnd gain shifted left 8 bits */
100299 +       __u32   bbr_bw_hi_lsb;          /* lower 32 bits of bw_hi */
100300 +       __u32   bbr_bw_hi_msb;          /* upper 32 bits of bw_hi */
100301 +       __u32   bbr_bw_lo_lsb;          /* lower 32 bits of bw_lo */
100302 +       __u32   bbr_bw_lo_msb;          /* upper 32 bits of bw_lo */
100303 +       __u8    bbr_mode;               /* current bbr_mode in state machine */
100304 +       __u8    bbr_phase;              /* current state machine phase */
100305 +       __u8    unused1;                /* alignment padding; not used yet */
100306 +       __u8    bbr_version;            /* MUST be at this offset in struct */
100307 +       __u32   bbr_inflight_lo;        /* lower/short-term data volume bound */
100308 +       __u32   bbr_inflight_hi;        /* higher/long-term data volume bound */
100309 +       __u32   bbr_extra_acked;        /* max excess packets ACKed in epoch */
100312  union tcp_cc_info {
100313         struct tcpvegas_info    vegas;
100314         struct tcp_dctcp_info   dctcp;
100315         struct tcp_bbr_info     bbr;
100316 +       struct tcp_bbr2_info    bbr2;
100318  #endif /* _UAPI_INET_DIAG_H_ */
100319 diff --git a/include/uapi/linux/netfilter/xt_SECMARK.h b/include/uapi/linux/netfilter/xt_SECMARK.h
100320 index 1f2a708413f5..beb2cadba8a9 100644
100321 --- a/include/uapi/linux/netfilter/xt_SECMARK.h
100322 +++ b/include/uapi/linux/netfilter/xt_SECMARK.h
100323 @@ -20,4 +20,10 @@ struct xt_secmark_target_info {
100324         char secctx[SECMARK_SECCTX_MAX];
100327 +struct xt_secmark_target_info_v1 {
100328 +       __u8 mode;
100329 +       char secctx[SECMARK_SECCTX_MAX];
100330 +       __u32 secid;
100333  #endif /*_XT_SECMARK_H_target */
100334 diff --git a/include/uapi/linux/tty_flags.h b/include/uapi/linux/tty_flags.h
100335 index 900a32e63424..6a3ac496a56c 100644
100336 --- a/include/uapi/linux/tty_flags.h
100337 +++ b/include/uapi/linux/tty_flags.h
100338 @@ -39,7 +39,7 @@
100339   * WARNING: These flags are no longer used and have been superceded by the
100340   *         TTY_PORT_ flags in the iflags field (and not userspace-visible)
100341   */
100342 -#ifndef _KERNEL_
100343 +#ifndef __KERNEL__
100344  #define ASYNCB_INITIALIZED     31 /* Serial port was initialized */
100345  #define ASYNCB_SUSPENDED       30 /* Serial port is suspended */
100346  #define ASYNCB_NORMAL_ACTIVE   29 /* Normal device is active */
100347 @@ -81,7 +81,7 @@
100348  #define ASYNC_SPD_WARP         (ASYNC_SPD_HI|ASYNC_SPD_SHI)
100349  #define ASYNC_SPD_MASK         (ASYNC_SPD_HI|ASYNC_SPD_VHI|ASYNC_SPD_SHI)
100351 -#ifndef _KERNEL_
100352 +#ifndef __KERNEL__
100353  /* These flags are no longer used (and were always masked from userspace) */
100354  #define ASYNC_INITIALIZED      (1U << ASYNCB_INITIALIZED)
100355  #define ASYNC_NORMAL_ACTIVE    (1U << ASYNCB_NORMAL_ACTIVE)
100356 diff --git a/include/uapi/linux/usb/video.h b/include/uapi/linux/usb/video.h
100357 index d854cb19c42c..bfdae12cdacf 100644
100358 --- a/include/uapi/linux/usb/video.h
100359 +++ b/include/uapi/linux/usb/video.h
100360 @@ -302,9 +302,10 @@ struct uvc_processing_unit_descriptor {
100361         __u8   bControlSize;
100362         __u8   bmControls[2];
100363         __u8   iProcessing;
100364 +       __u8   bmVideoStandards;
100365  } __attribute__((__packed__));
100367 -#define UVC_DT_PROCESSING_UNIT_SIZE(n)                 (9+(n))
100368 +#define UVC_DT_PROCESSING_UNIT_SIZE(n)                 (10+(n))
100370  /* 3.7.2.6. Extension Unit Descriptor */
100371  struct uvc_extension_unit_descriptor {
100372 diff --git a/init/Kconfig b/init/Kconfig
100373 index 5f5c776ef192..f49c69d8a8b0 100644
100374 --- a/init/Kconfig
100375 +++ b/init/Kconfig
100376 @@ -830,6 +830,17 @@ config UCLAMP_BUCKETS_COUNT
100378  endmenu
100380 +config CACULE_SCHED
100381 +       bool "CacULE CPU scheduler"
100382 +       default y
100383 +       help
100384 +         The CacULE CPU scheduler is based on interactivity score mechanism.
100385 +         The interactivity score is inspired by the ULE scheduler (FreeBSD
100386 +         scheduler).
100388 +         If unsure, say Y here.
100392  # For architectures that want to enable the support for NUMA-affine scheduler
100393  # balancing logic:
100394 @@ -1220,6 +1231,18 @@ config SCHED_AUTOGROUP
100395           desktop applications.  Task group autogeneration is currently based
100396           upon task session.
100398 +config SCHED_AUTOGROUP_DEFAULT_ENABLED
100399 +       bool "Enable automatic process group scheduling feature"
100400 +       default y
100401 +       depends on SCHED_AUTOGROUP
100402 +       help
100403 +         If set, automatic process group scheduling will be enabled per
100404 +         default but can be disabled through passing autogroup=0 on the
100405 +         kernel commandline during boot or a value of 0 via the file
100406 +         proc/sys/kernel/sched_autogroup_enabled.
100408 +         If unsure say Y.
100410  config SYSFS_DEPRECATED
100411         bool "Enable deprecated sysfs features to support old userspace tools"
100412         depends on SYSFS
100413 @@ -1316,7 +1339,6 @@ config CC_OPTIMIZE_FOR_PERFORMANCE
100415  config CC_OPTIMIZE_FOR_PERFORMANCE_O3
100416         bool "Optimize more for performance (-O3)"
100417 -       depends on ARC
100418         help
100419           Choosing this option will pass "-O3" to your compiler to optimize
100420           the kernel yet more for performance.
100421 @@ -1537,6 +1559,13 @@ config FUTEX
100422           support for "fast userspace mutexes".  The resulting kernel may not
100423           run glibc-based applications correctly.
100425 +config FUTEX2
100426 +       bool "Enable futex2 support" if EXPERT
100427 +       depends on FUTEX
100428 +       default y
100429 +       help
100430 +         Support for futex2 interface.
100432  config FUTEX_PI
100433         bool
100434         depends on FUTEX && RT_MUTEXES
100435 @@ -2217,8 +2246,8 @@ config MODULE_COMPRESS
100436         bool "Compress modules on installation"
100437         help
100439 -         Compresses kernel modules when 'make modules_install' is run; gzip or
100440 -         xz depending on "Compression algorithm" below.
100441 +         Compresses kernel modules when 'make modules_install' is run; gzip,
100442 +         xz, or zstd depending on "Compression algorithm" below.
100444           module-init-tools MAY support gzip, and kmod MAY support gzip and xz.
100446 @@ -2240,7 +2269,7 @@ choice
100447           This determines which sort of compression will be used during
100448           'make modules_install'.
100450 -         GZIP (default) and XZ are supported.
100451 +         GZIP (default), XZ, and ZSTD are supported.
100453  config MODULE_COMPRESS_GZIP
100454         bool "GZIP"
100455 @@ -2248,6 +2277,9 @@ config MODULE_COMPRESS_GZIP
100456  config MODULE_COMPRESS_XZ
100457         bool "XZ"
100459 +config MODULE_COMPRESS_ZSTD
100460 +       bool "ZSTD"
100462  endchoice
100464  config MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
100465 diff --git a/init/init_task.c b/init/init_task.c
100466 index 3711cdaafed2..8b08c2e19cbb 100644
100467 --- a/init/init_task.c
100468 +++ b/init/init_task.c
100469 @@ -210,7 +210,7 @@ struct task_struct init_task
100470  #ifdef CONFIG_SECURITY
100471         .security       = NULL,
100472  #endif
100473 -#ifdef CONFIG_SECCOMP
100474 +#ifdef CONFIG_SECCOMP_FILTER
100475         .seccomp        = { .filter_count = ATOMIC_INIT(0) },
100476  #endif
100478 diff --git a/ipc/namespace.c b/ipc/namespace.c
100479 index 7bd0766ddc3b..2bb05b2dacd1 100644
100480 --- a/ipc/namespace.c
100481 +++ b/ipc/namespace.c
100482 @@ -172,6 +172,23 @@ void put_ipc_ns(struct ipc_namespace *ns)
100483                         schedule_work(&free_ipc_work);
100484         }
100486 +EXPORT_SYMBOL(put_ipc_ns);
100488 +struct ipc_namespace *get_ipc_ns_exported(struct ipc_namespace *ns)
100490 +       return get_ipc_ns(ns);
100492 +EXPORT_SYMBOL(get_ipc_ns_exported);
100494 +struct ipc_namespace *show_init_ipc_ns(void)
100496 +#if defined(CONFIG_IPC_NS)
100497 +       return &init_ipc_ns;
100498 +#else
100499 +       return NULL;
100500 +#endif
100502 +EXPORT_SYMBOL(show_init_ipc_ns);
100504  static inline struct ipc_namespace *to_ipc_ns(struct ns_common *ns)
100506 diff --git a/kernel/.gitignore b/kernel/.gitignore
100507 index 78701ea37c97..5518835ac35c 100644
100508 --- a/kernel/.gitignore
100509 +++ b/kernel/.gitignore
100510 @@ -1,4 +1,5 @@
100511  # SPDX-License-Identifier: GPL-2.0-only
100512 +/config_data
100513  kheaders.md5
100514  timeconst.h
100515  hz.bc
100516 diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
100517 index 38ef6d06888e..b4a1995149d0 100644
100518 --- a/kernel/Kconfig.hz
100519 +++ b/kernel/Kconfig.hz
100520 @@ -5,7 +5,7 @@
100522  choice
100523         prompt "Timer frequency"
100524 -       default HZ_250
100525 +       default HZ_500
100526         help
100527          Allows the configuration of the timer frequency. It is customary
100528          to have the timer interrupt run at 1000 Hz but 100 Hz may be more
100529 @@ -40,6 +40,13 @@ choice
100530          on SMP and NUMA systems and exactly dividing by both PAL and
100531          NTSC frame rates for video and multimedia work.
100533 +       config HZ_500
100534 +               bool "500 HZ"
100535 +       help
100536 +        500 Hz is a balanced timer frequency. Provides fast interactivity
100537 +        on desktops with great smoothness without increasing CPU power
100538 +        consumption and sacrificing the battery life on laptops.
100540         config HZ_1000
100541                 bool "1000 HZ"
100542         help
100543 @@ -53,6 +60,7 @@ config HZ
100544         default 100 if HZ_100
100545         default 250 if HZ_250
100546         default 300 if HZ_300
100547 +       default 500 if HZ_500
100548         default 1000 if HZ_1000
100550  config SCHED_HRTICK
100551 diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
100552 index 416017301660..293725c44cbb 100644
100553 --- a/kernel/Kconfig.preempt
100554 +++ b/kernel/Kconfig.preempt
100555 @@ -2,7 +2,7 @@
100557  choice
100558         prompt "Preemption Model"
100559 -       default PREEMPT_NONE
100560 +       default PREEMPT
100562  config PREEMPT_NONE
100563         bool "No Forced Preemption (Server)"
100564 diff --git a/kernel/Makefile b/kernel/Makefile
100565 index 320f1f3941b7..caf7fca27b62 100644
100566 --- a/kernel/Makefile
100567 +++ b/kernel/Makefile
100568 @@ -57,6 +57,7 @@ obj-$(CONFIG_PROFILING) += profile.o
100569  obj-$(CONFIG_STACKTRACE) += stacktrace.o
100570  obj-y += time/
100571  obj-$(CONFIG_FUTEX) += futex.o
100572 +obj-$(CONFIG_FUTEX2) += futex2.o
100573  obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
100574  obj-$(CONFIG_SMP) += smp.o
100575  ifneq ($(CONFIG_SMP),y)
100576 @@ -138,10 +139,15 @@ obj-$(CONFIG_SCF_TORTURE_TEST) += scftorture.o
100578  $(obj)/configs.o: $(obj)/config_data.gz
100580 -targets += config_data.gz
100581 -$(obj)/config_data.gz: $(KCONFIG_CONFIG) FORCE
100582 +targets += config_data config_data.gz
100583 +$(obj)/config_data.gz: $(obj)/config_data FORCE
100584         $(call if_changed,gzip)
100586 +filechk_cat = cat $<
100588 +$(obj)/config_data: $(KCONFIG_CONFIG) FORCE
100589 +       $(call filechk,cat)
100591  $(obj)/kheaders.o: $(obj)/kheaders_data.tar.xz
100593  quiet_cmd_genikh = CHK     $(obj)/kheaders_data.tar.xz
100594 diff --git a/kernel/bounds.c b/kernel/bounds.c
100595 index 9795d75b09b2..a8cbf2d0b11a 100644
100596 --- a/kernel/bounds.c
100597 +++ b/kernel/bounds.c
100598 @@ -22,6 +22,12 @@ int main(void)
100599         DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS));
100600  #endif
100601         DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t));
100602 +#ifdef CONFIG_LRU_GEN
100603 +       /* bits needed to represent internal values stored in page->flags */
100604 +       DEFINE(LRU_GEN_WIDTH, order_base_2(CONFIG_NR_LRU_GENS + 1));
100605 +       /* bits needed to represent normalized values for external uses */
100606 +       DEFINE(LRU_GEN_SHIFT, order_base_2(CONFIG_NR_LRU_GENS));
100607 +#endif
100608         /* End of constants */
100610         return 0;
100611 diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
100612 index f25b719ac786..84b3b35fc0d0 100644
100613 --- a/kernel/bpf/ringbuf.c
100614 +++ b/kernel/bpf/ringbuf.c
100615 @@ -221,25 +221,20 @@ static int ringbuf_map_get_next_key(struct bpf_map *map, void *key,
100616         return -ENOTSUPP;
100619 -static size_t bpf_ringbuf_mmap_page_cnt(const struct bpf_ringbuf *rb)
100621 -       size_t data_pages = (rb->mask + 1) >> PAGE_SHIFT;
100623 -       /* consumer page + producer page + 2 x data pages */
100624 -       return RINGBUF_POS_PAGES + 2 * data_pages;
100627  static int ringbuf_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
100629         struct bpf_ringbuf_map *rb_map;
100630 -       size_t mmap_sz;
100632         rb_map = container_of(map, struct bpf_ringbuf_map, map);
100633 -       mmap_sz = bpf_ringbuf_mmap_page_cnt(rb_map->rb) << PAGE_SHIFT;
100635 -       if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) > mmap_sz)
100636 -               return -EINVAL;
100638 +       if (vma->vm_flags & VM_WRITE) {
100639 +               /* allow writable mapping for the consumer_pos only */
100640 +               if (vma->vm_pgoff != 0 || vma->vm_end - vma->vm_start != PAGE_SIZE)
100641 +                       return -EPERM;
100642 +       } else {
100643 +               vma->vm_flags &= ~VM_MAYWRITE;
100644 +       }
100645 +       /* remap_vmalloc_range() checks size and offset constraints */
100646         return remap_vmalloc_range(vma, rb_map->rb,
100647                                    vma->vm_pgoff + RINGBUF_PGOFF);
100649 @@ -315,6 +310,9 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
100650                 return NULL;
100652         len = round_up(size + BPF_RINGBUF_HDR_SZ, 8);
100653 +       if (len > rb->mask + 1)
100654 +               return NULL;
100656         cons_pos = smp_load_acquire(&rb->consumer_pos);
100658         if (in_nmi()) {
100659 diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
100660 index 0399ac092b36..7fa6fc6bedf1 100644
100661 --- a/kernel/bpf/verifier.c
100662 +++ b/kernel/bpf/verifier.c
100663 @@ -1362,9 +1362,7 @@ static bool __reg64_bound_s32(s64 a)
100665  static bool __reg64_bound_u32(u64 a)
100667 -       if (a > U32_MIN && a < U32_MAX)
100668 -               return true;
100669 -       return false;
100670 +       return a > U32_MIN && a < U32_MAX;
100673  static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
100674 @@ -1375,10 +1373,10 @@ static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
100675                 reg->s32_min_value = (s32)reg->smin_value;
100676                 reg->s32_max_value = (s32)reg->smax_value;
100677         }
100678 -       if (__reg64_bound_u32(reg->umin_value))
100679 +       if (__reg64_bound_u32(reg->umin_value) && __reg64_bound_u32(reg->umax_value)) {
100680                 reg->u32_min_value = (u32)reg->umin_value;
100681 -       if (__reg64_bound_u32(reg->umax_value))
100682                 reg->u32_max_value = (u32)reg->umax_value;
100683 +       }
100685         /* Intersecting with the old var_off might have improved our bounds
100686          * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
100687 @@ -5952,6 +5950,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
100689         struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : tmp_aux;
100690         struct bpf_verifier_state *vstate = env->cur_state;
100691 +       bool off_is_imm = tnum_is_const(off_reg->var_off);
100692         bool off_is_neg = off_reg->smin_value < 0;
100693         bool ptr_is_dst_reg = ptr_reg == dst_reg;
100694         u8 opcode = BPF_OP(insn->code);
100695 @@ -5982,6 +5981,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
100696                 alu_limit = abs(tmp_aux->alu_limit - alu_limit);
100697         } else {
100698                 alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
100699 +               alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
100700                 alu_state |= ptr_is_dst_reg ?
100701                              BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
100702         }
100703 @@ -6538,11 +6538,10 @@ static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
100704         s32 smin_val = src_reg->s32_min_value;
100705         u32 umax_val = src_reg->u32_max_value;
100707 -       /* Assuming scalar64_min_max_and will be called so its safe
100708 -        * to skip updating register for known 32-bit case.
100709 -        */
100710 -       if (src_known && dst_known)
100711 +       if (src_known && dst_known) {
100712 +               __mark_reg32_known(dst_reg, var32_off.value);
100713                 return;
100714 +       }
100716         /* We get our minimum from the var_off, since that's inherently
100717          * bitwise.  Our maximum is the minimum of the operands' maxima.
100718 @@ -6562,7 +6561,6 @@ static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
100719                 dst_reg->s32_min_value = dst_reg->u32_min_value;
100720                 dst_reg->s32_max_value = dst_reg->u32_max_value;
100721         }
100725  static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
100726 @@ -6609,11 +6607,10 @@ static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
100727         s32 smin_val = src_reg->s32_min_value;
100728         u32 umin_val = src_reg->u32_min_value;
100730 -       /* Assuming scalar64_min_max_or will be called so it is safe
100731 -        * to skip updating register for known case.
100732 -        */
100733 -       if (src_known && dst_known)
100734 +       if (src_known && dst_known) {
100735 +               __mark_reg32_known(dst_reg, var32_off.value);
100736                 return;
100737 +       }
100739         /* We get our maximum from the var_off, and our minimum is the
100740          * maximum of the operands' minima
100741 @@ -6678,11 +6675,10 @@ static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
100742         struct tnum var32_off = tnum_subreg(dst_reg->var_off);
100743         s32 smin_val = src_reg->s32_min_value;
100745 -       /* Assuming scalar64_min_max_xor will be called so it is safe
100746 -        * to skip updating register for known case.
100747 -        */
100748 -       if (src_known && dst_known)
100749 +       if (src_known && dst_known) {
100750 +               __mark_reg32_known(dst_reg, var32_off.value);
100751                 return;
100752 +       }
100754         /* We get both minimum and maximum from the var32_off. */
100755         dst_reg->u32_min_value = var32_off.value;
100756 @@ -11740,7 +11736,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
100757                         const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
100758                         struct bpf_insn insn_buf[16];
100759                         struct bpf_insn *patch = &insn_buf[0];
100760 -                       bool issrc, isneg;
100761 +                       bool issrc, isneg, isimm;
100762                         u32 off_reg;
100764                         aux = &env->insn_aux_data[i + delta];
100765 @@ -11751,28 +11747,29 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
100766                         isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
100767                         issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
100768                                 BPF_ALU_SANITIZE_SRC;
100769 +                       isimm = aux->alu_state & BPF_ALU_IMMEDIATE;
100771                         off_reg = issrc ? insn->src_reg : insn->dst_reg;
100772 -                       if (isneg)
100773 -                               *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
100774 -                       *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
100775 -                       *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
100776 -                       *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
100777 -                       *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
100778 -                       *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
100779 -                       if (issrc) {
100780 -                               *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
100781 -                                                        off_reg);
100782 -                               insn->src_reg = BPF_REG_AX;
100783 +                       if (isimm) {
100784 +                               *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
100785                         } else {
100786 -                               *patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
100787 -                                                        BPF_REG_AX);
100788 +                               if (isneg)
100789 +                                       *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
100790 +                               *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
100791 +                               *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
100792 +                               *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
100793 +                               *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
100794 +                               *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
100795 +                               *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg);
100796                         }
100797 +                       if (!issrc)
100798 +                               *patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg);
100799 +                       insn->src_reg = BPF_REG_AX;
100800                         if (isneg)
100801                                 insn->code = insn->code == code_add ?
100802                                              code_sub : code_add;
100803                         *patch++ = *insn;
100804 -                       if (issrc && isneg)
100805 +                       if (issrc && isneg && !isimm)
100806                                 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
100807                         cnt = patch - insn_buf;
100809 diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
100810 index c10e855a03bc..fe4c01c14ab2 100644
100811 --- a/kernel/dma/swiotlb.c
100812 +++ b/kernel/dma/swiotlb.c
100813 @@ -608,7 +608,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
100814                 enum dma_data_direction dir, unsigned long attrs)
100816         unsigned int offset = swiotlb_align_offset(dev, orig_addr);
100817 -       unsigned int index, i;
100818 +       unsigned int i;
100819 +       int index;
100820         phys_addr_t tlb_addr;
100822         if (no_iotlb_memory)
100823 diff --git a/kernel/events/core.c b/kernel/events/core.c
100824 index 03db40f6cba9..c24ea952e7ae 100644
100825 --- a/kernel/events/core.c
100826 +++ b/kernel/events/core.c
100827 @@ -2204,6 +2204,26 @@ static void perf_group_detach(struct perf_event *event)
100828         perf_event__header_size(leader);
100831 +static void sync_child_event(struct perf_event *child_event);
100833 +static void perf_child_detach(struct perf_event *event)
100835 +       struct perf_event *parent_event = event->parent;
100837 +       if (!(event->attach_state & PERF_ATTACH_CHILD))
100838 +               return;
100840 +       event->attach_state &= ~PERF_ATTACH_CHILD;
100842 +       if (WARN_ON_ONCE(!parent_event))
100843 +               return;
100845 +       lockdep_assert_held(&parent_event->child_mutex);
100847 +       sync_child_event(event);
100848 +       list_del_init(&event->child_list);
100851  static bool is_orphaned_event(struct perf_event *event)
100853         return event->state == PERF_EVENT_STATE_DEAD;
100854 @@ -2311,6 +2331,7 @@ group_sched_out(struct perf_event *group_event,
100857  #define DETACH_GROUP   0x01UL
100858 +#define DETACH_CHILD   0x02UL
100861   * Cross CPU call to remove a performance event
100862 @@ -2334,6 +2355,8 @@ __perf_remove_from_context(struct perf_event *event,
100863         event_sched_out(event, cpuctx, ctx);
100864         if (flags & DETACH_GROUP)
100865                 perf_group_detach(event);
100866 +       if (flags & DETACH_CHILD)
100867 +               perf_child_detach(event);
100868         list_del_event(event, ctx);
100870         if (!ctx->nr_events && ctx->is_active) {
100871 @@ -2362,25 +2385,21 @@ static void perf_remove_from_context(struct perf_event *event, unsigned long fla
100873         lockdep_assert_held(&ctx->mutex);
100875 -       event_function_call(event, __perf_remove_from_context, (void *)flags);
100877         /*
100878 -        * The above event_function_call() can NO-OP when it hits
100879 -        * TASK_TOMBSTONE. In that case we must already have been detached
100880 -        * from the context (by perf_event_exit_event()) but the grouping
100881 -        * might still be in-tact.
100882 +        * Because of perf_event_exit_task(), perf_remove_from_context() ought
100883 +        * to work in the face of TASK_TOMBSTONE, unlike every other
100884 +        * event_function_call() user.
100885          */
100886 -       WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
100887 -       if ((flags & DETACH_GROUP) &&
100888 -           (event->attach_state & PERF_ATTACH_GROUP)) {
100889 -               /*
100890 -                * Since in that case we cannot possibly be scheduled, simply
100891 -                * detach now.
100892 -                */
100893 -               raw_spin_lock_irq(&ctx->lock);
100894 -               perf_group_detach(event);
100895 +       raw_spin_lock_irq(&ctx->lock);
100896 +       if (!ctx->is_active) {
100897 +               __perf_remove_from_context(event, __get_cpu_context(ctx),
100898 +                                          ctx, (void *)flags);
100899                 raw_spin_unlock_irq(&ctx->lock);
100900 +               return;
100901         }
100902 +       raw_spin_unlock_irq(&ctx->lock);
100904 +       event_function_call(event, __perf_remove_from_context, (void *)flags);
100908 @@ -11829,12 +11848,12 @@ SYSCALL_DEFINE5(perf_event_open,
100909                         return err;
100910         }
100912 -       err = security_locked_down(LOCKDOWN_PERF);
100913 -       if (err && (attr.sample_type & PERF_SAMPLE_REGS_INTR))
100914 -               /* REGS_INTR can leak data, lockdown must prevent this */
100915 -               return err;
100917 -       err = 0;
100918 +       /* REGS_INTR can leak data, lockdown must prevent this */
100919 +       if (attr.sample_type & PERF_SAMPLE_REGS_INTR) {
100920 +               err = security_locked_down(LOCKDOWN_PERF);
100921 +               if (err)
100922 +                       return err;
100923 +       }
100925         /*
100926          * In cgroup mode, the pid argument is used to pass the fd
100927 @@ -12373,14 +12392,17 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
100929  EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
100931 -static void sync_child_event(struct perf_event *child_event,
100932 -                              struct task_struct *child)
100933 +static void sync_child_event(struct perf_event *child_event)
100935         struct perf_event *parent_event = child_event->parent;
100936         u64 child_val;
100938 -       if (child_event->attr.inherit_stat)
100939 -               perf_event_read_event(child_event, child);
100940 +       if (child_event->attr.inherit_stat) {
100941 +               struct task_struct *task = child_event->ctx->task;
100943 +               if (task && task != TASK_TOMBSTONE)
100944 +                       perf_event_read_event(child_event, task);
100945 +       }
100947         child_val = perf_event_count(child_event);
100949 @@ -12395,60 +12417,53 @@ static void sync_child_event(struct perf_event *child_event,
100952  static void
100953 -perf_event_exit_event(struct perf_event *child_event,
100954 -                     struct perf_event_context *child_ctx,
100955 -                     struct task_struct *child)
100956 +perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx)
100958 -       struct perf_event *parent_event = child_event->parent;
100959 +       struct perf_event *parent_event = event->parent;
100960 +       unsigned long detach_flags = 0;
100962 -       /*
100963 -        * Do not destroy the 'original' grouping; because of the context
100964 -        * switch optimization the original events could've ended up in a
100965 -        * random child task.
100966 -        *
100967 -        * If we were to destroy the original group, all group related
100968 -        * operations would cease to function properly after this random
100969 -        * child dies.
100970 -        *
100971 -        * Do destroy all inherited groups, we don't care about those
100972 -        * and being thorough is better.
100973 -        */
100974 -       raw_spin_lock_irq(&child_ctx->lock);
100975 -       WARN_ON_ONCE(child_ctx->is_active);
100976 +       if (parent_event) {
100977 +               /*
100978 +                * Do not destroy the 'original' grouping; because of the
100979 +                * context switch optimization the original events could've
100980 +                * ended up in a random child task.
100981 +                *
100982 +                * If we were to destroy the original group, all group related
100983 +                * operations would cease to function properly after this
100984 +                * random child dies.
100985 +                *
100986 +                * Do destroy all inherited groups, we don't care about those
100987 +                * and being thorough is better.
100988 +                */
100989 +               detach_flags = DETACH_GROUP | DETACH_CHILD;
100990 +               mutex_lock(&parent_event->child_mutex);
100991 +       }
100993 -       if (parent_event)
100994 -               perf_group_detach(child_event);
100995 -       list_del_event(child_event, child_ctx);
100996 -       perf_event_set_state(child_event, PERF_EVENT_STATE_EXIT); /* is_event_hup() */
100997 -       raw_spin_unlock_irq(&child_ctx->lock);
100998 +       perf_remove_from_context(event, detach_flags);
101000 +       raw_spin_lock_irq(&ctx->lock);
101001 +       if (event->state > PERF_EVENT_STATE_EXIT)
101002 +               perf_event_set_state(event, PERF_EVENT_STATE_EXIT);
101003 +       raw_spin_unlock_irq(&ctx->lock);
101005         /*
101006 -        * Parent events are governed by their filedesc, retain them.
101007 +        * Child events can be freed.
101008          */
101009 -       if (!parent_event) {
101010 -               perf_event_wakeup(child_event);
101011 +       if (parent_event) {
101012 +               mutex_unlock(&parent_event->child_mutex);
101013 +               /*
101014 +                * Kick perf_poll() for is_event_hup();
101015 +                */
101016 +               perf_event_wakeup(parent_event);
101017 +               free_event(event);
101018 +               put_event(parent_event);
101019                 return;
101020         }
101021 -       /*
101022 -        * Child events can be cleaned up.
101023 -        */
101025 -       sync_child_event(child_event, child);
101027         /*
101028 -        * Remove this event from the parent's list
101029 -        */
101030 -       WARN_ON_ONCE(parent_event->ctx->parent_ctx);
101031 -       mutex_lock(&parent_event->child_mutex);
101032 -       list_del_init(&child_event->child_list);
101033 -       mutex_unlock(&parent_event->child_mutex);
101035 -       /*
101036 -        * Kick perf_poll() for is_event_hup().
101037 +        * Parent events are governed by their filedesc, retain them.
101038          */
101039 -       perf_event_wakeup(parent_event);
101040 -       free_event(child_event);
101041 -       put_event(parent_event);
101042 +       perf_event_wakeup(event);
101045  static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
101046 @@ -12505,7 +12520,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
101047         perf_event_task(child, child_ctx, 0);
101049         list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
101050 -               perf_event_exit_event(child_event, child_ctx, child);
101051 +               perf_event_exit_event(child_event, child_ctx);
101053         mutex_unlock(&child_ctx->mutex);
101055 @@ -12765,6 +12780,7 @@ inherit_event(struct perf_event *parent_event,
101056          */
101057         raw_spin_lock_irqsave(&child_ctx->lock, flags);
101058         add_event_to_ctx(child_event, child_ctx);
101059 +       child_event->attach_state |= PERF_ATTACH_CHILD;
101060         raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
101062         /*
101063 diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
101064 index 6addc9780319..4e93e5602723 100644
101065 --- a/kernel/events/uprobes.c
101066 +++ b/kernel/events/uprobes.c
101067 @@ -184,7 +184,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
101068         if (new_page) {
101069                 get_page(new_page);
101070                 page_add_new_anon_rmap(new_page, vma, addr, false);
101071 -               lru_cache_add_inactive_or_unevictable(new_page, vma);
101072 +               lru_cache_add_page_vma(new_page, vma, false);
101073         } else
101074                 /* no new page, just dec_mm_counter for old_page */
101075                 dec_mm_counter(mm, MM_ANONPAGES);
101076 diff --git a/kernel/exit.c b/kernel/exit.c
101077 index 04029e35e69a..e4292717ce37 100644
101078 --- a/kernel/exit.c
101079 +++ b/kernel/exit.c
101080 @@ -422,6 +422,7 @@ void mm_update_next_owner(struct mm_struct *mm)
101081                 goto retry;
101082         }
101083         WRITE_ONCE(mm->owner, c);
101084 +       lru_gen_migrate_mm(mm);
101085         task_unlock(c);
101086         put_task_struct(c);
101088 diff --git a/kernel/fork.c b/kernel/fork.c
101089 index 426cd0c51f9e..c54400f24fb2 100644
101090 --- a/kernel/fork.c
101091 +++ b/kernel/fork.c
101092 @@ -107,6 +107,11 @@
101094  #define CREATE_TRACE_POINTS
101095  #include <trace/events/task.h>
101096 +#ifdef CONFIG_USER_NS
101097 +extern int unprivileged_userns_clone;
101098 +#else
101099 +#define unprivileged_userns_clone 0
101100 +#endif
101103   * Minimum number of threads to boot the kernel
101104 @@ -665,6 +670,7 @@ static void check_mm(struct mm_struct *mm)
101105  #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
101106         VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
101107  #endif
101108 +       VM_BUG_ON_MM(lru_gen_mm_is_active(mm), mm);
101111  #define allocate_mm()  (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
101112 @@ -1055,6 +1061,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
101113                 goto fail_nocontext;
101115         mm->user_ns = get_user_ns(user_ns);
101116 +       lru_gen_init_mm(mm);
101117         return mm;
101119  fail_nocontext:
101120 @@ -1097,6 +1104,7 @@ static inline void __mmput(struct mm_struct *mm)
101121         }
101122         if (mm->binfmt)
101123                 module_put(mm->binfmt->module);
101124 +       lru_gen_del_mm(mm);
101125         mmdrop(mm);
101128 @@ -1128,6 +1136,7 @@ void mmput_async(struct mm_struct *mm)
101129                 schedule_work(&mm->async_put_work);
101130         }
101132 +EXPORT_SYMBOL(mmput_async);
101133  #endif
101135  /**
101136 @@ -1316,6 +1325,8 @@ static void mm_release(struct task_struct *tsk, struct mm_struct *mm)
101137                         put_user(0, tsk->clear_child_tid);
101138                         do_futex(tsk->clear_child_tid, FUTEX_WAKE,
101139                                         1, NULL, NULL, 0, 0);
101140 +                       ksys_futex_wake(tsk->clear_child_tid, 1,
101141 +                                       FUTEX_32 | FUTEX_SHARED_FLAG);
101142                 }
101143                 tsk->clear_child_tid = NULL;
101144         }
101145 @@ -1872,6 +1883,10 @@ static __latent_entropy struct task_struct *copy_process(
101146         if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
101147                 return ERR_PTR(-EINVAL);
101149 +       if ((clone_flags & CLONE_NEWUSER) && !unprivileged_userns_clone)
101150 +               if (!capable(CAP_SYS_ADMIN))
101151 +                       return ERR_PTR(-EPERM);
101153         /*
101154          * Thread groups must share signals as well, and detached threads
101155          * can only be started up within the thread group.
101156 @@ -2521,6 +2536,13 @@ pid_t kernel_clone(struct kernel_clone_args *args)
101157                 get_task_struct(p);
101158         }
101160 +       if (IS_ENABLED(CONFIG_LRU_GEN) && !(clone_flags & CLONE_VM)) {
101161 +               /* lock the task to synchronize with memcg migration */
101162 +               task_lock(p);
101163 +               lru_gen_add_mm(p->mm);
101164 +               task_unlock(p);
101165 +       }
101167         wake_up_new_task(p);
101169         /* forking complete and child started to run, tell ptracer */
101170 @@ -2971,6 +2993,12 @@ int ksys_unshare(unsigned long unshare_flags)
101171         if (unshare_flags & CLONE_NEWNS)
101172                 unshare_flags |= CLONE_FS;
101174 +       if ((unshare_flags & CLONE_NEWUSER) && !unprivileged_userns_clone) {
101175 +               err = -EPERM;
101176 +               if (!capable(CAP_SYS_ADMIN))
101177 +                       goto bad_unshare_out;
101178 +       }
101180         err = check_unshare_flags(unshare_flags);
101181         if (err)
101182                 goto bad_unshare_out;
101183 diff --git a/kernel/futex.c b/kernel/futex.c
101184 index 00febd6dea9c..f923d2da4b40 100644
101185 --- a/kernel/futex.c
101186 +++ b/kernel/futex.c
101187 @@ -198,6 +198,8 @@ struct futex_pi_state {
101188   * @rt_waiter:         rt_waiter storage for use with requeue_pi
101189   * @requeue_pi_key:    the requeue_pi target futex key
101190   * @bitset:            bitset for the optional bitmasked wakeup
101191 + * @uaddr:             userspace address of futex
101192 + * @uval:              expected futex's value
101193   *
101194   * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so
101195   * we can wake only the relevant ones (hashed queues may be shared).
101196 @@ -220,6 +222,8 @@ struct futex_q {
101197         struct rt_mutex_waiter *rt_waiter;
101198         union futex_key *requeue_pi_key;
101199         u32 bitset;
101200 +       u32 __user *uaddr;
101201 +       u32 uval;
101202  } __randomize_layout;
101204  static const struct futex_q futex_q_init = {
101205 @@ -2313,6 +2317,29 @@ static int unqueue_me(struct futex_q *q)
101206         return ret;
101210 + * unqueue_multiple() - Remove several futexes from their futex_hash_bucket
101211 + * @q: The list of futexes to unqueue
101212 + * @count: Number of futexes in the list
101214 + * Helper to unqueue a list of futexes. This can't fail.
101216 + * Return:
101217 + *  - >=0 - Index of the last futex that was awoken;
101218 + *  - -1  - If no futex was awoken
101219 + */
101220 +static int unqueue_multiple(struct futex_q *q, int count)
101222 +       int ret = -1;
101223 +       int i;
101225 +       for (i = 0; i < count; i++) {
101226 +               if (!unqueue_me(&q[i]))
101227 +                       ret = i;
101228 +       }
101229 +       return ret;
101233   * PI futexes can not be requeued and must remove themself from the
101234   * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
101235 @@ -2680,6 +2707,205 @@ static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
101236         return ret;
101240 + * futex_wait_multiple_setup() - Prepare to wait and enqueue multiple futexes
101241 + * @qs:                The corresponding futex list
101242 + * @count:     The size of the lists
101243 + * @flags:     Futex flags (FLAGS_SHARED, etc.)
101244 + * @awaken:    Index of the last awoken futex
101246 + * Prepare multiple futexes in a single step and enqueue them. This may fail if
101247 + * the futex list is invalid or if any futex was already awoken. On success the
101248 + * task is ready to interruptible sleep.
101250 + * Return:
101251 + *  -  1 - One of the futexes was awaken by another thread
101252 + *  -  0 - Success
101253 + *  - <0 - -EFAULT, -EWOULDBLOCK or -EINVAL
101254 + */
101255 +static int futex_wait_multiple_setup(struct futex_q *qs, int count,
101256 +                                    unsigned int flags, int *awaken)
101258 +       struct futex_hash_bucket *hb;
101259 +       int ret, i;
101260 +       u32 uval;
101262 +       /*
101263 +        * Enqueuing multiple futexes is tricky, because we need to
101264 +        * enqueue each futex in the list before dealing with the next
101265 +        * one to avoid deadlocking on the hash bucket.  But, before
101266 +        * enqueuing, we need to make sure that current->state is
101267 +        * TASK_INTERRUPTIBLE, so we don't absorb any awake events, which
101268 +        * cannot be done before the get_futex_key of the next key,
101269 +        * because it calls get_user_pages, which can sleep.  Thus, we
101270 +        * fetch the list of futexes keys in two steps, by first pinning
101271 +        * all the memory keys in the futex key, and only then we read
101272 +        * each key and queue the corresponding futex.
101273 +        */
101274 +retry:
101275 +       for (i = 0; i < count; i++) {
101276 +               qs[i].key = FUTEX_KEY_INIT;
101277 +               ret = get_futex_key(qs[i].uaddr, flags & FLAGS_SHARED,
101278 +                                   &qs[i].key, FUTEX_READ);
101279 +               if (unlikely(ret)) {
101280 +                       return ret;
101281 +               }
101282 +       }
101284 +       set_current_state(TASK_INTERRUPTIBLE);
101286 +       for (i = 0; i < count; i++) {
101287 +               struct futex_q *q = &qs[i];
101289 +               hb = queue_lock(q);
101291 +               ret = get_futex_value_locked(&uval, q->uaddr);
101292 +               if (ret) {
101293 +                       /*
101294 +                        * We need to try to handle the fault, which
101295 +                        * cannot be done without sleep, so we need to
101296 +                        * undo all the work already done, to make sure
101297 +                        * we don't miss any wake ups.  Therefore, clean
101298 +                        * up, handle the fault and retry from the
101299 +                        * beginning.
101300 +                        */
101301 +                       queue_unlock(hb);
101303 +                       /*
101304 +                        * Keys 0..(i-1) are implicitly put
101305 +                        * on unqueue_multiple.
101306 +                        */
101307 +                       *awaken = unqueue_multiple(qs, i);
101309 +                       __set_current_state(TASK_RUNNING);
101311 +                       /*
101312 +                        * On a real fault, prioritize the error even if
101313 +                        * some other futex was awoken.  Userspace gave
101314 +                        * us a bad address, -EFAULT them.
101315 +                        */
101316 +                       ret = get_user(uval, q->uaddr);
101317 +                       if (ret)
101318 +                               return ret;
101320 +                       /*
101321 +                        * Even if the page fault was handled, If
101322 +                        * something was already awaken, we can safely
101323 +                        * give up and succeed to give a hint for userspace to
101324 +                        * acquire the right futex faster.
101325 +                        */
101326 +                       if (*awaken >= 0)
101327 +                               return 1;
101329 +                       goto retry;
101330 +               }
101332 +               if (uval != q->uval) {
101333 +                       queue_unlock(hb);
101335 +                       /*
101336 +                        * If something was already awaken, we can
101337 +                        * safely ignore the error and succeed.
101338 +                        */
101339 +                       *awaken = unqueue_multiple(qs, i);
101340 +                       __set_current_state(TASK_RUNNING);
101341 +                       if (*awaken >= 0)
101342 +                               return 1;
101344 +                       return -EWOULDBLOCK;
101345 +               }
101347 +               /*
101348 +                * The bucket lock can't be held while dealing with the
101349 +                * next futex. Queue each futex at this moment so hb can
101350 +                * be unlocked.
101351 +                */
101352 +               queue_me(&qs[i], hb);
101353 +       }
101354 +       return 0;
101358 + * futex_wait_multiple() - Prepare to wait on and enqueue several futexes
101359 + * @qs:                The list of futexes to wait on
101360 + * @op:                Operation code from futex's syscall
101361 + * @count:     The number of objects
101362 + * @abs_time:  Timeout before giving up and returning to userspace
101364 + * Entry point for the FUTEX_WAIT_MULTIPLE futex operation, this function
101365 + * sleeps on a group of futexes and returns on the first futex that
101366 + * triggered, or after the timeout has elapsed.
101368 + * Return:
101369 + *  - >=0 - Hint to the futex that was awoken
101370 + *  - <0  - On error
101371 + */
101372 +static int futex_wait_multiple(struct futex_q *qs, int op,
101373 +                              u32 count, ktime_t *abs_time)
101375 +       struct hrtimer_sleeper timeout, *to;
101376 +       int ret, flags = 0, hint = 0;
101377 +       unsigned int i;
101379 +       if (!(op & FUTEX_PRIVATE_FLAG))
101380 +               flags |= FLAGS_SHARED;
101382 +       if (op & FUTEX_CLOCK_REALTIME)
101383 +               flags |= FLAGS_CLOCKRT;
101385 +       to = futex_setup_timer(abs_time, &timeout, flags, 0);
101386 +       while (1) {
101387 +               ret = futex_wait_multiple_setup(qs, count, flags, &hint);
101388 +               if (ret) {
101389 +                       if (ret > 0) {
101390 +                               /* A futex was awaken during setup */
101391 +                               ret = hint;
101392 +                       }
101393 +                       break;
101394 +               }
101396 +               if (to)
101397 +                       hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
101399 +               /*
101400 +                * Avoid sleeping if another thread already tried to
101401 +                * wake us.
101402 +                */
101403 +               for (i = 0; i < count; i++) {
101404 +                       if (plist_node_empty(&qs[i].list))
101405 +                               break;
101406 +               }
101408 +               if (i == count && (!to || to->task))
101409 +                       freezable_schedule();
101411 +               ret = unqueue_multiple(qs, count);
101413 +               __set_current_state(TASK_RUNNING);
101415 +               if (ret >= 0)
101416 +                       break;
101417 +               if (to && !to->task) {
101418 +                       ret = -ETIMEDOUT;
101419 +                       break;
101420 +               } else if (signal_pending(current)) {
101421 +                       ret = -ERESTARTSYS;
101422 +                       break;
101423 +               }
101424 +               /*
101425 +                * The final case is a spurious wakeup, for
101426 +                * which just retry.
101427 +                */
101428 +       }
101430 +       if (to) {
101431 +               hrtimer_cancel(&to->timer);
101432 +               destroy_hrtimer_on_stack(&to->timer);
101433 +       }
101435 +       return ret;
101438  static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
101439                       ktime_t *abs_time, u32 bitset)
101441 @@ -3711,8 +3937,7 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
101443         if (op & FUTEX_CLOCK_REALTIME) {
101444                 flags |= FLAGS_CLOCKRT;
101445 -               if (cmd != FUTEX_WAIT && cmd != FUTEX_WAIT_BITSET && \
101446 -                   cmd != FUTEX_WAIT_REQUEUE_PI)
101447 +               if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
101448                         return -ENOSYS;
101449         }
101451 @@ -3759,6 +3984,43 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
101452         return -ENOSYS;
101456 + * futex_read_wait_block - Read an array of futex_wait_block from userspace
101457 + * @uaddr:     Userspace address of the block
101458 + * @count:     Number of blocks to be read
101460 + * This function creates and allocate an array of futex_q (we zero it to
101461 + * initialize the fields) and then, for each futex_wait_block element from
101462 + * userspace, fill a futex_q element with proper values.
101463 + */
101464 +inline struct futex_q *futex_read_wait_block(u32 __user *uaddr, u32 count)
101466 +       unsigned int i;
101467 +       struct futex_q *qs;
101468 +       struct futex_wait_block fwb;
101469 +       struct futex_wait_block __user *entry =
101470 +               (struct futex_wait_block __user *)uaddr;
101472 +       if (!count || count > FUTEX_MULTIPLE_MAX_COUNT)
101473 +               return ERR_PTR(-EINVAL);
101475 +       qs = kcalloc(count, sizeof(*qs), GFP_KERNEL);
101476 +       if (!qs)
101477 +               return ERR_PTR(-ENOMEM);
101479 +       for (i = 0; i < count; i++) {
101480 +               if (copy_from_user(&fwb, &entry[i], sizeof(fwb))) {
101481 +                       kfree(qs);
101482 +                       return ERR_PTR(-EFAULT);
101483 +               }
101485 +               qs[i].uaddr = fwb.uaddr;
101486 +               qs[i].uval = fwb.val;
101487 +               qs[i].bitset = fwb.bitset;
101488 +       }
101490 +       return qs;
101493  SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
101494                 const struct __kernel_timespec __user *, utime,
101495 @@ -3771,7 +4033,8 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
101497         if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
101498                       cmd == FUTEX_WAIT_BITSET ||
101499 -                     cmd == FUTEX_WAIT_REQUEUE_PI)) {
101500 +                     cmd == FUTEX_WAIT_REQUEUE_PI ||
101501 +                     cmd == FUTEX_WAIT_MULTIPLE)) {
101502                 if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
101503                         return -EFAULT;
101504                 if (get_timespec64(&ts, utime))
101505 @@ -3780,9 +4043,9 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
101506                         return -EINVAL;
101508                 t = timespec64_to_ktime(ts);
101509 -               if (cmd == FUTEX_WAIT)
101510 +               if (cmd == FUTEX_WAIT || cmd == FUTEX_WAIT_MULTIPLE)
101511                         t = ktime_add_safe(ktime_get(), t);
101512 -               else if (!(op & FUTEX_CLOCK_REALTIME))
101513 +               else if (cmd != FUTEX_LOCK_PI && !(op & FUTEX_CLOCK_REALTIME))
101514                         t = timens_ktime_to_host(CLOCK_MONOTONIC, t);
101515                 tp = &t;
101516         }
101517 @@ -3794,6 +4057,25 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
101518             cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
101519                 val2 = (u32) (unsigned long) utime;
101521 +       if (cmd == FUTEX_WAIT_MULTIPLE) {
101522 +               int ret;
101523 +               struct futex_q *qs;
101525 +#ifdef CONFIG_X86_X32
101526 +               if (unlikely(in_x32_syscall()))
101527 +                       return -ENOSYS;
101528 +#endif
101529 +               qs = futex_read_wait_block(uaddr, val);
101531 +               if (IS_ERR(qs))
101532 +                       return PTR_ERR(qs);
101534 +               ret = futex_wait_multiple(qs, op, val, tp);
101535 +               kfree(qs);
101537 +               return ret;
101538 +       }
101540         return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
101543 @@ -3956,6 +4238,58 @@ COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
101544  #endif /* CONFIG_COMPAT */
101546  #ifdef CONFIG_COMPAT_32BIT_TIME
101548 + * struct compat_futex_wait_block - Block of futexes to be waited for
101549 + * @uaddr:     User address of the futex (compatible pointer)
101550 + * @val:       Futex value expected by userspace
101551 + * @bitset:    Bitset for the optional bitmasked wakeup
101552 + */
101553 +struct compat_futex_wait_block {
101554 +       compat_uptr_t   uaddr;
101555 +       __u32 pad;
101556 +       __u32 val;
101557 +       __u32 bitset;
101561 + * compat_futex_read_wait_block - Read an array of futex_wait_block from
101562 + * userspace
101563 + * @uaddr:     Userspace address of the block
101564 + * @count:     Number of blocks to be read
101566 + * This function does the same as futex_read_wait_block(), except that it
101567 + * converts the pointer to the futex from the compat version to the regular one.
101568 + */
101569 +inline struct futex_q *compat_futex_read_wait_block(u32 __user *uaddr,
101570 +                                                   u32 count)
101572 +       unsigned int i;
101573 +       struct futex_q *qs;
101574 +       struct compat_futex_wait_block fwb;
101575 +       struct compat_futex_wait_block __user *entry =
101576 +               (struct compat_futex_wait_block __user *)uaddr;
101578 +       if (!count || count > FUTEX_MULTIPLE_MAX_COUNT)
101579 +               return ERR_PTR(-EINVAL);
101581 +       qs = kcalloc(count, sizeof(*qs), GFP_KERNEL);
101582 +       if (!qs)
101583 +               return ERR_PTR(-ENOMEM);
101585 +       for (i = 0; i < count; i++) {
101586 +               if (copy_from_user(&fwb, &entry[i], sizeof(fwb))) {
101587 +                       kfree(qs);
101588 +                       return ERR_PTR(-EFAULT);
101589 +               }
101591 +               qs[i].uaddr = compat_ptr(fwb.uaddr);
101592 +               qs[i].uval = fwb.val;
101593 +               qs[i].bitset = fwb.bitset;
101594 +       }
101596 +       return qs;
101599  SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
101600                 const struct old_timespec32 __user *, utime, u32 __user *, uaddr2,
101601                 u32, val3)
101602 @@ -3967,16 +4301,17 @@ SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
101604         if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
101605                       cmd == FUTEX_WAIT_BITSET ||
101606 -                     cmd == FUTEX_WAIT_REQUEUE_PI)) {
101607 +                     cmd == FUTEX_WAIT_REQUEUE_PI ||
101608 +                     cmd == FUTEX_WAIT_MULTIPLE)) {
101609                 if (get_old_timespec32(&ts, utime))
101610                         return -EFAULT;
101611                 if (!timespec64_valid(&ts))
101612                         return -EINVAL;
101614                 t = timespec64_to_ktime(ts);
101615 -               if (cmd == FUTEX_WAIT)
101616 +               if (cmd == FUTEX_WAIT || cmd == FUTEX_WAIT_MULTIPLE)
101617                         t = ktime_add_safe(ktime_get(), t);
101618 -               else if (!(op & FUTEX_CLOCK_REALTIME))
101619 +               else if (cmd != FUTEX_LOCK_PI && !(op & FUTEX_CLOCK_REALTIME))
101620                         t = timens_ktime_to_host(CLOCK_MONOTONIC, t);
101621                 tp = &t;
101622         }
101623 @@ -3984,6 +4319,19 @@ SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
101624             cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
101625                 val2 = (int) (unsigned long) utime;
101627 +       if (cmd == FUTEX_WAIT_MULTIPLE) {
101628 +               int ret;
101629 +               struct futex_q *qs = compat_futex_read_wait_block(uaddr, val);
101631 +               if (IS_ERR(qs))
101632 +                       return PTR_ERR(qs);
101634 +               ret = futex_wait_multiple(qs, op, val, tp);
101635 +               kfree(qs);
101637 +               return ret;
101638 +       }
101640         return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
101642  #endif /* CONFIG_COMPAT_32BIT_TIME */
101643 diff --git a/kernel/futex2.c b/kernel/futex2.c
101644 new file mode 100644
101645 index 000000000000..dd6f54ae0220
101646 --- /dev/null
101647 +++ b/kernel/futex2.c
101648 @@ -0,0 +1,1239 @@
101649 +// SPDX-License-Identifier: GPL-2.0-or-later
101651 + * futex2 system call interface by André Almeida <andrealmeid@collabora.com>
101653 + * Copyright 2021 Collabora Ltd.
101655 + * Based on original futex implementation by:
101656 + *  (C) 2002 Rusty Russell, IBM
101657 + *  (C) 2003, 2006 Ingo Molnar, Red Hat Inc.
101658 + *  (C) 2003, 2004 Jamie Lokier
101659 + *  (C) 2006 Thomas Gleixner, Timesys Corp.
101660 + *  (C) 2007 Eric Dumazet
101661 + *  (C) 2009 Darren Hart, IBM
101662 + */
101664 +#include <linux/freezer.h>
101665 +#include <linux/hugetlb.h>
101666 +#include <linux/jhash.h>
101667 +#include <linux/memblock.h>
101668 +#include <linux/pagemap.h>
101669 +#include <linux/sched/wake_q.h>
101670 +#include <linux/spinlock.h>
101671 +#include <linux/syscalls.h>
101672 +#include <uapi/linux/futex.h>
101674 +#ifdef CONFIG_X86_64
101675 +#include <linux/compat.h>
101676 +#endif
101679 + * struct futex_key - Components to build unique key for a futex
101680 + * @pointer: Pointer to current->mm or inode's UUID for file backed futexes
101681 + * @index: Start address of the page containing futex or index of the page
101682 + * @offset: Address offset of uaddr in a page
101683 + */
101684 +struct futex_key {
101685 +       u64 pointer;
101686 +       unsigned long index;
101687 +       unsigned long offset;
101691 + * struct futex_waiter - List entry for a waiter
101692 + * @uaddr:        Virtual address of userspace futex
101693 + * @key:          Information that uniquely identify a futex
101694 + * @list:        List node struct
101695 + * @val:         Expected value for this waiter
101696 + * @flags:        Flags
101697 + * @bucket:       Pointer to the bucket for this waiter
101698 + * @index:        Index of waiter in futexv list
101699 + */
101700 +struct futex_waiter {
101701 +       void __user *uaddr;
101702 +       struct futex_key key;
101703 +       struct list_head list;
101704 +       unsigned int val;
101705 +       unsigned int flags;
101706 +       struct futex_bucket *bucket;
101707 +       unsigned int index;
101711 + * struct futex_waiter_head - List of futexes to be waited
101712 + * @task:    Task to be awaken
101713 + * @hint:    Was someone on this list awakened?
101714 + * @objects: List of futexes
101715 + */
101716 +struct futex_waiter_head {
101717 +       struct task_struct *task;
101718 +       bool hint;
101719 +       struct futex_waiter objects[0];
101723 + * struct futex_bucket - A bucket of futex's hash table
101724 + * @waiters: Number of waiters in the bucket
101725 + * @lock:    Bucket lock
101726 + * @list:    List of waiters on this bucket
101727 + */
101728 +struct futex_bucket {
101729 +       atomic_t waiters;
101730 +       spinlock_t lock;
101731 +       struct list_head list;
101734 +/* Mask for futex2 flag operations */
101735 +#define FUTEX2_MASK (FUTEX_SIZE_MASK | FUTEX_CLOCK_REALTIME | FUTEX_SHARED_FLAG)
101737 +/* Mask for sys_futex_waitv flag */
101738 +#define FUTEXV_MASK (FUTEX_CLOCK_REALTIME)
101740 +/* Mask for each futex in futex_waitv list */
101741 +#define FUTEXV_WAITER_MASK (FUTEX_SIZE_MASK | FUTEX_SHARED_FLAG)
101743 +#define is_object_shared ((futexv->objects[i].flags & FUTEX_SHARED_FLAG) ? true : false)
101745 +#define FUT_OFF_INODE    1 /* We set bit 0 if key has a reference on inode */
101746 +#define FUT_OFF_MMSHARED 2 /* We set bit 1 if key has a reference on mm */
101748 +static struct futex_bucket *futex_table;
101749 +static unsigned int futex2_hashsize;
101752 + * Reflects a new waiter being added to the waitqueue.
101753 + */
101754 +static inline void bucket_inc_waiters(struct futex_bucket *bucket)
101756 +#ifdef CONFIG_SMP
101757 +       atomic_inc(&bucket->waiters);
101758 +       /*
101759 +        * Issue a barrier after adding so futex_wake() will see that the
101760 +        * value had increased
101761 +        */
101762 +       smp_mb__after_atomic();
101763 +#endif
101767 + * Reflects a waiter being removed from the waitqueue by wakeup
101768 + * paths.
101769 + */
101770 +static inline void bucket_dec_waiters(struct futex_bucket *bucket)
101772 +#ifdef CONFIG_SMP
101773 +       atomic_dec(&bucket->waiters);
101774 +#endif
101778 + * Get the number of waiters in a bucket
101779 + */
101780 +static inline int bucket_get_waiters(struct futex_bucket *bucket)
101782 +#ifdef CONFIG_SMP
101783 +       /*
101784 +        * Issue a barrier before reading so we get an updated value from
101785 +        * futex_wait()
101786 +        */
101787 +       smp_mb();
101788 +       return atomic_read(&bucket->waiters);
101789 +#else
101790 +       return 1;
101791 +#endif
101795 + * futex_get_inode_uuid - Gets an UUID for an inode
101796 + * @inode: inode to get UUID
101798 + * Generate a machine wide unique identifier for this inode.
101800 + * This relies on u64 not wrapping in the life-time of the machine; which with
101801 + * 1ns resolution means almost 585 years.
101803 + * This further relies on the fact that a well formed program will not unmap
101804 + * the file while it has a (shared) futex waiting on it. This mapping will have
101805 + * a file reference which pins the mount and inode.
101807 + * If for some reason an inode gets evicted and read back in again, it will get
101808 + * a new sequence number and will _NOT_ match, even though it is the exact same
101809 + * file.
101811 + * It is important that match_futex() will never have a false-positive, esp.
101812 + * for PI futexes that can mess up the state. The above argues that false-negatives
101813 + * are only possible for malformed programs.
101815 + * Returns: UUID for the given inode
101816 + */
101817 +static u64 futex_get_inode_uuid(struct inode *inode)
101819 +       static atomic64_t i_seq;
101820 +       u64 old;
101822 +       /* Does the inode already have a sequence number? */
101823 +       old = atomic64_read(&inode->i_sequence2);
101825 +       if (likely(old))
101826 +               return old;
101828 +       for (;;) {
101829 +               u64 new = atomic64_add_return(1, &i_seq);
101831 +               if (WARN_ON_ONCE(!new))
101832 +                       continue;
101834 +               old = atomic64_cmpxchg_relaxed(&inode->i_sequence2, 0, new);
101835 +               if (old)
101836 +                       return old;
101837 +               return new;
101838 +       }
101842 + * futex_get_shared_key - Get a key for a shared futex
101843 + * @address: Futex memory address
101844 + * @mm:      Current process mm_struct pointer
101845 + * @key:     Key struct to be filled
101847 + * Returns: 0 on success, error code otherwise
101848 + */
101849 +static int futex_get_shared_key(uintptr_t address, struct mm_struct *mm,
101850 +                               struct futex_key *key)
101852 +       int ret;
101853 +       struct page *page, *tail;
101854 +       struct address_space *mapping;
101856 +again:
101857 +       ret = get_user_pages_fast(address, 1, 0, &page);
101858 +       if (ret < 0)
101859 +               return ret;
101861 +       /*
101862 +        * The treatment of mapping from this point on is critical. The page
101863 +        * lock protects many things but in this context the page lock
101864 +        * stabilizes mapping, prevents inode freeing in the shared
101865 +        * file-backed region case and guards against movement to swap cache.
101866 +        *
101867 +        * Strictly speaking the page lock is not needed in all cases being
101868 +        * considered here and page lock forces unnecessarily serialization
101869 +        * From this point on, mapping will be re-verified if necessary and
101870 +        * page lock will be acquired only if it is unavoidable
101871 +        *
101872 +        * Mapping checks require the head page for any compound page so the
101873 +        * head page and mapping is looked up now. For anonymous pages, it
101874 +        * does not matter if the page splits in the future as the key is
101875 +        * based on the address. For filesystem-backed pages, the tail is
101876 +        * required as the index of the page determines the key. For
101877 +        * base pages, there is no tail page and tail == page.
101878 +        */
101879 +       tail = page;
101880 +       page = compound_head(page);
101881 +       mapping = READ_ONCE(page->mapping);
101883 +       /*
101884 +        * If page->mapping is NULL, then it cannot be a PageAnon
101885 +        * page; but it might be the ZERO_PAGE or in the gate area or
101886 +        * in a special mapping (all cases which we are happy to fail);
101887 +        * or it may have been a good file page when get_user_pages_fast
101888 +        * found it, but truncated or holepunched or subjected to
101889 +        * invalidate_complete_page2 before we got the page lock (also
101890 +        * cases which we are happy to fail).  And we hold a reference,
101891 +        * so refcount care in invalidate_complete_page's remove_mapping
101892 +        * prevents drop_caches from setting mapping to NULL beneath us.
101893 +        *
101894 +        * The case we do have to guard against is when memory pressure made
101895 +        * shmem_writepage move it from filecache to swapcache beneath us:
101896 +        * an unlikely race, but we do need to retry for page->mapping.
101897 +        */
101898 +       if (unlikely(!mapping)) {
101899 +               int shmem_swizzled;
101901 +               /*
101902 +                * Page lock is required to identify which special case above
101903 +                * applies. If this is really a shmem page then the page lock
101904 +                * will prevent unexpected transitions.
101905 +                */
101906 +               lock_page(page);
101907 +               shmem_swizzled = PageSwapCache(page) || page->mapping;
101908 +               unlock_page(page);
101909 +               put_page(page);
101911 +               if (shmem_swizzled)
101912 +                       goto again;
101914 +               return -EFAULT;
101915 +       }
101917 +       /*
101918 +        * Private mappings are handled in a simple way.
101919 +        *
101920 +        * If the futex key is stored on an anonymous page, then the associated
101921 +        * object is the mm which is implicitly pinned by the calling process.
101922 +        *
101923 +        * NOTE: When userspace waits on a MAP_SHARED mapping, even if
101924 +        * it's a read-only handle, it's expected that futexes attach to
101925 +        * the object not the particular process.
101926 +        */
101927 +       if (PageAnon(page)) {
101928 +               key->offset |= FUT_OFF_MMSHARED;
101929 +       } else {
101930 +               struct inode *inode;
101932 +               /*
101933 +                * The associated futex object in this case is the inode and
101934 +                * the page->mapping must be traversed. Ordinarily this should
101935 +                * be stabilised under page lock but it's not strictly
101936 +                * necessary in this case as we just want to pin the inode, not
101937 +                * update the radix tree or anything like that.
101938 +                *
101939 +                * The RCU read lock is taken as the inode is finally freed
101940 +                * under RCU. If the mapping still matches expectations then the
101941 +                * mapping->host can be safely accessed as being a valid inode.
101942 +                */
101943 +               rcu_read_lock();
101945 +               if (READ_ONCE(page->mapping) != mapping) {
101946 +                       rcu_read_unlock();
101947 +                       put_page(page);
101949 +                       goto again;
101950 +               }
101952 +               inode = READ_ONCE(mapping->host);
101953 +               if (!inode) {
101954 +                       rcu_read_unlock();
101955 +                       put_page(page);
101957 +                       goto again;
101958 +               }
101960 +               key->pointer = futex_get_inode_uuid(inode);
101961 +               key->index = (unsigned long)basepage_index(tail);
101962 +               key->offset |= FUT_OFF_INODE;
101964 +               rcu_read_unlock();
101965 +       }
101967 +       put_page(page);
101969 +       return 0;
101973 + * futex_get_bucket - Check if the user address is valid, prepare internal
101974 + *                    data and calculate the hash
101975 + * @uaddr:   futex user address
101976 + * @key:     data that uniquely identifies a futex
101977 + * @shared:  is this a shared futex?
101979 + * For private futexes, each uaddr will be unique for a given mm_struct, and it
101980 + * won't be freed for the life time of the process. For shared futexes, check
101981 + * futex_get_shared_key().
101983 + * Return: address of bucket on success, error code otherwise
101984 + */
101985 +static struct futex_bucket *futex_get_bucket(void __user *uaddr,
101986 +                                            struct futex_key *key,
101987 +                                            bool shared)
101989 +       uintptr_t address = (uintptr_t)uaddr;
101990 +       u32 hash_key;
101992 +       /* Checking if uaddr is valid and accessible */
101993 +       if (unlikely(!IS_ALIGNED(address, sizeof(u32))))
101994 +               return ERR_PTR(-EINVAL);
101995 +       if (unlikely(!access_ok(uaddr, sizeof(u32))))
101996 +               return ERR_PTR(-EFAULT);
101998 +       key->offset = address % PAGE_SIZE;
101999 +       address -= key->offset;
102000 +       key->pointer = (u64)address;
102001 +       key->index = (unsigned long)current->mm;
102003 +       if (shared)
102004 +               futex_get_shared_key(address, current->mm, key);
102006 +       /* Generate hash key for this futex using uaddr and current->mm */
102007 +       hash_key = jhash2((u32 *)key, sizeof(*key) / sizeof(u32), 0);
102009 +       /* Since HASH_SIZE is 2^n, subtracting 1 makes a perfect bit mask */
102010 +       return &futex_table[hash_key & (futex2_hashsize - 1)];
102014 + * futex_get_user - Get the userspace value on this address
102015 + * @uval:  variable to store the value
102016 + * @uaddr: userspace address
102018 + * Check the comment at futex_enqueue() for more information.
102019 + */
102020 +static int futex_get_user(u32 *uval, u32 __user *uaddr)
102022 +       int ret;
102024 +       pagefault_disable();
102025 +       ret = __get_user(*uval, uaddr);
102026 +       pagefault_enable();
102028 +       return ret;
102032 + * futex_setup_time - Prepare the timeout mechanism and start it.
102033 + * @timo:    Timeout value from userspace
102034 + * @timeout: Pointer to hrtimer handler
102035 + * @flags: Flags from userspace, to decide which clockid to use
102037 + * Return: 0 on success, error code otherwise
102038 + */
102039 +static int futex_setup_time(struct __kernel_timespec __user *timo,
102040 +                           struct hrtimer_sleeper *timeout,
102041 +                           unsigned int flags)
102043 +       ktime_t time;
102044 +       struct timespec64 ts;
102045 +       clockid_t clockid = (flags & FUTEX_CLOCK_REALTIME) ?
102046 +                           CLOCK_REALTIME : CLOCK_MONOTONIC;
102048 +       if (get_timespec64(&ts, timo))
102049 +               return -EFAULT;
102051 +       if (!timespec64_valid(&ts))
102052 +               return -EINVAL;
102054 +       time = timespec64_to_ktime(ts);
102056 +       hrtimer_init_sleeper(timeout, clockid, HRTIMER_MODE_ABS);
102058 +       hrtimer_set_expires(&timeout->timer, time);
102060 +       hrtimer_sleeper_start_expires(timeout, HRTIMER_MODE_ABS);
102062 +       return 0;
102066 + * futex_dequeue_multiple - Remove multiple futexes from hash table
102067 + * @futexv: list of waiters
102068 + * @nr:     number of futexes to be removed
102070 + * This function is used if (a) something went wrong while enqueuing, and we
102071 + * need to undo our work (then nr <= nr_futexes) or (b) we woke up, and thus
102072 + * need to remove every waiter, check if some was indeed woken and return.
102073 + * Before removing a waiter, we check if it's on the list, since we have no
102074 + * clue who have been waken.
102076 + * Return:
102077 + *  * -1  - If no futex was woken during the removal
102078 + *  * 0>= - At least one futex was found woken, index of the last one
102079 + */
102080 +static int futex_dequeue_multiple(struct futex_waiter_head *futexv, unsigned int nr)
102082 +       int i, ret = -1;
102084 +       for (i = 0; i < nr; i++) {
102085 +               spin_lock(&futexv->objects[i].bucket->lock);
102086 +               if (!list_empty(&futexv->objects[i].list)) {
102087 +                       list_del_init(&futexv->objects[i].list);
102088 +                       bucket_dec_waiters(futexv->objects[i].bucket);
102089 +               } else {
102090 +                       ret = i;
102091 +               }
102092 +               spin_unlock(&futexv->objects[i].bucket->lock);
102093 +       }
102095 +       return ret;
102099 + * futex_enqueue - Check the value and enqueue a futex on a wait list
102101 + * @futexv:     List of futexes
102102 + * @nr_futexes: Number of futexes in the list
102103 + * @awakened:  If a futex was awakened during enqueueing, store the index here
102105 + * Get the value from the userspace address and compares with the expected one.
102107 + * Getting the value from user futex address:
102109 + * Since we are in a hurry, we use a spin lock and we can't sleep.
102110 + * Try to get the value with page fault disabled (when enable, we might
102111 + * sleep).
102113 + * If we fail, we aren't sure if the address is invalid or is just a
102114 + * page fault. Then, release the lock (so we can sleep) and try to get
102115 + * the value with page fault enabled. In order to trigger a page fault
102116 + * handling, we just call __get_user() again. If we sleep with enqueued
102117 + * futexes, we might miss a wake, so dequeue everything before sleeping.
102119 + * If get_user succeeds, this mean that the address is valid and we do
102120 + * the work again. Since we just handled the page fault, the page is
102121 + * likely pinned in memory and we should be luckier this time and be
102122 + * able to get the value. If we fail anyway, we will try again.
102124 + * If even with page faults enabled we get and error, this means that
102125 + * the address is not valid and we return from the syscall.
102127 + * If we got an unexpected value or need to treat a page fault and realized that
102128 + * a futex was awakened, we can priority this and return success.
102130 + * In success, enqueue the futex in the correct bucket
102132 + * Return:
102133 + * * 1  - We were awake in the process and nothing is enqueued
102134 + * * 0  - Everything is enqueued and we are ready to sleep
102135 + * * 0< - Something went wrong, nothing is enqueued, return error code
102136 + */
102137 +static int futex_enqueue(struct futex_waiter_head *futexv, unsigned int nr_futexes,
102138 +                        int *awakened)
102140 +       int i, ret;
102141 +       u32 uval, val;
102142 +       u32 __user *uaddr;
102143 +       bool retry = false;
102144 +       struct futex_bucket *bucket;
102146 +retry:
102147 +       set_current_state(TASK_INTERRUPTIBLE);
102149 +       for (i = 0; i < nr_futexes; i++) {
102150 +               uaddr = (u32 __user *)futexv->objects[i].uaddr;
102151 +               val = (u32)futexv->objects[i].val;
102153 +               if (is_object_shared && retry) {
102154 +                       struct futex_bucket *tmp =
102155 +                               futex_get_bucket((void __user *)uaddr,
102156 +                                                &futexv->objects[i].key, true);
102157 +                       if (IS_ERR(tmp)) {
102158 +                               __set_current_state(TASK_RUNNING);
102159 +                               futex_dequeue_multiple(futexv, i);
102160 +                               return PTR_ERR(tmp);
102161 +                       }
102162 +                       futexv->objects[i].bucket = tmp;
102163 +               }
102165 +               bucket = futexv->objects[i].bucket;
102167 +               bucket_inc_waiters(bucket);
102168 +               spin_lock(&bucket->lock);
102170 +               ret = futex_get_user(&uval, uaddr);
102172 +               if (unlikely(ret)) {
102173 +                       spin_unlock(&bucket->lock);
102175 +                       bucket_dec_waiters(bucket);
102176 +                       __set_current_state(TASK_RUNNING);
102177 +                       *awakened = futex_dequeue_multiple(futexv, i);
102179 +                       if (*awakened >= 0)
102180 +                               return 1;
102182 +                       if (__get_user(uval, uaddr))
102183 +                               return -EFAULT;
102185 +                       retry = true;
102186 +                       goto retry;
102187 +               }
102189 +               if (uval != val) {
102190 +                       spin_unlock(&bucket->lock);
102192 +                       bucket_dec_waiters(bucket);
102193 +                       __set_current_state(TASK_RUNNING);
102194 +                       *awakened = futex_dequeue_multiple(futexv, i);
102196 +                       if (*awakened >= 0)
102197 +                               return 1;
102199 +                       return -EAGAIN;
102200 +               }
102202 +               list_add_tail(&futexv->objects[i].list, &bucket->list);
102203 +               spin_unlock(&bucket->lock);
102204 +       }
102206 +       return 0;
102210 + * __futex_waitv - Enqueue the list of futexes and wait to be woken
102211 + * @futexv: List of futexes to wait
102212 + * @nr_futexes: Length of futexv
102213 + * @timo:      Timeout
102214 + * @flags:     Timeout flags
102216 + * Return:
102217 + * * 0 >= - Hint of which futex woke us
102218 + * * 0 <  - Error code
102219 + */
102220 +static int __futex_waitv(struct futex_waiter_head *futexv, unsigned int nr_futexes,
102221 +                        struct __kernel_timespec __user *timo,
102222 +                        unsigned int flags)
102224 +       int ret;
102225 +       struct hrtimer_sleeper timeout;
102227 +       if (timo) {
102228 +               ret = futex_setup_time(timo, &timeout, flags);
102229 +               if (ret)
102230 +                       return ret;
102231 +       }
102233 +       while (1) {
102234 +               int awakened = -1;
102236 +               ret = futex_enqueue(futexv, nr_futexes, &awakened);
102238 +               if (ret) {
102239 +                       if (awakened >= 0)
102240 +                               ret = awakened;
102241 +                       break;
102242 +               }
102244 +               /* Before sleeping, check if someone was woken */
102245 +               if (!futexv->hint && (!timo || timeout.task))
102246 +                       freezable_schedule();
102248 +               __set_current_state(TASK_RUNNING);
102250 +               /*
102251 +                * One of those things triggered this wake:
102252 +                *
102253 +                * * We have been removed from the bucket. futex_wake() woke
102254 +                *   us. We just need to dequeue and return 0 to userspace.
102255 +                *
102256 +                * However, if no futex was dequeued by a futex_wake():
102257 +                *
102258 +                * * If the there's a timeout and it has expired,
102259 +                *   return -ETIMEDOUT.
102260 +                *
102261 +                * * If there is a signal pending, something wants to kill our
102262 +                *   thread, return -ERESTARTSYS.
102263 +                *
102264 +                * * If there's no signal pending, it was a spurious wake
102265 +                *   (scheduler gave us a chance to do some work, even if we
102266 +                *   don't want to). We need to remove ourselves from the
102267 +                *   bucket and add again, to prevent losing wakeups in the
102268 +                *   meantime.
102269 +                */
102271 +               ret = futex_dequeue_multiple(futexv, nr_futexes);
102273 +               /* Normal wake */
102274 +               if (ret >= 0)
102275 +                       break;
102277 +               if (timo && !timeout.task) {
102278 +                       ret = -ETIMEDOUT;
102279 +                       break;
102280 +               }
102282 +               if (signal_pending(current)) {
102283 +                       ret = -ERESTARTSYS;
102284 +                       break;
102285 +               }
102287 +               /* Spurious wake, do everything again */
102288 +       }
102290 +       if (timo)
102291 +               hrtimer_cancel(&timeout.timer);
102293 +       return ret;
102297 + * sys_futex_wait - Wait on a futex address if (*uaddr) == val
102298 + * @uaddr: User address of futex
102299 + * @val:   Expected value of futex
102300 + * @flags: Specify the size of futex and the clockid
102301 + * @timo:  Optional absolute timeout.
102303 + * The user thread is put to sleep, waiting for a futex_wake() at uaddr, if the
102304 + * value at *uaddr is the same as val (otherwise, the syscall returns
102305 + * immediately with -EAGAIN).
102307 + * Returns 0 on success, error code otherwise.
102308 + */
102309 +SYSCALL_DEFINE4(futex_wait, void __user *, uaddr, unsigned int, val,
102310 +               unsigned int, flags, struct __kernel_timespec __user *, timo)
102312 +       bool shared = (flags & FUTEX_SHARED_FLAG) ? true : false;
102313 +       unsigned int size = flags & FUTEX_SIZE_MASK;
102314 +       struct futex_waiter *waiter;
102315 +       struct futex_waiter_head *futexv;
102317 +       /* Wrapper for a futexv_waiter_head with one element */
102318 +       struct {
102319 +               struct futex_waiter_head futexv;
102320 +               struct futex_waiter waiter;
102321 +       } __packed wait_single;
102323 +       if (flags & ~FUTEX2_MASK)
102324 +               return -EINVAL;
102326 +       if (size != FUTEX_32)
102327 +               return -EINVAL;
102329 +       futexv = &wait_single.futexv;
102330 +       futexv->task = current;
102331 +       futexv->hint = false;
102333 +       waiter = &wait_single.waiter;
102334 +       waiter->index = 0;
102335 +       waiter->val = val;
102336 +       waiter->uaddr = uaddr;
102337 +       memset(&wait_single.waiter.key, 0, sizeof(struct futex_key));
102339 +       INIT_LIST_HEAD(&waiter->list);
102341 +       /* Get an unlocked hash bucket */
102342 +       waiter->bucket = futex_get_bucket(uaddr, &waiter->key, shared);
102343 +       if (IS_ERR(waiter->bucket))
102344 +               return PTR_ERR(waiter->bucket);
102346 +       return __futex_waitv(futexv, 1, timo, flags);
102349 +#ifdef CONFIG_COMPAT
102351 + * compat_futex_parse_waitv - Parse a waitv array from userspace
102352 + * @futexv:    Kernel side list of waiters to be filled
102353 + * @uwaitv:     Userspace list to be parsed
102354 + * @nr_futexes: Length of futexv
102356 + * Return: Error code on failure, pointer to a prepared futexv otherwise
102357 + */
102358 +static int compat_futex_parse_waitv(struct futex_waiter_head *futexv,
102359 +                                   struct compat_futex_waitv __user *uwaitv,
102360 +                                   unsigned int nr_futexes)
102362 +       struct futex_bucket *bucket;
102363 +       struct compat_futex_waitv waitv;
102364 +       unsigned int i;
102366 +       for (i = 0; i < nr_futexes; i++) {
102367 +               if (copy_from_user(&waitv, &uwaitv[i], sizeof(waitv)))
102368 +                       return -EFAULT;
102370 +               if ((waitv.flags & ~FUTEXV_WAITER_MASK) ||
102371 +                   (waitv.flags & FUTEX_SIZE_MASK) != FUTEX_32)
102372 +                       return -EINVAL;
102374 +               futexv->objects[i].key.pointer = 0;
102375 +               futexv->objects[i].flags  = waitv.flags;
102376 +               futexv->objects[i].uaddr  = compat_ptr(waitv.uaddr);
102377 +               futexv->objects[i].val    = waitv.val;
102378 +               futexv->objects[i].index  = i;
102380 +               bucket = futex_get_bucket(compat_ptr(waitv.uaddr),
102381 +                                         &futexv->objects[i].key,
102382 +                                         is_object_shared);
102384 +               if (IS_ERR(bucket))
102385 +                       return PTR_ERR(bucket);
102387 +               futexv->objects[i].bucket = bucket;
102389 +               INIT_LIST_HEAD(&futexv->objects[i].list);
102390 +       }
102392 +       return 0;
102395 +COMPAT_SYSCALL_DEFINE4(futex_waitv, struct compat_futex_waitv __user *, waiters,
102396 +                      unsigned int, nr_futexes, unsigned int, flags,
102397 +                      struct __kernel_timespec __user *, timo)
102399 +       struct futex_waiter_head *futexv;
102400 +       int ret;
102402 +       if (flags & ~FUTEXV_MASK)
102403 +               return -EINVAL;
102405 +       if (!nr_futexes || nr_futexes > FUTEX_WAITV_MAX || !waiters)
102406 +               return -EINVAL;
102408 +       futexv = kmalloc((sizeof(struct futex_waiter) * nr_futexes) +
102409 +                        sizeof(*futexv), GFP_KERNEL);
102410 +       if (!futexv)
102411 +               return -ENOMEM;
102413 +       futexv->hint = false;
102414 +       futexv->task = current;
102416 +       ret = compat_futex_parse_waitv(futexv, waiters, nr_futexes);
102418 +       if (!ret)
102419 +               ret = __futex_waitv(futexv, nr_futexes, timo, flags);
102421 +       kfree(futexv);
102423 +       return ret;
102425 +#endif
102428 + * futex_parse_waitv - Parse a waitv array from userspace
102429 + * @futexv:    Kernel side list of waiters to be filled
102430 + * @uwaitv:     Userspace list to be parsed
102431 + * @nr_futexes: Length of futexv
102433 + * Return: Error code on failure, pointer to a prepared futexv otherwise
102434 + */
102435 +static int futex_parse_waitv(struct futex_waiter_head *futexv,
102436 +                            struct futex_waitv __user *uwaitv,
102437 +                            unsigned int nr_futexes)
102439 +       struct futex_bucket *bucket;
102440 +       struct futex_waitv waitv;
102441 +       unsigned int i;
102443 +       for (i = 0; i < nr_futexes; i++) {
102444 +               if (copy_from_user(&waitv, &uwaitv[i], sizeof(waitv)))
102445 +                       return -EFAULT;
102447 +               if ((waitv.flags & ~FUTEXV_WAITER_MASK) ||
102448 +                   (waitv.flags & FUTEX_SIZE_MASK) != FUTEX_32)
102449 +                       return -EINVAL;
102451 +               futexv->objects[i].key.pointer = 0;
102452 +               futexv->objects[i].flags  = waitv.flags;
102453 +               futexv->objects[i].uaddr  = waitv.uaddr;
102454 +               futexv->objects[i].val    = waitv.val;
102455 +               futexv->objects[i].index  = i;
102457 +               bucket = futex_get_bucket(waitv.uaddr, &futexv->objects[i].key,
102458 +                                         is_object_shared);
102460 +               if (IS_ERR(bucket))
102461 +                       return PTR_ERR(bucket);
102463 +               futexv->objects[i].bucket = bucket;
102465 +               INIT_LIST_HEAD(&futexv->objects[i].list);
102466 +       }
102468 +       return 0;
102472 + * sys_futex_waitv - Wait on a list of futexes
102473 + * @waiters:    List of futexes to wait on
102474 + * @nr_futexes: Length of futexv
102475 + * @flags:      Flag for timeout (monotonic/realtime)
102476 + * @timo:      Optional absolute timeout.
102478 + * Given an array of `struct futex_waitv`, wait on each uaddr. The thread wakes
102479 + * if a futex_wake() is performed at any uaddr. The syscall returns immediately
102480 + * if any waiter has *uaddr != val. *timo is an optional timeout value for the
102481 + * operation. Each waiter has individual flags. The `flags` argument for the
102482 + * syscall should be used solely for specifying the timeout as realtime, if
102483 + * needed. Flags for shared futexes, sizes, etc. should be used on the
102484 + * individual flags of each waiter.
102486 + * Returns the array index of one of the awaken futexes. There's no given
102487 + * information of how many were awakened, or any particular attribute of it (if
102488 + * it's the first awakened, if it is of the smaller index...).
102489 + */
102490 +SYSCALL_DEFINE4(futex_waitv, struct futex_waitv __user *, waiters,
102491 +               unsigned int, nr_futexes, unsigned int, flags,
102492 +               struct __kernel_timespec __user *, timo)
102494 +       struct futex_waiter_head *futexv;
102495 +       int ret;
102497 +       if (flags & ~FUTEXV_MASK)
102498 +               return -EINVAL;
102500 +       if (!nr_futexes || nr_futexes > FUTEX_WAITV_MAX || !waiters)
102501 +               return -EINVAL;
102503 +       futexv = kmalloc((sizeof(struct futex_waiter) * nr_futexes) +
102504 +                        sizeof(*futexv), GFP_KERNEL);
102505 +       if (!futexv)
102506 +               return -ENOMEM;
102508 +       futexv->hint = false;
102509 +       futexv->task = current;
102511 +#ifdef CONFIG_X86_X32_ABI
102512 +       if (in_x32_syscall()) {
102513 +               ret = compat_futex_parse_waitv(futexv, (struct compat_futex_waitv *)waiters,
102514 +                                              nr_futexes);
102515 +       } else
102516 +#endif
102517 +       {
102518 +               ret = futex_parse_waitv(futexv, waiters, nr_futexes);
102519 +       }
102521 +       if (!ret)
102522 +               ret = __futex_waitv(futexv, nr_futexes, timo, flags);
102524 +       kfree(futexv);
102526 +       return ret;
102530 + * futex_get_parent - For a given futex in a futexv list, get a pointer to the futexv
102531 + * @waiter: Address of futex in the list
102532 + * @index: Index of futex in the list
102534 + * Return: A pointer to its futexv struct
102535 + */
102536 +static inline struct futex_waiter_head *futex_get_parent(uintptr_t waiter,
102537 +                                                        unsigned int index)
102539 +       uintptr_t parent = waiter - sizeof(struct futex_waiter_head)
102540 +                          - (uintptr_t)(index * sizeof(struct futex_waiter));
102542 +       return (struct futex_waiter_head *)parent;
102546 + * futex_mark_wake - Find the task to be wake and add it in wake queue
102547 + * @waiter: Waiter to be wake
102548 + * @bucket: Bucket to be decremented
102549 + * @wake_q: Wake queue to insert the task
102550 + */
102551 +static void futex_mark_wake(struct futex_waiter *waiter,
102552 +                           struct futex_bucket *bucket,
102553 +                           struct wake_q_head *wake_q)
102555 +       struct task_struct *task;
102556 +       struct futex_waiter_head *parent = futex_get_parent((uintptr_t)waiter,
102557 +                                                           waiter->index);
102559 +       lockdep_assert_held(&bucket->lock);
102560 +       parent->hint = true;
102561 +       task = parent->task;
102562 +       get_task_struct(task);
102563 +       list_del_init(&waiter->list);
102564 +       wake_q_add_safe(wake_q, task);
102565 +       bucket_dec_waiters(bucket);
102568 +static inline bool futex_match(struct futex_key key1, struct futex_key key2)
102570 +       return (key1.index == key2.index &&
102571 +               key1.pointer == key2.pointer &&
102572 +               key1.offset == key2.offset);
102575 +long ksys_futex_wake(void __user *uaddr, unsigned long nr_wake,
102576 +                    unsigned int flags)
102578 +       bool shared = (flags & FUTEX_SHARED_FLAG) ? true : false;
102579 +       unsigned int size = flags & FUTEX_SIZE_MASK;
102580 +       struct futex_waiter waiter, *aux, *tmp;
102581 +       struct futex_bucket *bucket;
102582 +       DEFINE_WAKE_Q(wake_q);
102583 +       int ret = 0;
102585 +       if (flags & ~FUTEX2_MASK)
102586 +               return -EINVAL;
102588 +       if (size != FUTEX_32)
102589 +               return -EINVAL;
102591 +       bucket = futex_get_bucket(uaddr, &waiter.key, shared);
102592 +       if (IS_ERR(bucket))
102593 +               return PTR_ERR(bucket);
102595 +       if (!bucket_get_waiters(bucket) || !nr_wake)
102596 +               return 0;
102598 +       spin_lock(&bucket->lock);
102599 +       list_for_each_entry_safe(aux, tmp, &bucket->list, list) {
102600 +               if (futex_match(waiter.key, aux->key)) {
102601 +                       futex_mark_wake(aux, bucket, &wake_q);
102602 +                       if (++ret >= nr_wake)
102603 +                               break;
102604 +               }
102605 +       }
102606 +       spin_unlock(&bucket->lock);
102608 +       wake_up_q(&wake_q);
102610 +       return ret;
102614 + * sys_futex_wake - Wake a number of futexes waiting on an address
102615 + * @uaddr:   Address of futex to be woken up
102616 + * @nr_wake: Number of futexes waiting in uaddr to be woken up
102617 + * @flags:   Flags for size and shared
102619 + * Wake `nr_wake` threads waiting at uaddr.
102621 + * Returns the number of woken threads on success, error code otherwise.
102622 + */
102623 +SYSCALL_DEFINE3(futex_wake, void __user *, uaddr, unsigned int, nr_wake,
102624 +               unsigned int, flags)
102626 +       return ksys_futex_wake(uaddr, nr_wake, flags);
102629 +static void futex_double_unlock(struct futex_bucket *b1, struct futex_bucket *b2)
102631 +       spin_unlock(&b1->lock);
102632 +       if (b1 != b2)
102633 +               spin_unlock(&b2->lock);
102636 +static inline int __futex_requeue(struct futex_requeue rq1,
102637 +                                 struct futex_requeue rq2, unsigned int nr_wake,
102638 +                                 unsigned int nr_requeue, unsigned int cmpval,
102639 +                                 bool shared1, bool shared2)
102641 +       struct futex_waiter w1, w2, *aux, *tmp;
102642 +       bool retry = false;
102643 +       struct futex_bucket *b1, *b2;
102644 +       DEFINE_WAKE_Q(wake_q);
102645 +       u32 uval;
102646 +       int ret;
102648 +       b1 = futex_get_bucket(rq1.uaddr, &w1.key, shared1);
102649 +       if (IS_ERR(b1))
102650 +               return PTR_ERR(b1);
102652 +       b2 = futex_get_bucket(rq2.uaddr, &w2.key, shared2);
102653 +       if (IS_ERR(b2))
102654 +               return PTR_ERR(b2);
102656 +retry:
102657 +       if (shared1 && retry) {
102658 +               b1 = futex_get_bucket(rq1.uaddr, &w1.key, shared1);
102659 +               if (IS_ERR(b1))
102660 +                       return PTR_ERR(b1);
102661 +       }
102663 +       if (shared2 && retry) {
102664 +               b2 = futex_get_bucket(rq2.uaddr, &w2.key, shared2);
102665 +               if (IS_ERR(b2))
102666 +                       return PTR_ERR(b2);
102667 +       }
102669 +       bucket_inc_waiters(b2);
102670 +       /*
102671 +        * To ensure the locks are taken in the same order for all threads (and
102672 +        * thus avoiding deadlocks), take the "smaller" one first
102673 +        */
102674 +       if (b1 <= b2) {
102675 +               spin_lock(&b1->lock);
102676 +               if (b1 < b2)
102677 +                       spin_lock_nested(&b2->lock, SINGLE_DEPTH_NESTING);
102678 +       } else {
102679 +               spin_lock(&b2->lock);
102680 +               spin_lock_nested(&b1->lock, SINGLE_DEPTH_NESTING);
102681 +       }
102683 +       ret = futex_get_user(&uval, rq1.uaddr);
102685 +       if (unlikely(ret)) {
102686 +               futex_double_unlock(b1, b2);
102687 +               if (__get_user(uval, (u32 __user *)rq1.uaddr))
102688 +                       return -EFAULT;
102690 +               bucket_dec_waiters(b2);
102691 +               retry = true;
102692 +               goto retry;
102693 +       }
102695 +       if (uval != cmpval) {
102696 +               futex_double_unlock(b1, b2);
102698 +               bucket_dec_waiters(b2);
102699 +               return -EAGAIN;
102700 +       }
102702 +       list_for_each_entry_safe(aux, tmp, &b1->list, list) {
102703 +               if (futex_match(w1.key, aux->key)) {
102704 +                       if (ret < nr_wake) {
102705 +                               futex_mark_wake(aux, b1, &wake_q);
102706 +                               ret++;
102707 +                               continue;
102708 +                       }
102710 +                       if (ret >= nr_wake + nr_requeue)
102711 +                               break;
102713 +                       aux->key.pointer = w2.key.pointer;
102714 +                       aux->key.index = w2.key.index;
102715 +                       aux->key.offset = w2.key.offset;
102717 +                       if (b1 != b2) {
102718 +                               list_del_init(&aux->list);
102719 +                               bucket_dec_waiters(b1);
102721 +                               list_add_tail(&aux->list, &b2->list);
102722 +                               bucket_inc_waiters(b2);
102723 +                       }
102724 +                       ret++;
102725 +               }
102726 +       }
102728 +       futex_double_unlock(b1, b2);
102729 +       wake_up_q(&wake_q);
102730 +       bucket_dec_waiters(b2);
102732 +       return ret;
102735 +#ifdef CONFIG_COMPAT
102736 +static int compat_futex_parse_requeue(struct futex_requeue *rq,
102737 +                                     struct compat_futex_requeue __user *uaddr,
102738 +                                     bool *shared)
102740 +       struct compat_futex_requeue tmp;
102742 +       if (copy_from_user(&tmp, uaddr, sizeof(tmp)))
102743 +               return -EFAULT;
102745 +       if (tmp.flags & ~FUTEXV_WAITER_MASK ||
102746 +           (tmp.flags & FUTEX_SIZE_MASK) != FUTEX_32)
102747 +               return -EINVAL;
102749 +       *shared = (tmp.flags & FUTEX_SHARED_FLAG) ? true : false;
102751 +       rq->uaddr = compat_ptr(tmp.uaddr);
102752 +       rq->flags = tmp.flags;
102754 +       return 0;
102757 +COMPAT_SYSCALL_DEFINE6(futex_requeue, struct compat_futex_requeue __user *, uaddr1,
102758 +                      struct compat_futex_requeue __user *, uaddr2,
102759 +                      unsigned int, nr_wake, unsigned int, nr_requeue,
102760 +                      unsigned int, cmpval, unsigned int, flags)
102762 +       struct futex_requeue rq1, rq2;
102763 +       bool shared1, shared2;
102764 +       int ret;
102766 +       if (flags)
102767 +               return -EINVAL;
102769 +       ret = compat_futex_parse_requeue(&rq1, uaddr1, &shared1);
102770 +       if (ret)
102771 +               return ret;
102773 +       ret = compat_futex_parse_requeue(&rq2, uaddr2, &shared2);
102774 +       if (ret)
102775 +               return ret;
102777 +       return __futex_requeue(rq1, rq2, nr_wake, nr_requeue, cmpval, shared1, shared2);
102779 +#endif
102782 + * futex_parse_requeue - Copy a user struct futex_requeue and check it's flags
102783 + * @rq:    Kernel struct
102784 + * @uaddr: Address of user struct
102785 + * @shared: Out parameter, defines if this is a shared futex
102787 + * Return: 0 on success, error code otherwise
102788 + */
102789 +static int futex_parse_requeue(struct futex_requeue *rq,
102790 +                              struct futex_requeue __user *uaddr, bool *shared)
102792 +       if (copy_from_user(rq, uaddr, sizeof(*rq)))
102793 +               return -EFAULT;
102795 +       if (rq->flags & ~FUTEXV_WAITER_MASK ||
102796 +           (rq->flags & FUTEX_SIZE_MASK) != FUTEX_32)
102797 +               return -EINVAL;
102799 +       *shared = (rq->flags & FUTEX_SHARED_FLAG) ? true : false;
102801 +       return 0;
102805 + * sys_futex_requeue - Wake futexes at uaddr1 and requeue from uaddr1 to uaddr2
102806 + * @uaddr1:    Address of futexes to be waken/dequeued
102807 + * @uaddr2:    Address for the futexes to be enqueued
102808 + * @nr_wake:   Number of futexes waiting in uaddr1 to be woken up
102809 + * @nr_requeue: Number of futexes to be requeued from uaddr1 to uaddr2
102810 + * @cmpval:    Expected value at uaddr1
102811 + * @flags:     Reserved flags arg for requeue operation expansion. Must be 0.
102813 + * If (uaddr1->uaddr == cmpval), wake at uaddr1->uaddr a nr_wake number of
102814 + * waiters and then, remove a number of nr_requeue waiters at uaddr1->uaddr
102815 + * and add then to uaddr2->uaddr list. Each uaddr has its own set of flags,
102816 + * that must be defined at struct futex_requeue (such as size, shared, NUMA).
102818 + * Return the number of the woken futexes + the number of requeued ones on
102819 + * success, error code otherwise.
102820 + */
102821 +SYSCALL_DEFINE6(futex_requeue, struct futex_requeue __user *, uaddr1,
102822 +               struct futex_requeue __user *, uaddr2,
102823 +               unsigned int, nr_wake, unsigned int, nr_requeue,
102824 +               unsigned int, cmpval, unsigned int, flags)
102826 +       struct futex_requeue rq1, rq2;
102827 +       bool shared1, shared2;
102828 +       int ret;
102830 +       if (flags)
102831 +               return -EINVAL;
102833 +#ifdef CONFIG_X86_X32_ABI
102834 +       if (in_x32_syscall()) {
102835 +               ret = compat_futex_parse_requeue(&rq1, (struct compat_futex_requeue *)uaddr1,
102836 +                                                &shared1);
102837 +               if (ret)
102838 +                       return ret;
102840 +               ret = compat_futex_parse_requeue(&rq2, (struct compat_futex_requeue *)uaddr2,
102841 +                                                &shared2);
102842 +               if (ret)
102843 +                       return ret;
102844 +       } else
102845 +#endif
102846 +       {
102847 +               ret = futex_parse_requeue(&rq1, uaddr1, &shared1);
102848 +               if (ret)
102849 +                       return ret;
102851 +               ret = futex_parse_requeue(&rq2, uaddr2, &shared2);
102852 +               if (ret)
102853 +                       return ret;
102854 +       }
102856 +       return __futex_requeue(rq1, rq2, nr_wake, nr_requeue, cmpval, shared1, shared2);
102859 +static int __init futex2_init(void)
102861 +       int i;
102862 +       unsigned int futex_shift;
102864 +#if CONFIG_BASE_SMALL
102865 +       futex2_hashsize = 16;
102866 +#else
102867 +       futex2_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
102868 +#endif
102870 +       futex_table = alloc_large_system_hash("futex2", sizeof(struct futex_bucket),
102871 +                                             futex2_hashsize, 0,
102872 +                                             futex2_hashsize < 256 ? HASH_SMALL : 0,
102873 +                                             &futex_shift, NULL,
102874 +                                             futex2_hashsize, futex2_hashsize);
102875 +       futex2_hashsize = 1UL << futex_shift;
102877 +       BUG_ON(!is_power_of_2(futex2_hashsize));
102879 +       for (i = 0; i < futex2_hashsize; i++) {
102880 +               INIT_LIST_HEAD(&futex_table[i].list);
102881 +               spin_lock_init(&futex_table[i].lock);
102882 +               atomic_set(&futex_table[i].waiters, 0);
102883 +       }
102885 +       return 0;
102887 +core_initcall(futex2_init);
102888 diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
102889 index 651a4ad6d711..8e586858bcf4 100644
102890 --- a/kernel/irq/matrix.c
102891 +++ b/kernel/irq/matrix.c
102892 @@ -423,7 +423,9 @@ void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
102893         if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
102894                 return;
102896 -       clear_bit(bit, cm->alloc_map);
102897 +       if (WARN_ON_ONCE(!test_and_clear_bit(bit, cm->alloc_map)))
102898 +               return;
102900         cm->allocated--;
102901         if(managed)
102902                 cm->managed_allocated--;
102903 diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
102904 index 3bf98db9c702..23e7acb5c667 100644
102905 --- a/kernel/kcsan/core.c
102906 +++ b/kernel/kcsan/core.c
102907 @@ -639,8 +639,6 @@ void __init kcsan_init(void)
102909         BUG_ON(!in_task());
102911 -       kcsan_debugfs_init();
102913         for_each_possible_cpu(cpu)
102914                 per_cpu(kcsan_rand_state, cpu) = (u32)get_cycles();
102916 diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
102917 index 3c8093a371b1..209ad8dcfcec 100644
102918 --- a/kernel/kcsan/debugfs.c
102919 +++ b/kernel/kcsan/debugfs.c
102920 @@ -261,7 +261,9 @@ static const struct file_operations debugfs_ops =
102921         .release = single_release
102924 -void __init kcsan_debugfs_init(void)
102925 +static void __init kcsan_debugfs_init(void)
102927         debugfs_create_file("kcsan", 0644, NULL, NULL, &debugfs_ops);
102930 +late_initcall(kcsan_debugfs_init);
102931 diff --git a/kernel/kcsan/kcsan.h b/kernel/kcsan/kcsan.h
102932 index 8d4bf3431b3c..87ccdb3b051f 100644
102933 --- a/kernel/kcsan/kcsan.h
102934 +++ b/kernel/kcsan/kcsan.h
102935 @@ -30,11 +30,6 @@ extern bool kcsan_enabled;
102936  void kcsan_save_irqtrace(struct task_struct *task);
102937  void kcsan_restore_irqtrace(struct task_struct *task);
102940 - * Initialize debugfs file.
102941 - */
102942 -void kcsan_debugfs_init(void);
102945   * Statistics counters displayed via debugfs; should only be modified in
102946   * slow-paths.
102947 diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
102948 index 5c3447cf7ad5..33400ff051a8 100644
102949 --- a/kernel/kexec_file.c
102950 +++ b/kernel/kexec_file.c
102951 @@ -740,8 +740,10 @@ static int kexec_calculate_store_digests(struct kimage *image)
102953         sha_region_sz = KEXEC_SEGMENT_MAX * sizeof(struct kexec_sha_region);
102954         sha_regions = vzalloc(sha_region_sz);
102955 -       if (!sha_regions)
102956 +       if (!sha_regions) {
102957 +               ret = -ENOMEM;
102958                 goto out_free_desc;
102959 +       }
102961         desc->tfm   = tfm;
102963 diff --git a/kernel/kthread.c b/kernel/kthread.c
102964 index 1578973c5740..3b8dfbc24a22 100644
102965 --- a/kernel/kthread.c
102966 +++ b/kernel/kthread.c
102967 @@ -84,6 +84,25 @@ static inline struct kthread *to_kthread(struct task_struct *k)
102968         return (__force void *)k->set_child_tid;
102972 + * Variant of to_kthread() that doesn't assume @p is a kthread.
102974 + * Per construction; when:
102976 + *   (p->flags & PF_KTHREAD) && p->set_child_tid
102978 + * the task is both a kthread and struct kthread is persistent. However
102979 + * PF_KTHREAD on it's own is not, kernel_thread() can exec() (See umh.c and
102980 + * begin_new_exec()).
102981 + */
102982 +static inline struct kthread *__to_kthread(struct task_struct *p)
102984 +       void *kthread = (__force void *)p->set_child_tid;
102985 +       if (kthread && !(p->flags & PF_KTHREAD))
102986 +               kthread = NULL;
102987 +       return kthread;
102990  void free_kthread_struct(struct task_struct *k)
102992         struct kthread *kthread;
102993 @@ -168,8 +187,9 @@ EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
102994   */
102995  void *kthread_func(struct task_struct *task)
102997 -       if (task->flags & PF_KTHREAD)
102998 -               return to_kthread(task)->threadfn;
102999 +       struct kthread *kthread = __to_kthread(task);
103000 +       if (kthread)
103001 +               return kthread->threadfn;
103002         return NULL;
103004  EXPORT_SYMBOL_GPL(kthread_func);
103005 @@ -199,10 +219,11 @@ EXPORT_SYMBOL_GPL(kthread_data);
103006   */
103007  void *kthread_probe_data(struct task_struct *task)
103009 -       struct kthread *kthread = to_kthread(task);
103010 +       struct kthread *kthread = __to_kthread(task);
103011         void *data = NULL;
103013 -       copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
103014 +       if (kthread)
103015 +               copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
103016         return data;
103019 @@ -514,9 +535,9 @@ void kthread_set_per_cpu(struct task_struct *k, int cpu)
103020         set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
103023 -bool kthread_is_per_cpu(struct task_struct *k)
103024 +bool kthread_is_per_cpu(struct task_struct *p)
103026 -       struct kthread *kthread = to_kthread(k);
103027 +       struct kthread *kthread = __to_kthread(p);
103028         if (!kthread)
103029                 return false;
103031 @@ -1303,6 +1324,7 @@ void kthread_use_mm(struct mm_struct *mm)
103032         tsk->mm = mm;
103033         membarrier_update_current_mm(mm);
103034         switch_mm_irqs_off(active_mm, mm, tsk);
103035 +       lru_gen_switch_mm(active_mm, mm);
103036         local_irq_enable();
103037         task_unlock(tsk);
103038  #ifdef finish_arch_post_lock_switch
103039 diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
103040 index abba5df50006..b9fab2d55b93 100644
103041 --- a/kernel/locking/rwsem.c
103042 +++ b/kernel/locking/rwsem.c
103043 @@ -668,6 +668,7 @@ rwsem_spin_on_owner(struct rw_semaphore *sem)
103044         struct task_struct *new, *owner;
103045         unsigned long flags, new_flags;
103046         enum owner_state state;
103047 +       int i = 0;
103049         owner = rwsem_owner_flags(sem, &flags);
103050         state = rwsem_owner_state(owner, flags);
103051 @@ -701,7 +702,8 @@ rwsem_spin_on_owner(struct rw_semaphore *sem)
103052                         break;
103053                 }
103055 -               cpu_relax();
103056 +               if (i++ > 1000)
103057 +                       cpu_relax();
103058         }
103059         rcu_read_unlock();
103061 diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
103062 index 575a34b88936..77ae2704e979 100644
103063 --- a/kernel/printk/printk.c
103064 +++ b/kernel/printk/printk.c
103065 @@ -1494,6 +1494,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
103066         struct printk_info info;
103067         unsigned int line_count;
103068         struct printk_record r;
103069 +       u64 max_seq;
103070         char *text;
103071         int len = 0;
103072         u64 seq;
103073 @@ -1512,9 +1513,15 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
103074         prb_for_each_info(clear_seq, prb, seq, &info, &line_count)
103075                 len += get_record_print_text_size(&info, line_count, true, time);
103077 +       /*
103078 +        * Set an upper bound for the next loop to avoid subtracting lengths
103079 +        * that were never added.
103080 +        */
103081 +       max_seq = seq;
103083         /* move first record forward until length fits into the buffer */
103084         prb_for_each_info(clear_seq, prb, seq, &info, &line_count) {
103085 -               if (len <= size)
103086 +               if (len <= size || info.seq >= max_seq)
103087                         break;
103088                 len -= get_record_print_text_size(&info, line_count, true, time);
103089         }
103090 diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig
103091 index 3128b7cf8e1f..abfae9afbdc8 100644
103092 --- a/kernel/rcu/Kconfig
103093 +++ b/kernel/rcu/Kconfig
103094 @@ -189,8 +189,8 @@ config RCU_FAST_NO_HZ
103096  config RCU_BOOST
103097         bool "Enable RCU priority boosting"
103098 -       depends on (RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT) || PREEMPT_RT
103099 -       default y if PREEMPT_RT
103100 +       depends on (RT_MUTEXES && PREEMPT_RCU) || PREEMPT_RT
103101 +       default y
103102         help
103103           This option boosts the priority of preempted RCU readers that
103104           block the current preemptible RCU grace period for too long.
103105 @@ -204,7 +204,7 @@ config RCU_BOOST_DELAY
103106         int "Milliseconds to delay boosting after RCU grace-period start"
103107         range 0 3000
103108         depends on RCU_BOOST
103109 -       default 500
103110 +       default 0
103111         help
103112           This option specifies the time to wait after the beginning of
103113           a given grace period before priority-boosting preempted RCU
103114 diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
103115 index da6f5213fb74..7356764e49a0 100644
103116 --- a/kernel/rcu/tree.c
103117 +++ b/kernel/rcu/tree.c
103118 @@ -1077,7 +1077,6 @@ noinstr void rcu_nmi_enter(void)
103119         } else if (!in_nmi()) {
103120                 instrumentation_begin();
103121                 rcu_irq_enter_check_tick();
103122 -               instrumentation_end();
103123         } else  {
103124                 instrumentation_begin();
103125         }
103126 @@ -3464,7 +3463,7 @@ static void fill_page_cache_func(struct work_struct *work)
103128         for (i = 0; i < rcu_min_cached_objs; i++) {
103129                 bnode = (struct kvfree_rcu_bulk_data *)
103130 -                       __get_free_page(GFP_KERNEL | __GFP_NOWARN);
103131 +                       __get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
103133                 if (bnode) {
103134                         raw_spin_lock_irqsave(&krcp->lock, flags);
103135 diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
103136 index 2d603771c7dc..0796a75b6e0e 100644
103137 --- a/kernel/rcu/tree_plugin.h
103138 +++ b/kernel/rcu/tree_plugin.h
103139 @@ -1646,7 +1646,11 @@ static bool wake_nocb_gp(struct rcu_data *rdp, bool force,
103140                 rcu_nocb_unlock_irqrestore(rdp, flags);
103141                 return false;
103142         }
103143 -       del_timer(&rdp->nocb_timer);
103145 +       if (READ_ONCE(rdp->nocb_defer_wakeup) > RCU_NOCB_WAKE_NOT) {
103146 +               WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
103147 +               del_timer(&rdp->nocb_timer);
103148 +       }
103149         rcu_nocb_unlock_irqrestore(rdp, flags);
103150         raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
103151         if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) {
103152 @@ -2265,7 +2269,6 @@ static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp)
103153                 return false;
103154         }
103155         ndw = READ_ONCE(rdp->nocb_defer_wakeup);
103156 -       WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
103157         ret = wake_nocb_gp(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
103158         trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
103160 diff --git a/kernel/resource.c b/kernel/resource.c
103161 index 627e61b0c124..16e0c7e8ed24 100644
103162 --- a/kernel/resource.c
103163 +++ b/kernel/resource.c
103164 @@ -457,7 +457,7 @@ int walk_system_ram_res(u64 start, u64 end, void *arg,
103166         unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
103168 -       return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, true,
103169 +       return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, false,
103170                                      arg, func);
103173 @@ -470,7 +470,7 @@ int walk_mem_res(u64 start, u64 end, void *arg,
103175         unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
103177 -       return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, true,
103178 +       return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, false,
103179                                      arg, func);
103182 diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c
103183 index 2067080bb235..573b313efe4c 100644
103184 --- a/kernel/sched/autogroup.c
103185 +++ b/kernel/sched/autogroup.c
103186 @@ -5,7 +5,8 @@
103187  #include <linux/nospec.h>
103188  #include "sched.h"
103190 -unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
103191 +unsigned int __read_mostly sysctl_sched_autogroup_enabled =
103192 +               IS_ENABLED(CONFIG_SCHED_AUTOGROUP_DEFAULT_ENABLED) ? 1 : 0;
103193  static struct autogroup autogroup_default;
103194  static atomic_t autogroup_seq_nr;
103196 @@ -197,11 +198,12 @@ void sched_autogroup_exit(struct signal_struct *sig)
103198  static int __init setup_autogroup(char *str)
103200 -       sysctl_sched_autogroup_enabled = 0;
103202 +       unsigned long enabled;
103203 +       if (!kstrtoul(str, 0, &enabled))
103204 +               sysctl_sched_autogroup_enabled = enabled ? 1 : 0;
103205         return 1;
103207 -__setup("noautogroup", setup_autogroup);
103208 +__setup("autogroup=", setup_autogroup);
103210  #ifdef CONFIG_PROC_FS
103212 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
103213 index 98191218d891..98bcafbe10d9 100644
103214 --- a/kernel/sched/core.c
103215 +++ b/kernel/sched/core.c
103216 @@ -928,7 +928,7 @@ DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
103218  static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
103220 -       return clamp_value / UCLAMP_BUCKET_DELTA;
103221 +       return min_t(unsigned int, clamp_value / UCLAMP_BUCKET_DELTA, UCLAMP_BUCKETS - 1);
103224  static inline unsigned int uclamp_none(enum uclamp_id clamp_id)
103225 @@ -3554,7 +3554,13 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
103226         p->se.sum_exec_runtime          = 0;
103227         p->se.prev_sum_exec_runtime     = 0;
103228         p->se.nr_migrations             = 0;
103230 +#ifdef CONFIG_CACULE_SCHED
103231 +       p->se.cacule_node.vruntime      = 0;
103232 +#else
103233         p->se.vruntime                  = 0;
103234 +#endif
103236         INIT_LIST_HEAD(&p->se.group_node);
103238  #ifdef CONFIG_FAIR_GROUP_SCHED
103239 @@ -3840,6 +3846,10 @@ void wake_up_new_task(struct task_struct *p)
103240         update_rq_clock(rq);
103241         post_init_entity_util_avg(p);
103243 +#ifdef CONFIG_CACULE_SCHED
103244 +       p->se.cacule_node.cacule_start_time = sched_clock();
103245 +#endif
103247         activate_task(rq, p, ENQUEUE_NOCLOCK);
103248         trace_sched_wakeup_new(p);
103249         check_preempt_curr(rq, p, WF_FORK);
103250 @@ -4306,6 +4316,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
103251                  * finish_task_switch()'s mmdrop().
103252                  */
103253                 switch_mm_irqs_off(prev->active_mm, next->mm, next);
103254 +               lru_gen_switch_mm(prev->active_mm, next->mm);
103256                 if (!prev->mm) {                        // from kernel
103257                         /* will mmdrop() in finish_task_switch(). */
103258 @@ -5765,6 +5776,7 @@ int can_nice(const struct task_struct *p, const int nice)
103259         return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
103260                 capable(CAP_SYS_NICE));
103262 +EXPORT_SYMBOL(can_nice);
103264  #ifdef __ARCH_WANT_SYS_NICE
103266 @@ -7597,6 +7609,7 @@ void idle_task_exit(void)
103268         if (mm != &init_mm) {
103269                 switch_mm(mm, &init_mm, current);
103270 +               lru_gen_switch_mm(mm, &init_mm);
103271                 finish_arch_post_lock_switch();
103272         }
103274 @@ -7652,7 +7665,7 @@ static void balance_push(struct rq *rq)
103275          * histerical raisins.
103276          */
103277         if (rq->idle == push_task ||
103278 -           ((push_task->flags & PF_KTHREAD) && kthread_is_per_cpu(push_task)) ||
103279 +           kthread_is_per_cpu(push_task) ||
103280             is_migration_disabled(push_task)) {
103282                 /*
103283 @@ -8094,6 +8107,10 @@ void __init sched_init(void)
103284         BUG_ON(&dl_sched_class + 1 != &stop_sched_class);
103285  #endif
103287 +#ifdef CONFIG_CACULE_SCHED
103288 +       printk(KERN_INFO "CacULE CPU scheduler v5.12 by Hamad Al Marri.");
103289 +#endif
103291         wait_bit_init();
103293  #ifdef CONFIG_FAIR_GROUP_SCHED
103294 diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
103295 index 486f403a778b..6542bd142365 100644
103296 --- a/kernel/sched/debug.c
103297 +++ b/kernel/sched/debug.c
103298 @@ -8,8 +8,6 @@
103299   */
103300  #include "sched.h"
103302 -static DEFINE_SPINLOCK(sched_debug_lock);
103305   * This allows printing both to /proc/sched_debug and
103306   * to the console
103307 @@ -470,16 +468,37 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
103308  #endif
103310  #ifdef CONFIG_CGROUP_SCHED
103311 +static DEFINE_SPINLOCK(sched_debug_lock);
103312  static char group_path[PATH_MAX];
103314 -static char *task_group_path(struct task_group *tg)
103315 +static void task_group_path(struct task_group *tg, char *path, int plen)
103317 -       if (autogroup_path(tg, group_path, PATH_MAX))
103318 -               return group_path;
103319 +       if (autogroup_path(tg, path, plen))
103320 +               return;
103322 -       cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
103323 +       cgroup_path(tg->css.cgroup, path, plen);
103326 -       return group_path;
103328 + * Only 1 SEQ_printf_task_group_path() caller can use the full length
103329 + * group_path[] for cgroup path. Other simultaneous callers will have
103330 + * to use a shorter stack buffer. A "..." suffix is appended at the end
103331 + * of the stack buffer so that it will show up in case the output length
103332 + * matches the given buffer size to indicate possible path name truncation.
103333 + */
103334 +#define SEQ_printf_task_group_path(m, tg, fmt...)                      \
103335 +{                                                                      \
103336 +       if (spin_trylock(&sched_debug_lock)) {                          \
103337 +               task_group_path(tg, group_path, sizeof(group_path));    \
103338 +               SEQ_printf(m, fmt, group_path);                         \
103339 +               spin_unlock(&sched_debug_lock);                         \
103340 +       } else {                                                        \
103341 +               char buf[128];                                          \
103342 +               char *bufend = buf + sizeof(buf) - 3;                   \
103343 +               task_group_path(tg, buf, bufend - buf);                 \
103344 +               strcpy(bufend - 1, "...");                              \
103345 +               SEQ_printf(m, fmt, buf);                                \
103346 +       }                                                               \
103348  #endif
103350 @@ -506,7 +525,7 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
103351         SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
103352  #endif
103353  #ifdef CONFIG_CGROUP_SCHED
103354 -       SEQ_printf(m, " %s", task_group_path(task_group(p)));
103355 +       SEQ_printf_task_group_path(m, task_group(p), " %s")
103356  #endif
103358         SEQ_printf(m, "\n");
103359 @@ -535,15 +554,18 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
103361  void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
103363 -       s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
103364 -               spread, rq0_min_vruntime, spread0;
103365 +       s64 MIN_vruntime = -1, max_vruntime = -1,
103366 +#if !defined(CONFIG_CACULE_SCHED)
103367 +       min_vruntime, rq0_min_vruntime, spread0,
103368 +#endif
103369 +       spread;
103370         struct rq *rq = cpu_rq(cpu);
103371         struct sched_entity *last;
103372         unsigned long flags;
103374  #ifdef CONFIG_FAIR_GROUP_SCHED
103375         SEQ_printf(m, "\n");
103376 -       SEQ_printf(m, "cfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
103377 +       SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
103378  #else
103379         SEQ_printf(m, "\n");
103380         SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
103381 @@ -557,21 +579,27 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
103382         last = __pick_last_entity(cfs_rq);
103383         if (last)
103384                 max_vruntime = last->vruntime;
103385 +#if !defined(CONFIG_CACULE_SCHED)
103386         min_vruntime = cfs_rq->min_vruntime;
103387         rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
103388 +#endif
103389         raw_spin_unlock_irqrestore(&rq->lock, flags);
103390         SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
103391                         SPLIT_NS(MIN_vruntime));
103392 +#if !defined(CONFIG_CACULE_SCHED)
103393         SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
103394                         SPLIT_NS(min_vruntime));
103395 +#endif
103396         SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
103397                         SPLIT_NS(max_vruntime));
103398         spread = max_vruntime - MIN_vruntime;
103399         SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
103400                         SPLIT_NS(spread));
103401 +#if !defined(CONFIG_CACULE_SCHED)
103402         spread0 = min_vruntime - rq0_min_vruntime;
103403         SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
103404                         SPLIT_NS(spread0));
103405 +#endif
103406         SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
103407                         cfs_rq->nr_spread_over);
103408         SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
103409 @@ -614,7 +642,7 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
103411  #ifdef CONFIG_RT_GROUP_SCHED
103412         SEQ_printf(m, "\n");
103413 -       SEQ_printf(m, "rt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
103414 +       SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu);
103415  #else
103416         SEQ_printf(m, "\n");
103417         SEQ_printf(m, "rt_rq[%d]:\n", cpu);
103418 @@ -666,7 +694,6 @@ void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
103419  static void print_cpu(struct seq_file *m, int cpu)
103421         struct rq *rq = cpu_rq(cpu);
103422 -       unsigned long flags;
103424  #ifdef CONFIG_X86
103425         {
103426 @@ -717,13 +744,11 @@ do {                                                                      \
103427         }
103428  #undef P
103430 -       spin_lock_irqsave(&sched_debug_lock, flags);
103431         print_cfs_stats(m, cpu);
103432         print_rt_stats(m, cpu);
103433         print_dl_stats(m, cpu);
103435         print_rq(m, rq, cpu);
103436 -       spin_unlock_irqrestore(&sched_debug_lock, flags);
103437         SEQ_printf(m, "\n");
103440 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
103441 index 794c2cb945f8..39bad9ca2f49 100644
103442 --- a/kernel/sched/fair.c
103443 +++ b/kernel/sched/fair.c
103444 @@ -19,9 +19,18 @@
103445   *
103446   *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
103447   *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
103449 + *  CacULE enhancements CPU cache and scheduler based on
103450 + *  Interactivity Score.
103451 + *  (C) 2020 Hamad Al Marri <hamad.s.almarri@gmail.com>
103452   */
103453  #include "sched.h"
103455 +#ifdef CONFIG_CACULE_SCHED
103456 +/* Default XanMod's CacULE latency: 2ms * (1 + ilog(ncpus)) */
103457 +unsigned int sysctl_sched_latency                      = 2000000ULL;
103458 +static unsigned int normalized_sysctl_sched_latency    = 2000000ULL;
103459 +#else
103461   * Targeted preemption latency for CPU-bound tasks:
103462   *
103463 @@ -37,6 +46,7 @@
103464   */
103465  unsigned int sysctl_sched_latency                      = 6000000ULL;
103466  static unsigned int normalized_sysctl_sched_latency    = 6000000ULL;
103467 +#endif
103470   * The initial- and re-scaling of tunables is configurable
103471 @@ -51,6 +61,11 @@ static unsigned int normalized_sysctl_sched_latency  = 6000000ULL;
103472   */
103473  enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
103475 +#ifdef CONFIG_CACULE_SCHED
103476 +/* Default XanMod's CacULE granularity: 0.25 msec * (1 + ilog(ncpus)) */
103477 +unsigned int sysctl_sched_min_granularity                      = 250000ULL;
103478 +static unsigned int normalized_sysctl_sched_min_granularity    = 250000ULL;
103479 +#else
103481   * Minimal preemption granularity for CPU-bound tasks:
103482   *
103483 @@ -58,6 +73,7 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_L
103484   */
103485  unsigned int sysctl_sched_min_granularity                      = 750000ULL;
103486  static unsigned int normalized_sysctl_sched_min_granularity    = 750000ULL;
103487 +#endif
103490   * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
103491 @@ -113,6 +129,11 @@ int __weak arch_asym_cpu_priority(int cpu)
103492   */
103493  #define fits_capacity(cap, max)        ((cap) * 1280 < (max) * 1024)
103495 +#endif
103496 +#ifdef CONFIG_CACULE_SCHED
103497 +int __read_mostly cacule_max_lifetime                  = 22000; // in ms
103498 +int __read_mostly interactivity_factor                 = 32768;
103499 +unsigned int __read_mostly interactivity_threshold     = 1000;
103500  #endif
103502  #ifdef CONFIG_CFS_BANDWIDTH
103503 @@ -253,6 +274,14 @@ static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight
103505  const struct sched_class fair_sched_class;
103508 +#ifdef CONFIG_CACULE_SCHED
103509 +static inline struct sched_entity *se_of(struct cacule_node *cn)
103511 +       return container_of(cn, struct sched_entity, cacule_node);
103513 +#endif
103515  /**************************************************************
103516   * CFS operations on generic schedulable entities:
103517   */
103518 @@ -512,7 +541,7 @@ void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
103519  /**************************************************************
103520   * Scheduling class tree data structure manipulation methods:
103521   */
103523 +#if !defined(CONFIG_CACULE_SCHED)
103524  static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
103526         s64 delta = (s64)(vruntime - max_vruntime);
103527 @@ -575,7 +604,169 @@ static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
103529         return entity_before(__node_2_se(a), __node_2_se(b));
103531 +#endif /* CONFIG_CACULE_SCHED */
103533 +#ifdef CONFIG_CACULE_SCHED
103534 +static unsigned int
103535 +calc_interactivity(u64 now, struct cacule_node *se)
103537 +       u64 l_se, vr_se, sleep_se = 1ULL, u64_factor;
103538 +       unsigned int score_se;
103540 +       /*
103541 +        * in case of vruntime==0, logical OR with 1 would
103542 +        * make sure that the least sig. bit is 1
103543 +        */
103544 +       l_se            = now - se->cacule_start_time;
103545 +       vr_se           = se->vruntime          | 1;
103546 +       u64_factor      = interactivity_factor;
103548 +       /* safety check */
103549 +       if (likely(l_se > vr_se))
103550 +               sleep_se = (l_se - vr_se) | 1;
103552 +       if (sleep_se >= vr_se)
103553 +               score_se = u64_factor / (sleep_se / vr_se);
103554 +       else
103555 +               score_se = (u64_factor << 1) - (u64_factor / (vr_se / sleep_se));
103557 +       return score_se;
103560 +static inline int is_interactive(struct cacule_node *cn)
103562 +       if (se_of(cn)->vruntime == 0)
103563 +               return 0;
103565 +       return calc_interactivity(sched_clock(), cn) < interactivity_threshold;
103568 +static inline int
103569 +entity_before_cached(u64 now, unsigned int score_curr, struct cacule_node *se)
103571 +       unsigned int score_se;
103572 +       int diff;
103574 +       score_se        = calc_interactivity(now, se);
103575 +       diff            = score_se - score_curr;
103577 +       if (diff <= 0)
103578 +               return 1;
103580 +       return -1;
103584 + * Does se have lower interactivity score value (i.e. interactive) than curr? If yes, return 1,
103585 + * otherwise return -1
103586 + * se is before curr if se has lower interactivity score value
103587 + * the lower score, the more interactive
103588 + */
103589 +static inline int
103590 +entity_before(u64 now, struct cacule_node *curr, struct cacule_node *se)
103592 +       unsigned int score_curr, score_se;
103593 +       int diff;
103595 +       score_curr      = calc_interactivity(now, curr);
103596 +       score_se        = calc_interactivity(now, se);
103598 +       diff            = score_se - score_curr;
103600 +       if (diff < 0)
103601 +               return 1;
103603 +       return -1;
103607 + * Enqueue an entity
103608 + */
103609 +static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *_se)
103611 +       struct cacule_node *se = &(_se->cacule_node);
103612 +       struct cacule_node *iter, *next = NULL;
103613 +       u64 now = sched_clock();
103614 +       unsigned int score_se = calc_interactivity(now, se);
103616 +       se->next = NULL;
103617 +       se->prev = NULL;
103619 +       if (likely(cfs_rq->head)) {
103621 +               // start from tail
103622 +               iter = cfs_rq->tail;
103624 +               // does se have higher IS than iter?
103625 +               while (iter && entity_before_cached(now, score_se, iter) == -1) {
103626 +                       next = iter;
103627 +                       iter = iter->prev;
103628 +               }
103630 +               // se in tail position
103631 +               if (iter == cfs_rq->tail) {
103632 +                       cfs_rq->tail->next      = se;
103633 +                       se->prev                = cfs_rq->tail;
103635 +                       cfs_rq->tail            = se;
103636 +               }
103637 +               // else if neither head nor tail, insert se after iter
103638 +               else if (iter) {
103639 +                       se->next        = next;
103640 +                       se->prev        = iter;
103642 +                       iter->next      = se;
103643 +                       next->prev      = se;
103644 +               }
103645 +               // insert se at head
103646 +               else {
103647 +                       se->next                = cfs_rq->head;
103648 +                       cfs_rq->head->prev      = se;
103650 +                       // lastly reset the head
103651 +                       cfs_rq->head            = se;
103652 +               }
103653 +       } else {
103654 +               // if empty rq
103655 +               cfs_rq->head = se;
103656 +               cfs_rq->tail = se;
103657 +       }
103660 +static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *_se)
103662 +       struct cacule_node *se = &(_se->cacule_node);
103664 +       // if only one se in rq
103665 +       if (cfs_rq->head == cfs_rq->tail) {
103666 +               cfs_rq->head = NULL;
103667 +               cfs_rq->tail = NULL;
103669 +       } else if (se == cfs_rq->head) {
103670 +               // if it is the head
103671 +               cfs_rq->head            = cfs_rq->head->next;
103672 +               cfs_rq->head->prev      = NULL;
103673 +       } else if (se == cfs_rq->tail) {
103674 +               // if it is the tail
103675 +               cfs_rq->tail            = cfs_rq->tail->prev;
103676 +               cfs_rq->tail->next      = NULL;
103677 +       } else {
103678 +               // if in the middle
103679 +               struct cacule_node *prev = se->prev;
103680 +               struct cacule_node *next = se->next;
103682 +               prev->next = next;
103684 +               if (next)
103685 +                       next->prev = prev;
103686 +       }
103689 +struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
103691 +       return se_of(cfs_rq->head);
103693 +#else
103695   * Enqueue an entity into the rb-tree:
103696   */
103697 @@ -608,16 +799,24 @@ static struct sched_entity *__pick_next_entity(struct sched_entity *se)
103699         return __node_2_se(next);
103701 +#endif /* CONFIG_CACULE_SCHED */
103703  #ifdef CONFIG_SCHED_DEBUG
103704  struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
103706 +#ifdef CONFIG_CACULE_SCHED
103707 +       if (!cfs_rq->tail)
103708 +               return NULL;
103710 +       return se_of(cfs_rq->tail);
103711 +#else
103712         struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);
103714         if (!last)
103715                 return NULL;
103717         return __node_2_se(last);
103718 +#endif /* CONFIG_CACULE_SCHED */
103721  /**************************************************************
103722 @@ -682,7 +881,13 @@ static u64 __sched_period(unsigned long nr_running)
103723   */
103724  static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
103726 -       u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
103727 +       unsigned int nr_running = cfs_rq->nr_running;
103728 +       u64 slice;
103730 +       if (sched_feat(ALT_PERIOD))
103731 +               nr_running = rq_of(cfs_rq)->cfs.h_nr_running;
103733 +       slice = __sched_period(nr_running + !se->on_rq);
103735         for_each_sched_entity(se) {
103736                 struct load_weight *load;
103737 @@ -699,9 +904,14 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
103738                 }
103739                 slice = __calc_delta(slice, se->load.weight, load);
103740         }
103742 +       if (sched_feat(BASE_SLICE))
103743 +               slice = max(slice, (u64)sysctl_sched_min_granularity);
103745         return slice;
103748 +#if !defined(CONFIG_CACULE_SCHED)
103750   * We calculate the vruntime slice of a to-be-inserted task.
103751   *
103752 @@ -711,6 +921,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
103754         return calc_delta_fair(sched_slice(cfs_rq, se), se);
103756 +#endif /* CONFIG_CACULE_SCHED */
103758  #include "pelt.h"
103759  #ifdef CONFIG_SMP
103760 @@ -818,14 +1029,51 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq)
103762  #endif /* CONFIG_SMP */
103764 +#ifdef CONFIG_CACULE_SCHED
103765 +static void normalize_lifetime(u64 now, struct sched_entity *se)
103767 +       struct cacule_node *cn = &se->cacule_node;
103768 +       u64 max_life_ns, life_time;
103769 +       s64 diff;
103771 +       /*
103772 +        * left shift 20 bits is approximately = * 1000000
103773 +        * we don't need the precision of life time
103774 +        * Ex. for 30s, with left shift (20bits) == 31.457s
103775 +        */
103776 +       max_life_ns     = ((u64) cacule_max_lifetime) << 20;
103777 +       life_time       = now - cn->cacule_start_time;
103778 +       diff            = life_time - max_life_ns;
103780 +       if (diff > 0) {
103781 +               // multiply life_time by 1024 for more precision
103782 +               u64 old_hrrn_x  = (life_time << 7) / ((cn->vruntime >> 3) | 1);
103784 +               // reset life to half max_life (i.e ~15s)
103785 +               cn->cacule_start_time = now - (max_life_ns >> 1);
103787 +               // avoid division by zero
103788 +               if (old_hrrn_x == 0) old_hrrn_x = 1;
103790 +               // reset vruntime based on old hrrn ratio
103791 +               cn->vruntime = (max_life_ns << 9) / old_hrrn_x;
103792 +       }
103794 +#endif /* CONFIG_CACULE_SCHED */
103797   * Update the current task's runtime statistics.
103798   */
103799  static void update_curr(struct cfs_rq *cfs_rq)
103801         struct sched_entity *curr = cfs_rq->curr;
103802 +#ifdef CONFIG_CACULE_SCHED
103803 +       u64 now = sched_clock();
103804 +       u64 delta_exec, delta_fair;
103805 +#else
103806         u64 now = rq_clock_task(rq_of(cfs_rq));
103807         u64 delta_exec;
103808 +#endif
103810         if (unlikely(!curr))
103811                 return;
103812 @@ -842,8 +1090,15 @@ static void update_curr(struct cfs_rq *cfs_rq)
103813         curr->sum_exec_runtime += delta_exec;
103814         schedstat_add(cfs_rq->exec_clock, delta_exec);
103816 +#ifdef CONFIG_CACULE_SCHED
103817 +       delta_fair = calc_delta_fair(delta_exec, curr);
103818 +       curr->vruntime += delta_fair;
103819 +       curr->cacule_node.vruntime += delta_fair;
103820 +       normalize_lifetime(now, curr);
103821 +#else
103822         curr->vruntime += calc_delta_fair(delta_exec, curr);
103823         update_min_vruntime(cfs_rq);
103824 +#endif
103826         if (entity_is_task(curr)) {
103827                 struct task_struct *curtask = task_of(curr);
103828 @@ -1011,7 +1266,6 @@ update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
103829  static inline void
103830  update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
103833         if (!schedstat_enabled())
103834                 return;
103836 @@ -1043,7 +1297,11 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
103837         /*
103838          * We are starting a new run period:
103839          */
103840 +#ifdef CONFIG_CACULE_SCHED
103841 +       se->exec_start = sched_clock();
103842 +#else
103843         se->exec_start = rq_clock_task(rq_of(cfs_rq));
103844 +#endif
103847  /**************************************************
103848 @@ -3941,6 +4199,8 @@ static inline void util_est_dequeue(struct cfs_rq *cfs_rq,
103849         trace_sched_util_est_cfs_tp(cfs_rq);
103852 +#define UTIL_EST_MARGIN (SCHED_CAPACITY_SCALE / 100)
103855   * Check if a (signed) value is within a specified (unsigned) margin,
103856   * based on the observation that:
103857 @@ -3958,7 +4218,7 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
103858                                    struct task_struct *p,
103859                                    bool task_sleep)
103861 -       long last_ewma_diff;
103862 +       long last_ewma_diff, last_enqueued_diff;
103863         struct util_est ue;
103865         if (!sched_feat(UTIL_EST))
103866 @@ -3979,6 +4239,8 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
103867         if (ue.enqueued & UTIL_AVG_UNCHANGED)
103868                 return;
103870 +       last_enqueued_diff = ue.enqueued;
103872         /*
103873          * Reset EWMA on utilization increases, the moving average is used only
103874          * to smooth utilization decreases.
103875 @@ -3992,12 +4254,17 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
103876         }
103878         /*
103879 -        * Skip update of task's estimated utilization when its EWMA is
103880 +        * Skip update of task's estimated utilization when its members are
103881          * already ~1% close to its last activation value.
103882          */
103883         last_ewma_diff = ue.enqueued - ue.ewma;
103884 -       if (within_margin(last_ewma_diff, (SCHED_CAPACITY_SCALE / 100)))
103885 +       last_enqueued_diff -= ue.enqueued;
103886 +       if (within_margin(last_ewma_diff, UTIL_EST_MARGIN)) {
103887 +               if (!within_margin(last_enqueued_diff, UTIL_EST_MARGIN))
103888 +                       goto done;
103890                 return;
103891 +       }
103893         /*
103894          * To avoid overestimation of actual task utilization, skip updates if
103895 @@ -4097,7 +4364,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
103897  static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
103899 -#ifdef CONFIG_SCHED_DEBUG
103900 +#if defined(CONFIG_SCHED_DEBUG) && !defined(CONFIG_CACULE_SCHED)
103901         s64 d = se->vruntime - cfs_rq->min_vruntime;
103903         if (d < 0)
103904 @@ -4108,6 +4375,7 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
103905  #endif
103908 +#if !defined(CONFIG_CACULE_SCHED)
103909  static void
103910  place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
103912 @@ -4139,6 +4407,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
103913         /* ensure we never gain time by being placed backwards. */
103914         se->vruntime = max_vruntime(se->vruntime, vruntime);
103916 +#endif /* CONFIG_CACULE_SCHED */
103918  static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
103920 @@ -4197,18 +4466,23 @@ static inline bool cfs_bandwidth_used(void);
103921  static void
103922  enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
103924 +#if !defined(CONFIG_CACULE_SCHED)
103925         bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED);
103926 +#endif
103927         bool curr = cfs_rq->curr == se;
103929 +#if !defined(CONFIG_CACULE_SCHED)
103930         /*
103931          * If we're the current task, we must renormalise before calling
103932          * update_curr().
103933          */
103934         if (renorm && curr)
103935                 se->vruntime += cfs_rq->min_vruntime;
103936 +#endif
103938         update_curr(cfs_rq);
103940 +#if !defined(CONFIG_CACULE_SCHED)
103941         /*
103942          * Otherwise, renormalise after, such that we're placed at the current
103943          * moment in time, instead of some random moment in the past. Being
103944 @@ -4217,6 +4491,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
103945          */
103946         if (renorm && !curr)
103947                 se->vruntime += cfs_rq->min_vruntime;
103948 +#endif
103950         /*
103951          * When enqueuing a sched_entity, we must:
103952 @@ -4231,8 +4506,10 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
103953         update_cfs_group(se);
103954         account_entity_enqueue(cfs_rq, se);
103956 +#if !defined(CONFIG_CACULE_SCHED)
103957         if (flags & ENQUEUE_WAKEUP)
103958                 place_entity(cfs_rq, se, 0);
103959 +#endif
103961         check_schedstat_required();
103962         update_stats_enqueue(cfs_rq, se, flags);
103963 @@ -4253,6 +4530,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
103964                 check_enqueue_throttle(cfs_rq);
103967 +#if !defined(CONFIG_CACULE_SCHED)
103968  static void __clear_buddies_last(struct sched_entity *se)
103970         for_each_sched_entity(se) {
103971 @@ -4297,6 +4575,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
103972         if (cfs_rq->skip == se)
103973                 __clear_buddies_skip(se);
103975 +#endif /* !CONFIG_CACULE_SCHED */
103977  static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
103979 @@ -4321,13 +4600,16 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
103981         update_stats_dequeue(cfs_rq, se, flags);
103983 +#if !defined(CONFIG_CACULE_SCHED)
103984         clear_buddies(cfs_rq, se);
103985 +#endif
103987         if (se != cfs_rq->curr)
103988                 __dequeue_entity(cfs_rq, se);
103989         se->on_rq = 0;
103990         account_entity_dequeue(cfs_rq, se);
103992 +#if !defined(CONFIG_CACULE_SCHED)
103993         /*
103994          * Normalize after update_curr(); which will also have moved
103995          * min_vruntime if @se is the one holding it back. But before doing
103996 @@ -4336,12 +4618,14 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
103997          */
103998         if (!(flags & DEQUEUE_SLEEP))
103999                 se->vruntime -= cfs_rq->min_vruntime;
104000 +#endif
104002         /* return excess runtime on last dequeue */
104003         return_cfs_rq_runtime(cfs_rq);
104005         update_cfs_group(se);
104007 +#if !defined(CONFIG_CACULE_SCHED)
104008         /*
104009          * Now advance min_vruntime if @se was the entity holding it back,
104010          * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
104011 @@ -4350,8 +4634,21 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
104012          */
104013         if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
104014                 update_min_vruntime(cfs_rq);
104015 +#endif
104018 +#ifdef CONFIG_CACULE_SCHED
104020 + * Preempt the current task with a newly woken task if needed:
104021 + */
104022 +static void
104023 +check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
104025 +       // does head have higher IS than curr
104026 +       if (entity_before(sched_clock(), &curr->cacule_node, cfs_rq->head) == 1)
104027 +               resched_curr(rq_of(cfs_rq));
104029 +#else
104031   * Preempt the current task with a newly woken task if needed:
104032   */
104033 @@ -4391,6 +4688,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
104034         if (delta > ideal_runtime)
104035                 resched_curr(rq_of(cfs_rq));
104037 +#endif /* CONFIG_CACULE_SCHED */
104039  static void
104040  set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
104041 @@ -4425,6 +4723,21 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
104042         se->prev_sum_exec_runtime = se->sum_exec_runtime;
104045 +#ifdef CONFIG_CACULE_SCHED
104046 +static struct sched_entity *
104047 +pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
104049 +       struct cacule_node *se = cfs_rq->head;
104051 +       if (unlikely(!se))
104052 +               se = &curr->cacule_node;
104053 +       else if (unlikely(curr
104054 +                       && entity_before(sched_clock(), se, &curr->cacule_node) == 1))
104055 +               se = &curr->cacule_node;
104057 +       return se_of(se);
104059 +#else
104060  static int
104061  wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
104063 @@ -4485,6 +4798,7 @@ pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
104065         return se;
104067 +#endif /* CONFIG_CACULE_SCHED */
104069  static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
104071 @@ -5587,7 +5901,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
104072         hrtick_update(rq);
104075 +#if !defined(CONFIG_CACULE_SCHED)
104076  static void set_next_buddy(struct sched_entity *se);
104077 +#endif
104080   * The dequeue_task method is called before nr_running is
104081 @@ -5619,12 +5935,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
104082                 if (cfs_rq->load.weight) {
104083                         /* Avoid re-evaluating load for this entity: */
104084                         se = parent_entity(se);
104085 +#if !defined(CONFIG_CACULE_SCHED)
104086                         /*
104087                          * Bias pick_next to pick a task from this cfs_rq, as
104088                          * p is sleeping when it is within its sched_slice.
104089                          */
104090                         if (task_sleep && se && !throttled_hierarchy(cfs_rq))
104091                                 set_next_buddy(se);
104092 +#endif
104093                         break;
104094                 }
104095                 flags |= DEQUEUE_SLEEP;
104096 @@ -5740,6 +6058,7 @@ static unsigned long capacity_of(int cpu)
104097         return cpu_rq(cpu)->cpu_capacity;
104100 +#if !defined(CONFIG_CACULE_SCHED)
104101  static void record_wakee(struct task_struct *p)
104103         /*
104104 @@ -5786,6 +6105,7 @@ static int wake_wide(struct task_struct *p)
104105                 return 0;
104106         return 1;
104108 +#endif /* CONFIG_CACULE_SCHED */
104111   * The purpose of wake_affine() is to quickly determine on which CPU we can run
104112 @@ -6098,6 +6418,24 @@ static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpu
104113         return -1;
104117 + * Scan the local SMT mask for idle CPUs.
104118 + */
104119 +static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
104121 +       int cpu;
104123 +       for_each_cpu(cpu, cpu_smt_mask(target)) {
104124 +               if (!cpumask_test_cpu(cpu, p->cpus_ptr) ||
104125 +                   !cpumask_test_cpu(cpu, sched_domain_span(sd)))
104126 +                       continue;
104127 +               if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
104128 +                       return cpu;
104129 +       }
104131 +       return -1;
104134  #else /* CONFIG_SCHED_SMT */
104136  static inline void set_idle_cores(int cpu, int val)
104137 @@ -6114,6 +6452,11 @@ static inline int select_idle_core(struct task_struct *p, int core, struct cpuma
104138         return __select_idle_cpu(core);
104141 +static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
104143 +       return -1;
104146  #endif /* CONFIG_SCHED_SMT */
104149 @@ -6121,11 +6464,10 @@ static inline int select_idle_core(struct task_struct *p, int core, struct cpuma
104150   * comparing the average scan cost (tracked in sd->avg_scan_cost) against the
104151   * average idle time for this rq (as found in rq->avg_idle).
104152   */
104153 -static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
104154 +static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool has_idle_core, int target)
104156         struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
104157         int i, cpu, idle_cpu = -1, nr = INT_MAX;
104158 -       bool smt = test_idle_cores(target, false);
104159         int this = smp_processor_id();
104160         struct sched_domain *this_sd;
104161         u64 time;
104162 @@ -6136,7 +6478,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
104164         cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
104166 -       if (sched_feat(SIS_PROP) && !smt) {
104167 +       if (sched_feat(SIS_PROP) && !has_idle_core) {
104168                 u64 avg_cost, avg_idle, span_avg;
104170                 /*
104171 @@ -6156,7 +6498,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
104172         }
104174         for_each_cpu_wrap(cpu, cpus, target) {
104175 -               if (smt) {
104176 +               if (has_idle_core) {
104177                         i = select_idle_core(p, cpu, cpus, &idle_cpu);
104178                         if ((unsigned int)i < nr_cpumask_bits)
104179                                 return i;
104180 @@ -6170,10 +6512,10 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
104181                 }
104182         }
104184 -       if (smt)
104185 -               set_idle_cores(this, false);
104186 +       if (has_idle_core)
104187 +               set_idle_cores(target, false);
104189 -       if (sched_feat(SIS_PROP) && !smt) {
104190 +       if (sched_feat(SIS_PROP) && !has_idle_core) {
104191                 time = cpu_clock(this) - time;
104192                 update_avg(&this_sd->avg_scan_cost, time);
104193         }
104194 @@ -6228,6 +6570,7 @@ static inline bool asym_fits_capacity(int task_util, int cpu)
104195   */
104196  static int select_idle_sibling(struct task_struct *p, int prev, int target)
104198 +       bool has_idle_core = false;
104199         struct sched_domain *sd;
104200         unsigned long task_util;
104201         int i, recent_used_cpu;
104202 @@ -6307,7 +6650,17 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
104203         if (!sd)
104204                 return target;
104206 -       i = select_idle_cpu(p, sd, target);
104207 +       if (sched_smt_active()) {
104208 +               has_idle_core = test_idle_cores(target, false);
104210 +               if (!has_idle_core && cpus_share_cache(prev, target)) {
104211 +                       i = select_idle_smt(p, sd, prev);
104212 +                       if ((unsigned int)i < nr_cpumask_bits)
104213 +                               return i;
104214 +               }
104215 +       }
104217 +       i = select_idle_cpu(p, sd, has_idle_core, target);
104218         if ((unsigned)i < nr_cpumask_bits)
104219                 return i;
104221 @@ -6455,6 +6808,7 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
104222         return min_t(unsigned long, util, capacity_orig_of(cpu));
104225 +#if !defined(CONFIG_CACULE_SCHED)
104227   * Predicts what cpu_util(@cpu) would return if @p was migrated (and enqueued)
104228   * to @dst_cpu.
104229 @@ -6518,8 +6872,24 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
104230          * its pd list and will not be accounted by compute_energy().
104231          */
104232         for_each_cpu_and(cpu, pd_mask, cpu_online_mask) {
104233 -               unsigned long cpu_util, util_cfs = cpu_util_next(cpu, p, dst_cpu);
104234 -               struct task_struct *tsk = cpu == dst_cpu ? p : NULL;
104235 +               unsigned long util_freq = cpu_util_next(cpu, p, dst_cpu);
104236 +               unsigned long cpu_util, util_running = util_freq;
104237 +               struct task_struct *tsk = NULL;
104239 +               /*
104240 +                * When @p is placed on @cpu:
104241 +                *
104242 +                * util_running = max(cpu_util, cpu_util_est) +
104243 +                *                max(task_util, _task_util_est)
104244 +                *
104245 +                * while cpu_util_next is: max(cpu_util + task_util,
104246 +                *                             cpu_util_est + _task_util_est)
104247 +                */
104248 +               if (cpu == dst_cpu) {
104249 +                       tsk = p;
104250 +                       util_running =
104251 +                               cpu_util_next(cpu, p, -1) + task_util_est(p);
104252 +               }
104254                 /*
104255                  * Busy time computation: utilization clamping is not
104256 @@ -6527,7 +6897,7 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
104257                  * is already enough to scale the EM reported power
104258                  * consumption at the (eventually clamped) cpu_capacity.
104259                  */
104260 -               sum_util += effective_cpu_util(cpu, util_cfs, cpu_cap,
104261 +               sum_util += effective_cpu_util(cpu, util_running, cpu_cap,
104262                                                ENERGY_UTIL, NULL);
104264                 /*
104265 @@ -6537,7 +6907,7 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
104266                  * NOTE: in case RT tasks are running, by default the
104267                  * FREQUENCY_UTIL's utilization can be max OPP.
104268                  */
104269 -               cpu_util = effective_cpu_util(cpu, util_cfs, cpu_cap,
104270 +               cpu_util = effective_cpu_util(cpu, util_freq, cpu_cap,
104271                                               FREQUENCY_UTIL, tsk);
104272                 max_util = max(max_util, cpu_util);
104273         }
104274 @@ -6688,6 +7058,57 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
104276         return -1;
104278 +#endif /* CONFIG_CACULE_SCHED */
104280 +#ifdef CONFIG_CACULE_SCHED
104281 +static int
104282 +find_least_IS_cpu(struct task_struct *p)
104284 +       struct cfs_rq *cfs_rq;
104285 +       unsigned int max_IS = 0;
104286 +       unsigned int IS, IS_c, IS_h;
104287 +       struct sched_entity *curr_se;
104288 +       struct cacule_node *cn, *head;
104289 +       int cpu_i;
104290 +       int new_cpu = -1;
104292 +       for_each_online_cpu(cpu_i) {
104293 +               if (!cpumask_test_cpu(cpu_i, p->cpus_ptr))
104294 +                       continue;
104296 +               cn = NULL;
104297 +               cfs_rq = &cpu_rq(cpu_i)->cfs;
104299 +               curr_se = cfs_rq->curr;
104300 +               head = cfs_rq->head;
104302 +               if (!curr_se && head)
104303 +                       cn = head;
104304 +               else if (curr_se && !head)
104305 +                       cn = &curr_se->cacule_node;
104306 +               else if (curr_se && head) {
104307 +                       IS_c = calc_interactivity(sched_clock(), &curr_se->cacule_node);
104308 +                       IS_h = calc_interactivity(sched_clock(), head);
104310 +                       IS = IS_c > IS_h? IS_c : IS_h;
104311 +                       goto compare;
104312 +               }
104314 +               if (!cn)
104315 +                       return cpu_i;
104317 +               IS = calc_interactivity(sched_clock(), cn);
104319 +compare:
104320 +               if (IS > max_IS) {
104321 +                       max_IS = IS;
104322 +                       new_cpu = cpu_i;
104323 +               }
104324 +       }
104326 +       return new_cpu;
104328 +#endif
104331   * select_task_rq_fair: Select target runqueue for the waking task in domains
104332 @@ -6712,6 +7133,26 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
104333         /* SD_flags and WF_flags share the first nibble */
104334         int sd_flag = wake_flags & 0xF;
104336 +#ifdef CONFIG_CACULE_SCHED
104337 +       struct sched_entity *se = &p->se;
104339 +       if (!is_interactive(&se->cacule_node))
104340 +               goto cfs_way;
104342 +       // check first if the prev cpu
104343 +       // has 0 tasks
104344 +       if (cpumask_test_cpu(prev_cpu, p->cpus_ptr) &&
104345 +           cpu_rq(prev_cpu)->cfs.nr_running == 0)
104346 +               return prev_cpu;
104348 +       new_cpu = find_least_IS_cpu(p);
104350 +       if (new_cpu != -1)
104351 +               return new_cpu;
104353 +       new_cpu = prev_cpu;
104354 +cfs_way:
104355 +#else
104356         if (wake_flags & WF_TTWU) {
104357                 record_wakee(p);
104359 @@ -6724,6 +7165,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
104361                 want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr);
104362         }
104363 +#endif /* CONFIG_CACULE_SCHED */
104365         rcu_read_lock();
104366         for_each_domain(cpu, tmp) {
104367 @@ -6770,6 +7212,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se);
104368   */
104369  static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
104371 +#if !defined(CONFIG_CACULE_SCHED)
104372         /*
104373          * As blocked tasks retain absolute vruntime the migration needs to
104374          * deal with this by subtracting the old and adding the new
104375 @@ -6795,6 +7238,7 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
104377                 se->vruntime -= min_vruntime;
104378         }
104379 +#endif /* CONFIG_CACULE_SCHED */
104381         if (p->on_rq == TASK_ON_RQ_MIGRATING) {
104382                 /*
104383 @@ -6840,6 +7284,7 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
104385  #endif /* CONFIG_SMP */
104387 +#if !defined(CONFIG_CACULE_SCHED)
104388  static unsigned long wakeup_gran(struct sched_entity *se)
104390         unsigned long gran = sysctl_sched_wakeup_granularity;
104391 @@ -6918,6 +7363,7 @@ static void set_skip_buddy(struct sched_entity *se)
104392         for_each_sched_entity(se)
104393                 cfs_rq_of(se)->skip = se;
104395 +#endif /* CONFIG_CACULE_SCHED */
104398   * Preempt the current task with a newly woken task if needed:
104399 @@ -6926,9 +7372,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
104401         struct task_struct *curr = rq->curr;
104402         struct sched_entity *se = &curr->se, *pse = &p->se;
104404 +#if !defined(CONFIG_CACULE_SCHED)
104405         struct cfs_rq *cfs_rq = task_cfs_rq(curr);
104406         int scale = cfs_rq->nr_running >= sched_nr_latency;
104407         int next_buddy_marked = 0;
104408 +#endif /* CONFIG_CACULE_SCHED */
104410         if (unlikely(se == pse))
104411                 return;
104412 @@ -6942,10 +7391,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
104413         if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
104414                 return;
104416 +#if !defined(CONFIG_CACULE_SCHED)
104417         if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
104418                 set_next_buddy(pse);
104419                 next_buddy_marked = 1;
104420         }
104421 +#endif /* CONFIG_CACULE_SCHED */
104423         /*
104424          * We can come here with TIF_NEED_RESCHED already set from new task
104425 @@ -6975,6 +7426,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
104426         find_matching_se(&se, &pse);
104427         update_curr(cfs_rq_of(se));
104428         BUG_ON(!pse);
104430 +#ifdef CONFIG_CACULE_SCHED
104431 +       if (entity_before(sched_clock(), &se->cacule_node, &pse->cacule_node) == 1)
104432 +               goto preempt;
104433 +#else
104434         if (wakeup_preempt_entity(se, pse) == 1) {
104435                 /*
104436                  * Bias pick_next to pick the sched entity that is
104437 @@ -6984,11 +7440,14 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
104438                         set_next_buddy(pse);
104439                 goto preempt;
104440         }
104441 +#endif /* CONFIG_CACULE_SCHED */
104443         return;
104445  preempt:
104446         resched_curr(rq);
104448 +#if !defined(CONFIG_CACULE_SCHED)
104449         /*
104450          * Only set the backward buddy when the current task is still
104451          * on the rq. This can happen when a wakeup gets interleaved
104452 @@ -7003,6 +7462,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
104454         if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
104455                 set_last_buddy(se);
104456 +#endif /* CONFIG_CACULE_SCHED */
104459  struct task_struct *
104460 @@ -7177,7 +7637,10 @@ static void yield_task_fair(struct rq *rq)
104462         struct task_struct *curr = rq->curr;
104463         struct cfs_rq *cfs_rq = task_cfs_rq(curr);
104465 +#if !defined(CONFIG_CACULE_SCHED)
104466         struct sched_entity *se = &curr->se;
104467 +#endif
104469         /*
104470          * Are we the only task in the tree?
104471 @@ -7185,7 +7648,9 @@ static void yield_task_fair(struct rq *rq)
104472         if (unlikely(rq->nr_running == 1))
104473                 return;
104475 +#if !defined(CONFIG_CACULE_SCHED)
104476         clear_buddies(cfs_rq, se);
104477 +#endif
104479         if (curr->policy != SCHED_BATCH) {
104480                 update_rq_clock(rq);
104481 @@ -7201,7 +7666,9 @@ static void yield_task_fair(struct rq *rq)
104482                 rq_clock_skip_update(rq);
104483         }
104485 +#if !defined(CONFIG_CACULE_SCHED)
104486         set_skip_buddy(se);
104487 +#endif
104490  static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
104491 @@ -7212,8 +7679,10 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
104492         if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
104493                 return false;
104495 +#if !defined(CONFIG_CACULE_SCHED)
104496         /* Tell the scheduler that we'd really like pse to run next. */
104497         set_next_buddy(se);
104498 +#endif
104500         yield_task_fair(rq);
104502 @@ -7441,6 +7910,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
104503         if (env->sd->flags & SD_SHARE_CPUCAPACITY)
104504                 return 0;
104506 +#if !defined(CONFIG_CACULE_SCHED)
104507         /*
104508          * Buddy candidates are cache hot:
104509          */
104510 @@ -7448,6 +7918,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
104511                         (&p->se == cfs_rq_of(&p->se)->next ||
104512                          &p->se == cfs_rq_of(&p->se)->last))
104513                 return 1;
104514 +#endif
104516         if (sysctl_sched_migration_cost == -1)
104517                 return 1;
104518 @@ -7539,6 +8010,10 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
104519         if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
104520                 return 0;
104522 +       /* Disregard pcpu kthreads; they are where they need to be. */
104523 +       if (kthread_is_per_cpu(p))
104524 +               return 0;
104526         if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
104527                 int cpu;
104529 @@ -7708,8 +8183,7 @@ static int detach_tasks(struct lb_env *env)
104530                          * scheduler fails to find a good waiting task to
104531                          * migrate.
104532                          */
104534 -                       if ((load >> env->sd->nr_balance_failed) > env->imbalance)
104535 +                       if (shr_bound(load, env->sd->nr_balance_failed) > env->imbalance)
104536                                 goto next;
104538                         env->imbalance -= load;
104539 @@ -10746,11 +11220,30 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
104540         update_overutilized_status(task_rq(curr));
104543 +#ifdef CONFIG_CACULE_SCHED
104545   * called on fork with the child task as argument from the parent's context
104546   *  - child not yet on the tasklist
104547   *  - preemption disabled
104548   */
104549 + static void task_fork_fair(struct task_struct *p)
104551 +       struct cfs_rq *cfs_rq;
104552 +       struct sched_entity *curr;
104553 +       struct rq *rq = this_rq();
104554 +       struct rq_flags rf;
104556 +       rq_lock(rq, &rf);
104557 +       update_rq_clock(rq);
104559 +       cfs_rq = task_cfs_rq(current);
104560 +       curr = cfs_rq->curr;
104561 +       if (curr)
104562 +               update_curr(cfs_rq);
104564 +       rq_unlock(rq, &rf);
104566 +#else
104567  static void task_fork_fair(struct task_struct *p)
104569         struct cfs_rq *cfs_rq;
104570 @@ -10781,6 +11274,7 @@ static void task_fork_fair(struct task_struct *p)
104571         se->vruntime -= cfs_rq->min_vruntime;
104572         rq_unlock(rq, &rf);
104574 +#endif /* CONFIG_CACULE_SCHED */
104577   * Priority of the task has changed. Check to see if we preempt
104578 @@ -10844,16 +11338,22 @@ static void propagate_entity_cfs_rq(struct sched_entity *se)
104580         struct cfs_rq *cfs_rq;
104582 +       list_add_leaf_cfs_rq(cfs_rq_of(se));
104584         /* Start to propagate at parent */
104585         se = se->parent;
104587         for_each_sched_entity(se) {
104588                 cfs_rq = cfs_rq_of(se);
104590 -               if (cfs_rq_throttled(cfs_rq))
104591 -                       break;
104592 +               if (!cfs_rq_throttled(cfs_rq)){
104593 +                       update_load_avg(cfs_rq, se, UPDATE_TG);
104594 +                       list_add_leaf_cfs_rq(cfs_rq);
104595 +                       continue;
104596 +               }
104598 -               update_load_avg(cfs_rq, se, UPDATE_TG);
104599 +               if (list_add_leaf_cfs_rq(cfs_rq))
104600 +                       break;
104601         }
104603  #else
104604 @@ -10893,6 +11393,8 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
104605  static void detach_task_cfs_rq(struct task_struct *p)
104607         struct sched_entity *se = &p->se;
104609 +#if !defined(CONFIG_CACULE_SCHED)
104610         struct cfs_rq *cfs_rq = cfs_rq_of(se);
104612         if (!vruntime_normalized(p)) {
104613 @@ -10903,6 +11405,7 @@ static void detach_task_cfs_rq(struct task_struct *p)
104614                 place_entity(cfs_rq, se, 0);
104615                 se->vruntime -= cfs_rq->min_vruntime;
104616         }
104617 +#endif
104619         detach_entity_cfs_rq(se);
104621 @@ -10910,12 +11413,17 @@ static void detach_task_cfs_rq(struct task_struct *p)
104622  static void attach_task_cfs_rq(struct task_struct *p)
104624         struct sched_entity *se = &p->se;
104626 +#if !defined(CONFIG_CACULE_SCHED)
104627         struct cfs_rq *cfs_rq = cfs_rq_of(se);
104628 +#endif
104630         attach_entity_cfs_rq(se);
104632 +#if !defined(CONFIG_CACULE_SCHED)
104633         if (!vruntime_normalized(p))
104634                 se->vruntime += cfs_rq->min_vruntime;
104635 +#endif
104638  static void switched_from_fair(struct rq *rq, struct task_struct *p)
104639 @@ -10971,13 +11479,22 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
104640  void init_cfs_rq(struct cfs_rq *cfs_rq)
104642         cfs_rq->tasks_timeline = RB_ROOT_CACHED;
104644 +#if !defined(CONFIG_CACULE_SCHED)
104645         cfs_rq->min_vruntime = (u64)(-(1LL << 20));
104646  #ifndef CONFIG_64BIT
104647         cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
104648  #endif
104649 +#endif /* CONFIG_CACULE_SCHED */
104651  #ifdef CONFIG_SMP
104652         raw_spin_lock_init(&cfs_rq->removed.lock);
104653  #endif
104655 +#ifdef CONFIG_CACULE_SCHED
104656 +       cfs_rq->head = NULL;
104657 +       cfs_rq->tail = NULL;
104658 +#endif
104661  #ifdef CONFIG_FAIR_GROUP_SCHED
104662 diff --git a/kernel/sched/features.h b/kernel/sched/features.h
104663 index 1bc2b158fc51..e911111df83a 100644
104664 --- a/kernel/sched/features.h
104665 +++ b/kernel/sched/features.h
104666 @@ -90,3 +90,6 @@ SCHED_FEAT(WA_BIAS, true)
104667   */
104668  SCHED_FEAT(UTIL_EST, true)
104669  SCHED_FEAT(UTIL_EST_FASTUP, true)
104671 +SCHED_FEAT(ALT_PERIOD, true)
104672 +SCHED_FEAT(BASE_SLICE, true)
104673 diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
104674 index 967732c0766c..651218ded981 100644
104675 --- a/kernel/sched/psi.c
104676 +++ b/kernel/sched/psi.c
104677 @@ -711,14 +711,15 @@ static void psi_group_change(struct psi_group *group, int cpu,
104678         for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
104679                 if (!(m & (1 << t)))
104680                         continue;
104681 -               if (groupc->tasks[t] == 0 && !psi_bug) {
104682 +               if (groupc->tasks[t]) {
104683 +                       groupc->tasks[t]--;
104684 +               } else if (!psi_bug) {
104685                         printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u] clear=%x set=%x\n",
104686                                         cpu, t, groupc->tasks[0],
104687                                         groupc->tasks[1], groupc->tasks[2],
104688                                         groupc->tasks[3], clear, set);
104689                         psi_bug = 1;
104690                 }
104691 -               groupc->tasks[t]--;
104692         }
104694         for (t = 0; set; set &= ~(1 << t), t++)
104695 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
104696 index 10a1522b1e30..0eb4fca83ffe 100644
104697 --- a/kernel/sched/sched.h
104698 +++ b/kernel/sched/sched.h
104699 @@ -204,6 +204,13 @@ static inline void update_avg(u64 *avg, u64 sample)
104700         *avg += diff / 8;
104704 + * Shifting a value by an exponent greater *or equal* to the size of said value
104705 + * is UB; cap at size-1.
104706 + */
104707 +#define shr_bound(val, shift)                                                  \
104708 +       (val >> min_t(typeof(shift), shift, BITS_PER_TYPE(typeof(val)) - 1))
104711   * !! For sched_setattr_nocheck() (kernel) only !!
104712   *
104713 @@ -516,10 +523,13 @@ struct cfs_rq {
104714         unsigned int            idle_h_nr_running; /* SCHED_IDLE */
104716         u64                     exec_clock;
104718 +#if !defined(CONFIG_CACULE_SCHED)
104719         u64                     min_vruntime;
104720  #ifndef CONFIG_64BIT
104721         u64                     min_vruntime_copy;
104722  #endif
104723 +#endif /* CONFIG_CACULE_SCHED */
104725         struct rb_root_cached   tasks_timeline;
104727 @@ -528,9 +538,15 @@ struct cfs_rq {
104728          * It is set to NULL otherwise (i.e when none are currently running).
104729          */
104730         struct sched_entity     *curr;
104731 +#ifdef CONFIG_CACULE_SCHED
104732 +       struct cacule_node      *head;
104733 +       struct cacule_node      *tail;
104735 +#else
104736         struct sched_entity     *next;
104737         struct sched_entity     *last;
104738         struct sched_entity     *skip;
104739 +#endif // CONFIG_CACULE_SCHED
104741  #ifdef CONFIG_SCHED_DEBUG
104742         unsigned int            nr_spread_over;
104743 diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
104744 index 09d35044bd88..12f80587e127 100644
104745 --- a/kernel/sched/topology.c
104746 +++ b/kernel/sched/topology.c
104747 @@ -723,35 +723,6 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
104748         for (tmp = sd; tmp; tmp = tmp->parent)
104749                 numa_distance += !!(tmp->flags & SD_NUMA);
104751 -       /*
104752 -        * FIXME: Diameter >=3 is misrepresented.
104753 -        *
104754 -        * Smallest diameter=3 topology is:
104755 -        *
104756 -        *   node   0   1   2   3
104757 -        *     0:  10  20  30  40
104758 -        *     1:  20  10  20  30
104759 -        *     2:  30  20  10  20
104760 -        *     3:  40  30  20  10
104761 -        *
104762 -        *   0 --- 1 --- 2 --- 3
104763 -        *
104764 -        * NUMA-3       0-3             N/A             N/A             0-3
104765 -        *  groups:     {0-2},{1-3}                                     {1-3},{0-2}
104766 -        *
104767 -        * NUMA-2       0-2             0-3             0-3             1-3
104768 -        *  groups:     {0-1},{1-3}     {0-2},{2-3}     {1-3},{0-1}     {2-3},{0-2}
104769 -        *
104770 -        * NUMA-1       0-1             0-2             1-3             2-3
104771 -        *  groups:     {0},{1}         {1},{2},{0}     {2},{3},{1}     {3},{2}
104772 -        *
104773 -        * NUMA-0       0               1               2               3
104774 -        *
104775 -        * The NUMA-2 groups for nodes 0 and 3 are obviously buggered, as the
104776 -        * group span isn't a subset of the domain span.
104777 -        */
104778 -       WARN_ONCE(numa_distance > 2, "Shortest NUMA path spans too many nodes\n");
104780         sched_domain_debug(sd, cpu);
104782         rq_attach_root(rq, rd);
104783 @@ -982,6 +953,31 @@ static void init_overlap_sched_group(struct sched_domain *sd,
104784         sg->sgc->max_capacity = SCHED_CAPACITY_SCALE;
104787 +static struct sched_domain *
104788 +find_descended_sibling(struct sched_domain *sd, struct sched_domain *sibling)
104790 +       /*
104791 +        * The proper descendant would be the one whose child won't span out
104792 +        * of sd
104793 +        */
104794 +       while (sibling->child &&
104795 +              !cpumask_subset(sched_domain_span(sibling->child),
104796 +                              sched_domain_span(sd)))
104797 +               sibling = sibling->child;
104799 +       /*
104800 +        * As we are referencing sgc across different topology level, we need
104801 +        * to go down to skip those sched_domains which don't contribute to
104802 +        * scheduling because they will be degenerated in cpu_attach_domain
104803 +        */
104804 +       while (sibling->child &&
104805 +              cpumask_equal(sched_domain_span(sibling->child),
104806 +                            sched_domain_span(sibling)))
104807 +               sibling = sibling->child;
104809 +       return sibling;
104812  static int
104813  build_overlap_sched_groups(struct sched_domain *sd, int cpu)
104815 @@ -1015,6 +1011,41 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
104816                 if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
104817                         continue;
104819 +               /*
104820 +                * Usually we build sched_group by sibling's child sched_domain
104821 +                * But for machines whose NUMA diameter are 3 or above, we move
104822 +                * to build sched_group by sibling's proper descendant's child
104823 +                * domain because sibling's child sched_domain will span out of
104824 +                * the sched_domain being built as below.
104825 +                *
104826 +                * Smallest diameter=3 topology is:
104827 +                *
104828 +                *   node   0   1   2   3
104829 +                *     0:  10  20  30  40
104830 +                *     1:  20  10  20  30
104831 +                *     2:  30  20  10  20
104832 +                *     3:  40  30  20  10
104833 +                *
104834 +                *   0 --- 1 --- 2 --- 3
104835 +                *
104836 +                * NUMA-3       0-3             N/A             N/A             0-3
104837 +                *  groups:     {0-2},{1-3}                                     {1-3},{0-2}
104838 +                *
104839 +                * NUMA-2       0-2             0-3             0-3             1-3
104840 +                *  groups:     {0-1},{1-3}     {0-2},{2-3}     {1-3},{0-1}     {2-3},{0-2}
104841 +                *
104842 +                * NUMA-1       0-1             0-2             1-3             2-3
104843 +                *  groups:     {0},{1}         {1},{2},{0}     {2},{3},{1}     {3},{2}
104844 +                *
104845 +                * NUMA-0       0               1               2               3
104846 +                *
104847 +                * The NUMA-2 groups for nodes 0 and 3 are obviously buggered, as the
104848 +                * group span isn't a subset of the domain span.
104849 +                */
104850 +               if (sibling->child &&
104851 +                   !cpumask_subset(sched_domain_span(sibling->child), span))
104852 +                       sibling = find_descended_sibling(sd, sibling);
104854                 sg = build_group_from_child_sched_domain(sibling, cpu);
104855                 if (!sg)
104856                         goto fail;
104857 @@ -1022,7 +1053,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
104858                 sg_span = sched_group_span(sg);
104859                 cpumask_or(covered, covered, sg_span);
104861 -               init_overlap_sched_group(sd, sg);
104862 +               init_overlap_sched_group(sibling, sg);
104864                 if (!first)
104865                         first = sg;
104866 diff --git a/kernel/smp.c b/kernel/smp.c
104867 index aeb0adfa0606..c678589fbb76 100644
104868 --- a/kernel/smp.c
104869 +++ b/kernel/smp.c
104870 @@ -110,7 +110,7 @@ static DEFINE_PER_CPU(void *, cur_csd_info);
104871  static atomic_t csd_bug_count = ATOMIC_INIT(0);
104873  /* Record current CSD work for current CPU, NULL to erase. */
104874 -static void csd_lock_record(call_single_data_t *csd)
104875 +static void csd_lock_record(struct __call_single_data *csd)
104877         if (!csd) {
104878                 smp_mb(); /* NULL cur_csd after unlock. */
104879 @@ -125,7 +125,7 @@ static void csd_lock_record(call_single_data_t *csd)
104880                   /* Or before unlock, as the case may be. */
104883 -static __always_inline int csd_lock_wait_getcpu(call_single_data_t *csd)
104884 +static __always_inline int csd_lock_wait_getcpu(struct __call_single_data *csd)
104886         unsigned int csd_type;
104888 @@ -140,7 +140,7 @@ static __always_inline int csd_lock_wait_getcpu(call_single_data_t *csd)
104889   * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
104890   * so waiting on other types gets much less information.
104891   */
104892 -static __always_inline bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id)
104893 +static __always_inline bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *ts1, int *bug_id)
104895         int cpu = -1;
104896         int cpux;
104897 @@ -204,7 +204,7 @@ static __always_inline bool csd_lock_wait_toolong(call_single_data_t *csd, u64 t
104898   * previous function call. For multi-cpu calls its even more interesting
104899   * as we'll have to ensure no other cpu is observing our csd.
104900   */
104901 -static __always_inline void csd_lock_wait(call_single_data_t *csd)
104902 +static __always_inline void csd_lock_wait(struct __call_single_data *csd)
104904         int bug_id = 0;
104905         u64 ts0, ts1;
104906 @@ -219,17 +219,17 @@ static __always_inline void csd_lock_wait(call_single_data_t *csd)
104909  #else
104910 -static void csd_lock_record(call_single_data_t *csd)
104911 +static void csd_lock_record(struct __call_single_data *csd)
104915 -static __always_inline void csd_lock_wait(call_single_data_t *csd)
104916 +static __always_inline void csd_lock_wait(struct __call_single_data *csd)
104918         smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
104920  #endif
104922 -static __always_inline void csd_lock(call_single_data_t *csd)
104923 +static __always_inline void csd_lock(struct __call_single_data *csd)
104925         csd_lock_wait(csd);
104926         csd->node.u_flags |= CSD_FLAG_LOCK;
104927 @@ -242,7 +242,7 @@ static __always_inline void csd_lock(call_single_data_t *csd)
104928         smp_wmb();
104931 -static __always_inline void csd_unlock(call_single_data_t *csd)
104932 +static __always_inline void csd_unlock(struct __call_single_data *csd)
104934         WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));
104936 @@ -276,7 +276,7 @@ void __smp_call_single_queue(int cpu, struct llist_node *node)
104937   * for execution on the given CPU. data must already have
104938   * ->func, ->info, and ->flags set.
104939   */
104940 -static int generic_exec_single(int cpu, call_single_data_t *csd)
104941 +static int generic_exec_single(int cpu, struct __call_single_data *csd)
104943         if (cpu == smp_processor_id()) {
104944                 smp_call_func_t func = csd->func;
104945 @@ -542,7 +542,7 @@ EXPORT_SYMBOL(smp_call_function_single);
104946   * NOTE: Be careful, there is unfortunately no current debugging facility to
104947   * validate the correctness of this serialization.
104948   */
104949 -int smp_call_function_single_async(int cpu, call_single_data_t *csd)
104950 +int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
104952         int err = 0;
104954 diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
104955 index 19aa806890d5..1750dfc416d8 100644
104956 --- a/kernel/sys_ni.c
104957 +++ b/kernel/sys_ni.c
104958 @@ -150,6 +150,12 @@ COND_SYSCALL_COMPAT(set_robust_list);
104959  COND_SYSCALL(get_robust_list);
104960  COND_SYSCALL_COMPAT(get_robust_list);
104962 +/* kernel/futex2.c */
104963 +COND_SYSCALL(futex_wait);
104964 +COND_SYSCALL(futex_wake);
104965 +COND_SYSCALL(futex_waitv);
104966 +COND_SYSCALL(futex_requeue);
104968  /* kernel/hrtimer.c */
104970  /* kernel/itimer.c */
104971 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
104972 index 62fbd09b5dc1..c3fe3ddde393 100644
104973 --- a/kernel/sysctl.c
104974 +++ b/kernel/sysctl.c
104975 @@ -120,9 +120,9 @@ static unsigned long long_max = LONG_MAX;
104976  static int one_hundred = 100;
104977  static int two_hundred = 200;
104978  static int one_thousand = 1000;
104979 -#ifdef CONFIG_PRINTK
104980  static int ten_thousand = 10000;
104981 -#endif
104982 +extern int hrtimer_granularity_us;
104983 +extern int hrtimeout_min_us;
104984  #ifdef CONFIG_PERF_EVENTS
104985  static int six_hundred_forty_kb = 640 * 1024;
104986  #endif
104987 @@ -200,6 +200,10 @@ static int min_extfrag_threshold;
104988  static int max_extfrag_threshold = 1000;
104989  #endif
104991 +#ifdef CONFIG_USER_NS
104992 +extern int unprivileged_userns_clone;
104993 +#endif
104995  #endif /* CONFIG_SYSCTL */
104997  #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_SYSCTL)
104998 @@ -1652,6 +1656,24 @@ int proc_do_static_key(struct ctl_table *table, int write,
105001  static struct ctl_table kern_table[] = {
105002 +       {
105003 +               .procname       = "hrtimer_granularity_us",
105004 +               .data           = &hrtimer_granularity_us,
105005 +               .maxlen         = sizeof(int),
105006 +               .mode           = 0644,
105007 +               .proc_handler   = &proc_dointvec_minmax,
105008 +               .extra1         = SYSCTL_ONE,
105009 +               .extra2         = &ten_thousand,
105010 +       },
105011 +       {
105012 +               .procname       = "hrtimeout_min_us",
105013 +               .data           = &hrtimeout_min_us,
105014 +               .maxlen         = sizeof(int),
105015 +               .mode           = 0644,
105016 +               .proc_handler   = &proc_dointvec_minmax,
105017 +               .extra1         = SYSCTL_ONE,
105018 +               .extra2         = &ten_thousand,
105019 +       },
105020         {
105021                 .procname       = "sched_child_runs_first",
105022                 .data           = &sysctl_sched_child_runs_first,
105023 @@ -1659,6 +1681,29 @@ static struct ctl_table kern_table[] = {
105024                 .mode           = 0644,
105025                 .proc_handler   = proc_dointvec,
105026         },
105027 +#ifdef CONFIG_CACULE_SCHED
105028 +       {
105029 +               .procname       = "sched_interactivity_factor",
105030 +               .data           = &interactivity_factor,
105031 +               .maxlen         = sizeof(int),
105032 +               .mode           = 0644,
105033 +               .proc_handler   = proc_dointvec,
105034 +       },
105035 +       {
105036 +               .procname       = "sched_interactivity_threshold",
105037 +               .data           = &interactivity_threshold,
105038 +               .maxlen         = sizeof(unsigned int),
105039 +               .mode           = 0644,
105040 +               .proc_handler   = proc_dointvec,
105041 +       },
105042 +       {
105043 +               .procname       = "sched_max_lifetime_ms",
105044 +               .data           = &cacule_max_lifetime,
105045 +               .maxlen         = sizeof(int),
105046 +               .mode           = 0644,
105047 +               .proc_handler   = proc_dointvec,
105048 +       },
105049 +#endif
105050  #ifdef CONFIG_SCHED_DEBUG
105051         {
105052                 .procname       = "sched_min_granularity_ns",
105053 @@ -1902,6 +1947,15 @@ static struct ctl_table kern_table[] = {
105054                 .proc_handler   = proc_dointvec,
105055         },
105056  #endif
105057 +#ifdef CONFIG_USER_NS
105058 +       {
105059 +               .procname       = "unprivileged_userns_clone",
105060 +               .data           = &unprivileged_userns_clone,
105061 +               .maxlen         = sizeof(int),
105062 +               .mode           = 0644,
105063 +               .proc_handler   = proc_dointvec,
105064 +       },
105065 +#endif
105066  #ifdef CONFIG_PROC_SYSCTL
105067         {
105068                 .procname       = "tainted",
105069 @@ -3093,6 +3147,20 @@ static struct ctl_table vm_table[] = {
105070                 .extra2         = SYSCTL_ONE,
105071         },
105072  #endif
105073 +       {
105074 +               .procname       = "clean_low_kbytes",
105075 +               .data           = &sysctl_clean_low_kbytes,
105076 +               .maxlen         = sizeof(sysctl_clean_low_kbytes),
105077 +               .mode           = 0644,
105078 +               .proc_handler   = proc_doulongvec_minmax,
105079 +       },
105080 +       {
105081 +               .procname       = "clean_min_kbytes",
105082 +               .data           = &sysctl_clean_min_kbytes,
105083 +               .maxlen         = sizeof(sysctl_clean_min_kbytes),
105084 +               .mode           = 0644,
105085 +               .proc_handler   = proc_doulongvec_minmax,
105086 +       },
105087         {
105088                 .procname       = "user_reserve_kbytes",
105089                 .data           = &sysctl_user_reserve_kbytes,
105090 diff --git a/kernel/task_work.c b/kernel/task_work.c
105091 index 9cde961875c0..5c8dea45d4f8 100644
105092 --- a/kernel/task_work.c
105093 +++ b/kernel/task_work.c
105094 @@ -57,6 +57,7 @@ int task_work_add(struct task_struct *task, struct callback_head *work,
105096         return 0;
105098 +EXPORT_SYMBOL(task_work_add);
105100  /**
105101   * task_work_cancel - cancel a pending work added by task_work_add()
105102 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
105103 index 4d94e2b5499d..a7924fedf479 100644
105104 --- a/kernel/time/alarmtimer.c
105105 +++ b/kernel/time/alarmtimer.c
105106 @@ -92,7 +92,7 @@ static int alarmtimer_rtc_add_device(struct device *dev,
105107         if (rtcdev)
105108                 return -EBUSY;
105110 -       if (!rtc->ops->set_alarm)
105111 +       if (!test_bit(RTC_FEATURE_ALARM, rtc->features))
105112                 return -1;
105113         if (!device_may_wakeup(rtc->dev.parent))
105114                 return -1;
105115 diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
105116 index f5490222e134..23db3c39e07a 100644
105117 --- a/kernel/time/clockevents.c
105118 +++ b/kernel/time/clockevents.c
105119 @@ -190,8 +190,9 @@ int clockevents_tick_resume(struct clock_event_device *dev)
105121  #ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST
105123 +int __read_mostly hrtimer_granularity_us = 100;
105124  /* Limit min_delta to a jiffie */
105125 -#define MIN_DELTA_LIMIT                (NSEC_PER_SEC / HZ)
105126 +#define MIN_DELTA_LIMIT                (hrtimer_granularity_us * NSEC_PER_USEC)
105128  /**
105129   * clockevents_increase_min_delta - raise minimum delta of a clock event device
105130 diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
105131 index 5c9d968187ae..7a3d640dc13a 100644
105132 --- a/kernel/time/hrtimer.c
105133 +++ b/kernel/time/hrtimer.c
105134 @@ -2236,3 +2236,113 @@ int __sched schedule_hrtimeout(ktime_t *expires,
105135         return schedule_hrtimeout_range(expires, 0, mode);
105137  EXPORT_SYMBOL_GPL(schedule_hrtimeout);
105140 + * As per schedule_hrtimeout but taskes a millisecond value and returns how
105141 + * many milliseconds are left.
105142 + */
105143 +long __sched schedule_msec_hrtimeout(long timeout)
105145 +       struct hrtimer_sleeper t;
105146 +       int delta, jiffs;
105147 +       ktime_t expires;
105149 +       if (!timeout) {
105150 +               __set_current_state(TASK_RUNNING);
105151 +               return 0;
105152 +       }
105154 +       jiffs = msecs_to_jiffies(timeout);
105155 +       /*
105156 +        * If regular timer resolution is adequate or hrtimer resolution is not
105157 +        * (yet) better than Hz, as would occur during startup, use regular
105158 +        * timers.
105159 +        */
105160 +       if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ || pm_freezing)
105161 +               return schedule_timeout(jiffs);
105163 +       delta = (timeout % 1000) * NSEC_PER_MSEC;
105164 +       expires = ktime_set(0, delta);
105166 +       hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
105167 +       hrtimer_set_expires_range_ns(&t.timer, expires, delta);
105169 +       hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_REL);
105171 +       if (likely(t.task))
105172 +               schedule();
105174 +       hrtimer_cancel(&t.timer);
105175 +       destroy_hrtimer_on_stack(&t.timer);
105177 +       __set_current_state(TASK_RUNNING);
105179 +       expires = hrtimer_expires_remaining(&t.timer);
105180 +       timeout = ktime_to_ms(expires);
105181 +       return timeout < 0 ? 0 : timeout;
105184 +EXPORT_SYMBOL(schedule_msec_hrtimeout);
105186 +#define USECS_PER_SEC 1000000
105187 +extern int hrtimer_granularity_us;
105189 +static inline long schedule_usec_hrtimeout(long timeout)
105191 +       struct hrtimer_sleeper t;
105192 +       ktime_t expires;
105193 +       int delta;
105195 +       if (!timeout) {
105196 +               __set_current_state(TASK_RUNNING);
105197 +               return 0;
105198 +       }
105200 +       if (hrtimer_resolution >= NSEC_PER_SEC / HZ)
105201 +               return schedule_timeout(usecs_to_jiffies(timeout));
105203 +       if (timeout < hrtimer_granularity_us)
105204 +               timeout = hrtimer_granularity_us;
105205 +       delta = (timeout % USECS_PER_SEC) * NSEC_PER_USEC;
105206 +       expires = ktime_set(0, delta);
105208 +       hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
105209 +       hrtimer_set_expires_range_ns(&t.timer, expires, delta);
105211 +       hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_REL);
105213 +       if (likely(t.task))
105214 +               schedule();
105216 +       hrtimer_cancel(&t.timer);
105217 +       destroy_hrtimer_on_stack(&t.timer);
105219 +       __set_current_state(TASK_RUNNING);
105221 +       expires = hrtimer_expires_remaining(&t.timer);
105222 +       timeout = ktime_to_us(expires);
105223 +       return timeout < 0 ? 0 : timeout;
105226 +int __read_mostly hrtimeout_min_us = 500;
105228 +long __sched schedule_min_hrtimeout(void)
105230 +       return usecs_to_jiffies(schedule_usec_hrtimeout(hrtimeout_min_us));
105233 +EXPORT_SYMBOL(schedule_min_hrtimeout);
105235 +long __sched schedule_msec_hrtimeout_interruptible(long timeout)
105237 +       __set_current_state(TASK_INTERRUPTIBLE);
105238 +       return schedule_msec_hrtimeout(timeout);
105240 +EXPORT_SYMBOL(schedule_msec_hrtimeout_interruptible);
105242 +long __sched schedule_msec_hrtimeout_uninterruptible(long timeout)
105244 +       __set_current_state(TASK_UNINTERRUPTIBLE);
105245 +       return schedule_msec_hrtimeout(timeout);
105247 +EXPORT_SYMBOL(schedule_msec_hrtimeout_uninterruptible);
105248 diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
105249 index bf540f5a4115..dd5697d7347b 100644
105250 --- a/kernel/time/posix-timers.c
105251 +++ b/kernel/time/posix-timers.c
105252 @@ -1191,8 +1191,8 @@ SYSCALL_DEFINE2(clock_adjtime32, clockid_t, which_clock,
105254         err = do_clock_adjtime(which_clock, &ktx);
105256 -       if (err >= 0)
105257 -               err = put_old_timex32(utp, &ktx);
105258 +       if (err >= 0 && put_old_timex32(utp, &ktx))
105259 +               return -EFAULT;
105261         return err;
105263 diff --git a/kernel/time/timer.c b/kernel/time/timer.c
105264 index f475f1a027c8..8d82fe9f6fbb 100644
105265 --- a/kernel/time/timer.c
105266 +++ b/kernel/time/timer.c
105267 @@ -44,6 +44,7 @@
105268  #include <linux/slab.h>
105269  #include <linux/compat.h>
105270  #include <linux/random.h>
105271 +#include <linux/freezer.h>
105273  #include <linux/uaccess.h>
105274  #include <asm/unistd.h>
105275 @@ -1886,6 +1887,18 @@ signed long __sched schedule_timeout(signed long timeout)
105277         expire = timeout + jiffies;
105279 +#ifdef CONFIG_HIGH_RES_TIMERS
105280 +       if (timeout == 1 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
105281 +               /*
105282 +                * Special case 1 as being a request for the minimum timeout
105283 +                * and use highres timers to timeout after 1ms to workaround
105284 +                * the granularity of low Hz tick timers.
105285 +                */
105286 +               if (!schedule_min_hrtimeout())
105287 +                       return 0;
105288 +               goto out_timeout;
105289 +       }
105290 +#endif
105291         timer.task = current;
105292         timer_setup_on_stack(&timer.timer, process_timeout, 0);
105293         __mod_timer(&timer.timer, expire, MOD_TIMER_NOTPENDING);
105294 @@ -1894,10 +1907,10 @@ signed long __sched schedule_timeout(signed long timeout)
105296         /* Remove the timer from the object tracker */
105297         destroy_timer_on_stack(&timer.timer);
105299 +out_timeout:
105300         timeout = expire - jiffies;
105302 - out:
105303 +out:
105304         return timeout < 0 ? 0 : timeout;
105306  EXPORT_SYMBOL(schedule_timeout);
105307 @@ -2040,7 +2053,19 @@ void __init init_timers(void)
105308   */
105309  void msleep(unsigned int msecs)
105311 -       unsigned long timeout = msecs_to_jiffies(msecs) + 1;
105312 +       int jiffs = msecs_to_jiffies(msecs);
105313 +       unsigned long timeout;
105315 +       /*
105316 +        * Use high resolution timers where the resolution of tick based
105317 +        * timers is inadequate.
105318 +        */
105319 +       if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ && !pm_freezing) {
105320 +               while (msecs)
105321 +                       msecs = schedule_msec_hrtimeout_uninterruptible(msecs);
105322 +               return;
105323 +       }
105324 +       timeout = jiffs + 1;
105326         while (timeout)
105327                 timeout = schedule_timeout_uninterruptible(timeout);
105328 @@ -2054,7 +2079,15 @@ EXPORT_SYMBOL(msleep);
105329   */
105330  unsigned long msleep_interruptible(unsigned int msecs)
105332 -       unsigned long timeout = msecs_to_jiffies(msecs) + 1;
105333 +       int jiffs = msecs_to_jiffies(msecs);
105334 +       unsigned long timeout;
105336 +       if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ && !pm_freezing) {
105337 +               while (msecs && !signal_pending(current))
105338 +                       msecs = schedule_msec_hrtimeout_interruptible(msecs);
105339 +               return msecs;
105340 +       }
105341 +       timeout = jiffs + 1;
105343         while (timeout && !signal_pending(current))
105344                 timeout = schedule_timeout_interruptible(timeout);
105345 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
105346 index 3ba52d4e1314..826b88b727a6 100644
105347 --- a/kernel/trace/ftrace.c
105348 +++ b/kernel/trace/ftrace.c
105349 @@ -5631,7 +5631,10 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
105351         parser = &iter->parser;
105352         if (trace_parser_loaded(parser)) {
105353 -               ftrace_match_records(iter->hash, parser->buffer, parser->idx);
105354 +               int enable = !(iter->flags & FTRACE_ITER_NOTRACE);
105356 +               ftrace_process_regex(iter, parser->buffer,
105357 +                                    parser->idx, enable);
105358         }
105360         trace_parser_put(parser);
105361 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
105362 index c0c9aa5cd8e2..67c01dc5cdeb 100644
105363 --- a/kernel/trace/trace.c
105364 +++ b/kernel/trace/trace.c
105365 @@ -2390,14 +2390,13 @@ static void tracing_stop_tr(struct trace_array *tr)
105367  static int trace_save_cmdline(struct task_struct *tsk)
105369 -       unsigned pid, idx;
105370 +       unsigned tpid, idx;
105372         /* treat recording of idle task as a success */
105373         if (!tsk->pid)
105374                 return 1;
105376 -       if (unlikely(tsk->pid > PID_MAX_DEFAULT))
105377 -               return 0;
105378 +       tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
105380         /*
105381          * It's not the end of the world if we don't get
105382 @@ -2408,26 +2407,15 @@ static int trace_save_cmdline(struct task_struct *tsk)
105383         if (!arch_spin_trylock(&trace_cmdline_lock))
105384                 return 0;
105386 -       idx = savedcmd->map_pid_to_cmdline[tsk->pid];
105387 +       idx = savedcmd->map_pid_to_cmdline[tpid];
105388         if (idx == NO_CMDLINE_MAP) {
105389                 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
105391 -               /*
105392 -                * Check whether the cmdline buffer at idx has a pid
105393 -                * mapped. We are going to overwrite that entry so we
105394 -                * need to clear the map_pid_to_cmdline. Otherwise we
105395 -                * would read the new comm for the old pid.
105396 -                */
105397 -               pid = savedcmd->map_cmdline_to_pid[idx];
105398 -               if (pid != NO_CMDLINE_MAP)
105399 -                       savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
105401 -               savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
105402 -               savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
105404 +               savedcmd->map_pid_to_cmdline[tpid] = idx;
105405                 savedcmd->cmdline_idx = idx;
105406         }
105408 +       savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
105409         set_cmdline(idx, tsk->comm);
105411         arch_spin_unlock(&trace_cmdline_lock);
105412 @@ -2438,6 +2426,7 @@ static int trace_save_cmdline(struct task_struct *tsk)
105413  static void __trace_find_cmdline(int pid, char comm[])
105415         unsigned map;
105416 +       int tpid;
105418         if (!pid) {
105419                 strcpy(comm, "<idle>");
105420 @@ -2449,16 +2438,16 @@ static void __trace_find_cmdline(int pid, char comm[])
105421                 return;
105422         }
105424 -       if (pid > PID_MAX_DEFAULT) {
105425 -               strcpy(comm, "<...>");
105426 -               return;
105427 +       tpid = pid & (PID_MAX_DEFAULT - 1);
105428 +       map = savedcmd->map_pid_to_cmdline[tpid];
105429 +       if (map != NO_CMDLINE_MAP) {
105430 +               tpid = savedcmd->map_cmdline_to_pid[map];
105431 +               if (tpid == pid) {
105432 +                       strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
105433 +                       return;
105434 +               }
105435         }
105437 -       map = savedcmd->map_pid_to_cmdline[pid];
105438 -       if (map != NO_CMDLINE_MAP)
105439 -               strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
105440 -       else
105441 -               strcpy(comm, "<...>");
105442 +       strcpy(comm, "<...>");
105445  void trace_find_cmdline(int pid, char comm[])
105446 diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
105447 index aaf6793ededa..c1637f90c8a3 100644
105448 --- a/kernel/trace/trace_clock.c
105449 +++ b/kernel/trace/trace_clock.c
105450 @@ -95,33 +95,49 @@ u64 notrace trace_clock_global(void)
105452         unsigned long flags;
105453         int this_cpu;
105454 -       u64 now;
105455 +       u64 now, prev_time;
105457         raw_local_irq_save(flags);
105459         this_cpu = raw_smp_processor_id();
105460 -       now = sched_clock_cpu(this_cpu);
105462         /*
105463 -        * If in an NMI context then dont risk lockups and return the
105464 -        * cpu_clock() time:
105465 +        * The global clock "guarantees" that the events are ordered
105466 +        * between CPUs. But if two events on two different CPUS call
105467 +        * trace_clock_global at roughly the same time, it really does
105468 +        * not matter which one gets the earlier time. Just make sure
105469 +        * that the same CPU will always show a monotonic clock.
105470 +        *
105471 +        * Use a read memory barrier to get the latest written
105472 +        * time that was recorded.
105473          */
105474 -       if (unlikely(in_nmi()))
105475 -               goto out;
105476 +       smp_rmb();
105477 +       prev_time = READ_ONCE(trace_clock_struct.prev_time);
105478 +       now = sched_clock_cpu(this_cpu);
105480 -       arch_spin_lock(&trace_clock_struct.lock);
105481 +       /* Make sure that now is always greater than prev_time */
105482 +       if ((s64)(now - prev_time) < 0)
105483 +               now = prev_time + 1;
105485         /*
105486 -        * TODO: if this happens often then maybe we should reset
105487 -        * my_scd->clock to prev_time+1, to make sure
105488 -        * we start ticking with the local clock from now on?
105490 +        * If in an NMI context then don't risk lockups and simply return
105491 +        * the current time.
105491          */
105492 -       if ((s64)(now - trace_clock_struct.prev_time) < 0)
105493 -               now = trace_clock_struct.prev_time + 1;
105494 +       if (unlikely(in_nmi()))
105495 +               goto out;
105497 -       trace_clock_struct.prev_time = now;
105498 +       /* Tracing can cause strange recursion, always use a try lock */
105499 +       if (arch_spin_trylock(&trace_clock_struct.lock)) {
105500 +               /* Reread prev_time in case it was already updated */
105501 +               prev_time = READ_ONCE(trace_clock_struct.prev_time);
105502 +               if ((s64)(now - prev_time) < 0)
105503 +                       now = prev_time + 1;
105505 -       arch_spin_unlock(&trace_clock_struct.lock);
105506 +               trace_clock_struct.prev_time = now;
105508 +               /* The unlock acts as the wmb for the above rmb */
105509 +               arch_spin_unlock(&trace_clock_struct.lock);
105510 +       }
105511   out:
105512         raw_local_irq_restore(flags);
105514 diff --git a/kernel/up.c b/kernel/up.c
105515 index c6f323dcd45b..4edd5493eba2 100644
105516 --- a/kernel/up.c
105517 +++ b/kernel/up.c
105518 @@ -25,7 +25,7 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
105520  EXPORT_SYMBOL(smp_call_function_single);
105522 -int smp_call_function_single_async(int cpu, call_single_data_t *csd)
105523 +int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
105525         unsigned long flags;
105527 diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
105528 index 9a4b980d695b..0475d15b1c66 100644
105529 --- a/kernel/user_namespace.c
105530 +++ b/kernel/user_namespace.c
105531 @@ -21,6 +21,9 @@
105532  #include <linux/bsearch.h>
105533  #include <linux/sort.h>
105535 +/* sysctl */
105536 +int unprivileged_userns_clone = 1;
105538  static struct kmem_cache *user_ns_cachep __read_mostly;
105539  static DEFINE_MUTEX(userns_state_mutex);
105541 diff --git a/kernel/watchdog.c b/kernel/watchdog.c
105542 index 107bc38b1945..8cf0678378d2 100644
105543 --- a/kernel/watchdog.c
105544 +++ b/kernel/watchdog.c
105545 @@ -154,7 +154,11 @@ static void lockup_detector_update_enable(void)
105547  #ifdef CONFIG_SOFTLOCKUP_DETECTOR
105549 -#define SOFTLOCKUP_RESET       ULONG_MAX
105551 + * Delay the softlockup report when running a known slow code.
105552 + * It does _not_ affect the timestamp of the last successful reschedule.
105553 + */
105554 +#define SOFTLOCKUP_DELAY_REPORT        ULONG_MAX
105556  #ifdef CONFIG_SMP
105557  int __read_mostly sysctl_softlockup_all_cpu_backtrace;
105558 @@ -169,10 +173,12 @@ unsigned int __read_mostly softlockup_panic =
105559  static bool softlockup_initialized __read_mostly;
105560  static u64 __read_mostly sample_period;
105562 +/* Timestamp taken after the last successful reschedule. */
105563  static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
105564 +/* Timestamp of the last softlockup report. */
105565 +static DEFINE_PER_CPU(unsigned long, watchdog_report_ts);
105566  static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
105567  static DEFINE_PER_CPU(bool, softlockup_touch_sync);
105568 -static DEFINE_PER_CPU(bool, soft_watchdog_warn);
105569  static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
105570  static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
105571  static unsigned long soft_lockup_nmi_warn;
105572 @@ -235,10 +241,16 @@ static void set_sample_period(void)
105573         watchdog_update_hrtimer_threshold(sample_period);
105576 +static void update_report_ts(void)
105578 +       __this_cpu_write(watchdog_report_ts, get_timestamp());
105581  /* Commands for resetting the watchdog */
105582 -static void __touch_watchdog(void)
105583 +static void update_touch_ts(void)
105585         __this_cpu_write(watchdog_touch_ts, get_timestamp());
105586 +       update_report_ts();
105589  /**
105590 @@ -252,10 +264,10 @@ static void __touch_watchdog(void)
105591  notrace void touch_softlockup_watchdog_sched(void)
105593         /*
105594 -        * Preemption can be enabled.  It doesn't matter which CPU's timestamp
105595 -        * gets zeroed here, so use the raw_ operation.
105596 +        * Preemption can be enabled.  It doesn't matter which CPU's watchdog
105597 +        * report period gets restarted here, so use the raw_ operation.
105598          */
105599 -       raw_cpu_write(watchdog_touch_ts, SOFTLOCKUP_RESET);
105600 +       raw_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
105603  notrace void touch_softlockup_watchdog(void)
105604 @@ -279,7 +291,7 @@ void touch_all_softlockup_watchdogs(void)
105605          * the softlockup check.
105606          */
105607         for_each_cpu(cpu, &watchdog_allowed_mask) {
105608 -               per_cpu(watchdog_touch_ts, cpu) = SOFTLOCKUP_RESET;
105609 +               per_cpu(watchdog_report_ts, cpu) = SOFTLOCKUP_DELAY_REPORT;
105610                 wq_watchdog_touch(cpu);
105611         }
105613 @@ -287,16 +299,16 @@ void touch_all_softlockup_watchdogs(void)
105614  void touch_softlockup_watchdog_sync(void)
105616         __this_cpu_write(softlockup_touch_sync, true);
105617 -       __this_cpu_write(watchdog_touch_ts, SOFTLOCKUP_RESET);
105618 +       __this_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
105621 -static int is_softlockup(unsigned long touch_ts)
105622 +static int is_softlockup(unsigned long touch_ts, unsigned long period_ts)
105624         unsigned long now = get_timestamp();
105626         if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh){
105627                 /* Warn about unreasonable delays. */
105628 -               if (time_after(now, touch_ts + get_softlockup_thresh()))
105629 +               if (time_after(now, period_ts + get_softlockup_thresh()))
105630                         return now - touch_ts;
105631         }
105632         return 0;
105633 @@ -332,7 +344,7 @@ static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);
105634   */
105635  static int softlockup_fn(void *data)
105637 -       __touch_watchdog();
105638 +       update_touch_ts();
105639         complete(this_cpu_ptr(&softlockup_completion));
105641         return 0;
105642 @@ -342,6 +354,7 @@ static int softlockup_fn(void *data)
105643  static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
105645         unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
105646 +       unsigned long period_ts = __this_cpu_read(watchdog_report_ts);
105647         struct pt_regs *regs = get_irq_regs();
105648         int duration;
105649         int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
105650 @@ -363,7 +376,8 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
105651         /* .. and repeat */
105652         hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));
105654 -       if (touch_ts == SOFTLOCKUP_RESET) {
105655 +       /* Reset the interval when touched externally by a known slow code. */
105656 +       if (period_ts == SOFTLOCKUP_DELAY_REPORT) {
105657                 if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
105658                         /*
105659                          * If the time stamp was touched atomically
105660 @@ -375,7 +389,8 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
105662                 /* Clear the guest paused flag on watchdog reset */
105663                 kvm_check_and_clear_guest_paused();
105664 -               __touch_watchdog();
105665 +               update_report_ts();
105667                 return HRTIMER_RESTART;
105668         }
105670 @@ -385,7 +400,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
105671          * indicate it is getting cpu time.  If it hasn't then
105672          * this is a good indication some task is hogging the cpu
105673          */
105674 -       duration = is_softlockup(touch_ts);
105675 +       duration = is_softlockup(touch_ts, period_ts);
105676         if (unlikely(duration)) {
105677                 /*
105678                  * If a virtual machine is stopped by the host it can look to
105679 @@ -395,21 +410,18 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
105680                 if (kvm_check_and_clear_guest_paused())
105681                         return HRTIMER_RESTART;
105683 -               /* only warn once */
105684 -               if (__this_cpu_read(soft_watchdog_warn) == true)
105685 -                       return HRTIMER_RESTART;
105687 +               /*
105688 +                * Prevent multiple soft-lockup reports if one cpu is already
105689 +                * engaged in dumping all cpu back traces.
105690 +                */
105691                 if (softlockup_all_cpu_backtrace) {
105692 -                       /* Prevent multiple soft-lockup reports if one cpu is already
105693 -                        * engaged in dumping cpu back traces
105694 -                        */
105695 -                       if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
105696 -                               /* Someone else will report us. Let's give up */
105697 -                               __this_cpu_write(soft_watchdog_warn, true);
105698 +                       if (test_and_set_bit_lock(0, &soft_lockup_nmi_warn))
105699                                 return HRTIMER_RESTART;
105700 -                       }
105701                 }
105703 +               /* Start period for the next softlockup warning. */
105704 +               update_report_ts();
105706                 pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
105707                         smp_processor_id(), duration,
105708                         current->comm, task_pid_nr(current));
105709 @@ -421,22 +433,14 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
105710                         dump_stack();
105712                 if (softlockup_all_cpu_backtrace) {
105713 -                       /* Avoid generating two back traces for current
105714 -                        * given that one is already made above
105715 -                        */
105716                         trigger_allbutself_cpu_backtrace();
105718 -                       clear_bit(0, &soft_lockup_nmi_warn);
105719 -                       /* Barrier to sync with other cpus */
105720 -                       smp_mb__after_atomic();
105721 +                       clear_bit_unlock(0, &soft_lockup_nmi_warn);
105722                 }
105724                 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
105725                 if (softlockup_panic)
105726                         panic("softlockup: hung tasks");
105727 -               __this_cpu_write(soft_watchdog_warn, true);
105728 -       } else
105729 -               __this_cpu_write(soft_watchdog_warn, false);
105730 +       }
105732         return HRTIMER_RESTART;
105734 @@ -461,7 +465,7 @@ static void watchdog_enable(unsigned int cpu)
105735                       HRTIMER_MODE_REL_PINNED_HARD);
105737         /* Initialize timestamp */
105738 -       __touch_watchdog();
105739 +       update_touch_ts();
105740         /* Enable the perf event */
105741         if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
105742                 watchdog_nmi_enable(cpu);
105743 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
105744 index 417c3d3e521b..03d75fe17edf 100644
105745 --- a/lib/Kconfig.debug
105746 +++ b/lib/Kconfig.debug
105747 @@ -179,7 +179,7 @@ config DYNAMIC_DEBUG_CORE
105749  config SYMBOLIC_ERRNAME
105750         bool "Support symbolic error names in printf"
105751 -       default y if PRINTK
105752 +       default n
105753         help
105754           If you say Y here, the kernel's printf implementation will
105755           be able to print symbolic error names such as ENOSPC instead
105756 @@ -189,7 +189,7 @@ config SYMBOLIC_ERRNAME
105757  config DEBUG_BUGVERBOSE
105758         bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT
105759         depends on BUG && (GENERIC_BUG || HAVE_DEBUG_BUGVERBOSE)
105760 -       default y
105761 +       default n
105762         help
105763           Say Y here to make BUG() panics output the file name and line number
105764           of the BUG call as well as the EIP and oops trace.  This aids
105765 diff --git a/lib/Kconfig.kfence b/lib/Kconfig.kfence
105766 index 78f50ccb3b45..e641add33947 100644
105767 --- a/lib/Kconfig.kfence
105768 +++ b/lib/Kconfig.kfence
105769 @@ -7,6 +7,7 @@ menuconfig KFENCE
105770         bool "KFENCE: low-overhead sampling-based memory safety error detector"
105771         depends on HAVE_ARCH_KFENCE && (SLAB || SLUB)
105772         select STACKTRACE
105773 +       select IRQ_WORK
105774         help
105775           KFENCE is a low-overhead sampling-based detector of heap out-of-bounds
105776           access, use-after-free, and invalid-free errors. KFENCE is designed
105777 diff --git a/lib/bug.c b/lib/bug.c
105778 index 8f9d537bfb2a..b92da1f6e21b 100644
105779 --- a/lib/bug.c
105780 +++ b/lib/bug.c
105781 @@ -155,30 +155,27 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
105783         file = NULL;
105784         line = 0;
105785 -       warning = 0;
105787 -       if (bug) {
105788  #ifdef CONFIG_DEBUG_BUGVERBOSE
105789  #ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
105790 -               file = bug->file;
105791 +       file = bug->file;
105792  #else
105793 -               file = (const char *)bug + bug->file_disp;
105794 +       file = (const char *)bug + bug->file_disp;
105795  #endif
105796 -               line = bug->line;
105797 +       line = bug->line;
105798  #endif
105799 -               warning = (bug->flags & BUGFLAG_WARNING) != 0;
105800 -               once = (bug->flags & BUGFLAG_ONCE) != 0;
105801 -               done = (bug->flags & BUGFLAG_DONE) != 0;
105803 -               if (warning && once) {
105804 -                       if (done)
105805 -                               return BUG_TRAP_TYPE_WARN;
105807 -                       /*
105808 -                        * Since this is the only store, concurrency is not an issue.
105809 -                        */
105810 -                       bug->flags |= BUGFLAG_DONE;
105811 -               }
105812 +       warning = (bug->flags & BUGFLAG_WARNING) != 0;
105813 +       once = (bug->flags & BUGFLAG_ONCE) != 0;
105814 +       done = (bug->flags & BUGFLAG_DONE) != 0;
105816 +       if (warning && once) {
105817 +               if (done)
105818 +                       return BUG_TRAP_TYPE_WARN;
105820 +               /*
105821 +                * Since this is the only store, concurrency is not an issue.
105822 +                */
105823 +               bug->flags |= BUGFLAG_DONE;
105824         }
105826         /*
105827 diff --git a/lib/crypto/poly1305-donna32.c b/lib/crypto/poly1305-donna32.c
105828 index 3cc77d94390b..7fb71845cc84 100644
105829 --- a/lib/crypto/poly1305-donna32.c
105830 +++ b/lib/crypto/poly1305-donna32.c
105831 @@ -10,7 +10,8 @@
105832  #include <asm/unaligned.h>
105833  #include <crypto/internal/poly1305.h>
105835 -void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16])
105836 +void poly1305_core_setkey(struct poly1305_core_key *key,
105837 +                         const u8 raw_key[POLY1305_BLOCK_SIZE])
105839         /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
105840         key->key.r[0] = (get_unaligned_le32(&raw_key[0])) & 0x3ffffff;
105841 diff --git a/lib/crypto/poly1305-donna64.c b/lib/crypto/poly1305-donna64.c
105842 index 6ae181bb4345..d34cf4053668 100644
105843 --- a/lib/crypto/poly1305-donna64.c
105844 +++ b/lib/crypto/poly1305-donna64.c
105845 @@ -12,7 +12,8 @@
105847  typedef __uint128_t u128;
105849 -void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16])
105850 +void poly1305_core_setkey(struct poly1305_core_key *key,
105851 +                         const u8 raw_key[POLY1305_BLOCK_SIZE])
105853         u64 t0, t1;
105855 diff --git a/lib/crypto/poly1305.c b/lib/crypto/poly1305.c
105856 index 9d2d14df0fee..26d87fc3823e 100644
105857 --- a/lib/crypto/poly1305.c
105858 +++ b/lib/crypto/poly1305.c
105859 @@ -12,7 +12,8 @@
105860  #include <linux/module.h>
105861  #include <asm/unaligned.h>
105863 -void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key)
105864 +void poly1305_init_generic(struct poly1305_desc_ctx *desc,
105865 +                          const u8 key[POLY1305_KEY_SIZE])
105867         poly1305_core_setkey(&desc->core_r, key);
105868         desc->s[0] = get_unaligned_le32(key + 16);
105869 diff --git a/lib/decompress_unzstd.c b/lib/decompress_unzstd.c
105870 index 790abc472f5b..6e5ecfba0a8d 100644
105871 --- a/lib/decompress_unzstd.c
105872 +++ b/lib/decompress_unzstd.c
105873 @@ -68,11 +68,7 @@
105874  #ifdef STATIC
105875  # define UNZSTD_PREBOOT
105876  # include "xxhash.c"
105877 -# include "zstd/entropy_common.c"
105878 -# include "zstd/fse_decompress.c"
105879 -# include "zstd/huf_decompress.c"
105880 -# include "zstd/zstd_common.c"
105881 -# include "zstd/decompress.c"
105882 +# include "zstd/decompress_sources.h"
105883  #endif
105885  #include <linux/decompress/mm.h>
105886 @@ -91,11 +87,15 @@
105888  static int INIT handle_zstd_error(size_t ret, void (*error)(char *x))
105890 -       const int err = ZSTD_getErrorCode(ret);
105891 +       const zstd_error_code err = zstd_get_error_code(ret);
105893 -       if (!ZSTD_isError(ret))
105894 +       if (!zstd_is_error(ret))
105895                 return 0;
105897 +       /*
105898 +        * zstd_get_error_name() cannot be used because error takes a char *
105899 +        * not a const char *
105900 +        */
105901         switch (err) {
105902         case ZSTD_error_memory_allocation:
105903                 error("ZSTD decompressor ran out of memory");
105904 @@ -124,28 +124,28 @@ static int INIT decompress_single(const u8 *in_buf, long in_len, u8 *out_buf,
105905                                   long out_len, long *in_pos,
105906                                   void (*error)(char *x))
105908 -       const size_t wksp_size = ZSTD_DCtxWorkspaceBound();
105909 +       const size_t wksp_size = zstd_dctx_workspace_bound();
105910         void *wksp = large_malloc(wksp_size);
105911 -       ZSTD_DCtx *dctx = ZSTD_initDCtx(wksp, wksp_size);
105912 +       zstd_dctx *dctx = zstd_init_dctx(wksp, wksp_size);
105913         int err;
105914         size_t ret;
105916         if (dctx == NULL) {
105917 -               error("Out of memory while allocating ZSTD_DCtx");
105918 +               error("Out of memory while allocating zstd_dctx");
105919                 err = -1;
105920                 goto out;
105921         }
105922         /*
105923          * Find out how large the frame actually is, there may be junk at
105924 -        * the end of the frame that ZSTD_decompressDCtx() can't handle.
105925 +        * the end of the frame that zstd_decompress_dctx() can't handle.
105926          */
105927 -       ret = ZSTD_findFrameCompressedSize(in_buf, in_len);
105928 +       ret = zstd_find_frame_compressed_size(in_buf, in_len);
105929         err = handle_zstd_error(ret, error);
105930         if (err)
105931                 goto out;
105932         in_len = (long)ret;
105934 -       ret = ZSTD_decompressDCtx(dctx, out_buf, out_len, in_buf, in_len);
105935 +       ret = zstd_decompress_dctx(dctx, out_buf, out_len, in_buf, in_len);
105936         err = handle_zstd_error(ret, error);
105937         if (err)
105938                 goto out;
105939 @@ -167,14 +167,14 @@ static int INIT __unzstd(unsigned char *in_buf, long in_len,
105940                          long *in_pos,
105941                          void (*error)(char *x))
105943 -       ZSTD_inBuffer in;
105944 -       ZSTD_outBuffer out;
105945 -       ZSTD_frameParams params;
105946 +       zstd_in_buffer in;
105947 +       zstd_out_buffer out;
105948 +       zstd_frame_header header;
105949         void *in_allocated = NULL;
105950         void *out_allocated = NULL;
105951         void *wksp = NULL;
105952         size_t wksp_size;
105953 -       ZSTD_DStream *dstream;
105954 +       zstd_dstream *dstream;
105955         int err;
105956         size_t ret;
105958 @@ -238,13 +238,13 @@ static int INIT __unzstd(unsigned char *in_buf, long in_len,
105959         out.size = out_len;
105961         /*
105962 -        * We need to know the window size to allocate the ZSTD_DStream.
105963 +        * We need to know the window size to allocate the zstd_dstream.
105964          * Since we are streaming, we need to allocate a buffer for the sliding
105965          * window. The window size varies from 1 KB to ZSTD_WINDOWSIZE_MAX
105966          * (8 MB), so it is important to use the actual value so as not to
105967          * waste memory when it is smaller.
105968          */
105969 -       ret = ZSTD_getFrameParams(&params, in.src, in.size);
105970 +       ret = zstd_get_frame_header(&header, in.src, in.size);
105971         err = handle_zstd_error(ret, error);
105972         if (err)
105973                 goto out;
105974 @@ -253,19 +253,19 @@ static int INIT __unzstd(unsigned char *in_buf, long in_len,
105975                 err = -1;
105976                 goto out;
105977         }
105978 -       if (params.windowSize > ZSTD_WINDOWSIZE_MAX) {
105979 +       if (header.windowSize > ZSTD_WINDOWSIZE_MAX) {
105980                 error("ZSTD-compressed data has too large a window size");
105981                 err = -1;
105982                 goto out;
105983         }
105985         /*
105986 -        * Allocate the ZSTD_DStream now that we know how much memory is
105987 +        * Allocate the zstd_dstream now that we know how much memory is
105988          * required.
105989          */
105990 -       wksp_size = ZSTD_DStreamWorkspaceBound(params.windowSize);
105991 +       wksp_size = zstd_dstream_workspace_bound(header.windowSize);
105992         wksp = large_malloc(wksp_size);
105993 -       dstream = ZSTD_initDStream(params.windowSize, wksp, wksp_size);
105994 +       dstream = zstd_init_dstream(header.windowSize, wksp, wksp_size);
105995         if (dstream == NULL) {
105996                 error("Out of memory while allocating ZSTD_DStream");
105997                 err = -1;
105998 @@ -298,7 +298,7 @@ static int INIT __unzstd(unsigned char *in_buf, long in_len,
105999                         in.size = in_len;
106000                 }
106001                 /* Returns zero when the frame is complete. */
106002 -               ret = ZSTD_decompressStream(dstream, &out, &in);
106003 +               ret = zstd_decompress_stream(dstream, &out, &in);
106004                 err = handle_zstd_error(ret, error);
106005                 if (err)
106006                         goto out;
106007 diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
106008 index c70d6347afa2..921d0a654243 100644
106009 --- a/lib/dynamic_debug.c
106010 +++ b/lib/dynamic_debug.c
106011 @@ -396,7 +396,7 @@ static int ddebug_parse_query(char *words[], int nwords,
106012                         /* tail :$info is function or line-range */
106013                         fline = strchr(query->filename, ':');
106014                         if (!fline)
106015 -                               break;
106016 +                               continue;
106017                         *fline++ = '\0';
106018                         if (isalpha(*fline) || *fline == '*' || *fline == '?') {
106019                                 /* take as function name */
106020 diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
106021 index 7998affa45d4..c87d5b6a8a55 100644
106022 --- a/lib/kobject_uevent.c
106023 +++ b/lib/kobject_uevent.c
106024 @@ -251,12 +251,13 @@ static int kobj_usermode_filter(struct kobject *kobj)
106026  static int init_uevent_argv(struct kobj_uevent_env *env, const char *subsystem)
106028 +       int buffer_size = sizeof(env->buf) - env->buflen;
106029         int len;
106031 -       len = strlcpy(&env->buf[env->buflen], subsystem,
106032 -                     sizeof(env->buf) - env->buflen);
106033 -       if (len >= (sizeof(env->buf) - env->buflen)) {
106034 -               WARN(1, KERN_ERR "init_uevent_argv: buffer size too small\n");
106035 +       len = strlcpy(&env->buf[env->buflen], subsystem, buffer_size);
106036 +       if (len >= buffer_size) {
106037 +               pr_warn("init_uevent_argv: buffer size of %d too small, needed %d\n",
106038 +                       buffer_size, len);
106039                 return -ENOMEM;
106040         }
106042 diff --git a/lib/nlattr.c b/lib/nlattr.c
106043 index 5b6116e81f9f..1d051ef66afe 100644
106044 --- a/lib/nlattr.c
106045 +++ b/lib/nlattr.c
106046 @@ -828,7 +828,7 @@ int nla_strcmp(const struct nlattr *nla, const char *str)
106047         int attrlen = nla_len(nla);
106048         int d;
106050 -       if (attrlen > 0 && buf[attrlen - 1] == '\0')
106051 +       while (attrlen > 0 && buf[attrlen - 1] == '\0')
106052                 attrlen--;
106054         d = attrlen - len;
106055 diff --git a/lib/stackdepot.c b/lib/stackdepot.c
106056 index 49f67a0c6e5d..df9179f4f441 100644
106057 --- a/lib/stackdepot.c
106058 +++ b/lib/stackdepot.c
106059 @@ -71,7 +71,7 @@ static void *stack_slabs[STACK_ALLOC_MAX_SLABS];
106060  static int depot_index;
106061  static int next_slab_inited;
106062  static size_t depot_offset;
106063 -static DEFINE_SPINLOCK(depot_lock);
106064 +static DEFINE_RAW_SPINLOCK(depot_lock);
106066  static bool init_stack_slab(void **prealloc)
106068 @@ -305,7 +305,7 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
106069                         prealloc = page_address(page);
106070         }
106072 -       spin_lock_irqsave(&depot_lock, flags);
106073 +       raw_spin_lock_irqsave(&depot_lock, flags);
106075         found = find_stack(*bucket, entries, nr_entries, hash);
106076         if (!found) {
106077 @@ -329,7 +329,7 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
106078                 WARN_ON(!init_stack_slab(&prealloc));
106079         }
106081 -       spin_unlock_irqrestore(&depot_lock, flags);
106082 +       raw_spin_unlock_irqrestore(&depot_lock, flags);
106083  exit:
106084         if (prealloc) {
106085                 /* Nobody used this memory, ok to free it. */
106086 diff --git a/lib/test_kasan.c b/lib/test_kasan.c
106087 index e5647d147b35..be69c3aa615a 100644
106088 --- a/lib/test_kasan.c
106089 +++ b/lib/test_kasan.c
106090 @@ -646,8 +646,20 @@ static char global_array[10];
106092  static void kasan_global_oob(struct kunit *test)
106094 -       volatile int i = 3;
106095 -       char *p = &global_array[ARRAY_SIZE(global_array) + i];
106096 +       /*
106097 +        * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
106098 +        * from failing here and panicing the kernel, access the array via a
106099 +        * volatile pointer, which will prevent the compiler from being able to
106100 +        * determine the array bounds.
106101 +        *
106102 +        * This access uses a volatile pointer to char (char *volatile) rather
106103 +        * than the more conventional pointer to volatile char (volatile char *)
106104 +        * because we want to prevent the compiler from making inferences about
106105 +        * the pointer itself (i.e. its array bounds), not the data that it
106106 +        * refers to.
106107 +        */
106108 +       char *volatile array = global_array;
106109 +       char *p = &array[ARRAY_SIZE(global_array) + 3];
106111         /* Only generic mode instruments globals. */
106112         KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
106113 @@ -695,8 +707,9 @@ static void ksize_uaf(struct kunit *test)
106114  static void kasan_stack_oob(struct kunit *test)
106116         char stack_array[10];
106117 -       volatile int i = OOB_TAG_OFF;
106118 -       char *p = &stack_array[ARRAY_SIZE(stack_array) + i];
106119 +       /* See comment in kasan_global_oob. */
106120 +       char *volatile array = stack_array;
106121 +       char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];
106123         KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);
106125 @@ -707,7 +720,9 @@ static void kasan_alloca_oob_left(struct kunit *test)
106127         volatile int i = 10;
106128         char alloca_array[i];
106129 -       char *p = alloca_array - 1;
106130 +       /* See comment in kasan_global_oob. */
106131 +       char *volatile array = alloca_array;
106132 +       char *p = array - 1;
106134         /* Only generic mode instruments dynamic allocas. */
106135         KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
106136 @@ -720,7 +735,9 @@ static void kasan_alloca_oob_right(struct kunit *test)
106138         volatile int i = 10;
106139         char alloca_array[i];
106140 -       char *p = alloca_array + i;
106141 +       /* See comment in kasan_global_oob. */
106142 +       char *volatile array = alloca_array;
106143 +       char *p = array + i;
106145         /* Only generic mode instruments dynamic allocas. */
106146         KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
106147 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
106148 index 41ddc353ebb8..39ef2e314da5 100644
106149 --- a/lib/vsprintf.c
106150 +++ b/lib/vsprintf.c
106151 @@ -3135,8 +3135,6 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
106152                         switch (*fmt) {
106153                         case 'S':
106154                         case 's':
106155 -                       case 'F':
106156 -                       case 'f':
106157                         case 'x':
106158                         case 'K':
106159                         case 'e':
106160 diff --git a/lib/zstd/Makefile b/lib/zstd/Makefile
106161 index f5d778e7e5c7..19485e3cc7c9 100644
106162 --- a/lib/zstd/Makefile
106163 +++ b/lib/zstd/Makefile
106164 @@ -1,10 +1,46 @@
106165  # SPDX-License-Identifier: GPL-2.0-only
106166 +# ################################################################
106167 +# Copyright (c) Facebook, Inc.
106168 +# All rights reserved.
106170 +# This source code is licensed under both the BSD-style license (found in the
106171 +# LICENSE file in the root directory of this source tree) and the GPLv2 (found
106172 +# in the COPYING file in the root directory of this source tree).
106173 +# You may select, at your option, one of the above-listed licenses.
106174 +# ################################################################
106175  obj-$(CONFIG_ZSTD_COMPRESS) += zstd_compress.o
106176  obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd_decompress.o
106178  ccflags-y += -O3
106180 -zstd_compress-y := fse_compress.o huf_compress.o compress.o \
106181 -                  entropy_common.o fse_decompress.o zstd_common.o
106182 -zstd_decompress-y := huf_decompress.o decompress.o \
106183 -                    entropy_common.o fse_decompress.o zstd_common.o
106184 +zstd_compress-y := \
106185 +               zstd_compress_module.o \
106186 +               common/debug.o \
106187 +               common/entropy_common.o \
106188 +               common/error_private.o \
106189 +               common/fse_decompress.o \
106190 +               common/zstd_common.o \
106191 +               compress/fse_compress.o \
106192 +               compress/hist.o \
106193 +               compress/huf_compress.o \
106194 +               compress/zstd_compress.o \
106195 +               compress/zstd_compress_literals.o \
106196 +               compress/zstd_compress_sequences.o \
106197 +               compress/zstd_compress_superblock.o \
106198 +               compress/zstd_double_fast.o \
106199 +               compress/zstd_fast.o \
106200 +               compress/zstd_lazy.o \
106201 +               compress/zstd_ldm.o \
106202 +               compress/zstd_opt.o \
106204 +zstd_decompress-y := \
106205 +               zstd_decompress_module.o \
106206 +               common/debug.o \
106207 +               common/entropy_common.o \
106208 +               common/error_private.o \
106209 +               common/fse_decompress.o \
106210 +               common/zstd_common.o \
106211 +               decompress/huf_decompress.o \
106212 +               decompress/zstd_ddict.o \
106213 +               decompress/zstd_decompress.o \
106214 +               decompress/zstd_decompress_block.o \
106215 diff --git a/lib/zstd/bitstream.h b/lib/zstd/bitstream.h
106216 deleted file mode 100644
106217 index 5d6343c1a909..000000000000
106218 --- a/lib/zstd/bitstream.h
106219 +++ /dev/null
106220 @@ -1,380 +0,0 @@
106222 - * bitstream
106223 - * Part of FSE library
106224 - * header file (to include)
106225 - * Copyright (C) 2013-2016, Yann Collet.
106227 - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
106229 - * Redistribution and use in source and binary forms, with or without
106230 - * modification, are permitted provided that the following conditions are
106231 - * met:
106233 - *   * Redistributions of source code must retain the above copyright
106234 - * notice, this list of conditions and the following disclaimer.
106235 - *   * Redistributions in binary form must reproduce the above
106236 - * copyright notice, this list of conditions and the following disclaimer
106237 - * in the documentation and/or other materials provided with the
106238 - * distribution.
106240 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
106241 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
106242 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
106243 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
106244 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
106245 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
106246 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
106247 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
106248 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
106249 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
106250 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
106252 - * This program is free software; you can redistribute it and/or modify it under
106253 - * the terms of the GNU General Public License version 2 as published by the
106254 - * Free Software Foundation. This program is dual-licensed; you may select
106255 - * either version 2 of the GNU General Public License ("GPL") or BSD license
106256 - * ("BSD").
106258 - * You can contact the author at :
106259 - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
106260 - */
106261 -#ifndef BITSTREAM_H_MODULE
106262 -#define BITSTREAM_H_MODULE
106265 -*  This API consists of small unitary functions, which must be inlined for best performance.
106266 -*  Since link-time-optimization is not available for all compilers,
106267 -*  these functions are defined into a .h to be included.
106270 -/*-****************************************
106271 -*  Dependencies
106272 -******************************************/
106273 -#include "error_private.h" /* error codes and messages */
106274 -#include "mem.h"          /* unaligned access routines */
106276 -/*=========================================
106277 -*  Target specific
106278 -=========================================*/
106279 -#define STREAM_ACCUMULATOR_MIN_32 25
106280 -#define STREAM_ACCUMULATOR_MIN_64 57
106281 -#define STREAM_ACCUMULATOR_MIN ((U32)(ZSTD_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64))
106283 -/*-******************************************
106284 -*  bitStream encoding API (write forward)
106285 -********************************************/
106286 -/* bitStream can mix input from multiple sources.
106287 -*  A critical property of these streams is that they encode and decode in **reverse** direction.
106288 -*  So the first bit sequence you add will be the last to be read, like a LIFO stack.
106290 -typedef struct {
106291 -       size_t bitContainer;
106292 -       int bitPos;
106293 -       char *startPtr;
106294 -       char *ptr;
106295 -       char *endPtr;
106296 -} BIT_CStream_t;
106298 -ZSTD_STATIC size_t BIT_initCStream(BIT_CStream_t *bitC, void *dstBuffer, size_t dstCapacity);
106299 -ZSTD_STATIC void BIT_addBits(BIT_CStream_t *bitC, size_t value, unsigned nbBits);
106300 -ZSTD_STATIC void BIT_flushBits(BIT_CStream_t *bitC);
106301 -ZSTD_STATIC size_t BIT_closeCStream(BIT_CStream_t *bitC);
106303 -/* Start with initCStream, providing the size of buffer to write into.
106304 -*  bitStream will never write outside of this buffer.
106305 -*  `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code.
106307 -*  bits are first added to a local register.
106308 -*  Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems.
106309 -*  Writing data into memory is an explicit operation, performed by the flushBits function.
106310 -*  Hence keep track how many bits are potentially stored into local register to avoid register overflow.
106311 -*  After a flushBits, a maximum of 7 bits might still be stored into local register.
106313 -*  Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers.
106315 -*  Last operation is to close the bitStream.
106316 -*  The function returns the final size of CStream in bytes.
106317 -*  If data couldn't fit into `dstBuffer`, it will return a 0 ( == not storable)
106320 -/*-********************************************
106321 -*  bitStream decoding API (read backward)
106322 -**********************************************/
106323 -typedef struct {
106324 -       size_t bitContainer;
106325 -       unsigned bitsConsumed;
106326 -       const char *ptr;
106327 -       const char *start;
106328 -} BIT_DStream_t;
106330 -typedef enum {
106331 -       BIT_DStream_unfinished = 0,
106332 -       BIT_DStream_endOfBuffer = 1,
106333 -       BIT_DStream_completed = 2,
106334 -       BIT_DStream_overflow = 3
106335 -} BIT_DStream_status; /* result of BIT_reloadDStream() */
106336 -/* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
106338 -ZSTD_STATIC size_t BIT_initDStream(BIT_DStream_t *bitD, const void *srcBuffer, size_t srcSize);
106339 -ZSTD_STATIC size_t BIT_readBits(BIT_DStream_t *bitD, unsigned nbBits);
106340 -ZSTD_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t *bitD);
106341 -ZSTD_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t *bitD);
106343 -/* Start by invoking BIT_initDStream().
106344 -*  A chunk of the bitStream is then stored into a local register.
106345 -*  Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
106346 -*  You can then retrieve bitFields stored into the local register, **in reverse order**.
106347 -*  Local register is explicitly reloaded from memory by the BIT_reloadDStream() method.
106348 -*  A reload guarantee a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished.
106349 -*  Otherwise, it can be less than that, so proceed accordingly.
106350 -*  Checking if DStream has reached its end can be performed with BIT_endOfDStream().
106353 -/*-****************************************
106354 -*  unsafe API
106355 -******************************************/
106356 -ZSTD_STATIC void BIT_addBitsFast(BIT_CStream_t *bitC, size_t value, unsigned nbBits);
106357 -/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */
106359 -ZSTD_STATIC void BIT_flushBitsFast(BIT_CStream_t *bitC);
106360 -/* unsafe version; does not check buffer overflow */
106362 -ZSTD_STATIC size_t BIT_readBitsFast(BIT_DStream_t *bitD, unsigned nbBits);
106363 -/* faster, but works only if nbBits >= 1 */
106365 -/*-**************************************************************
106366 -*  Internal functions
106367 -****************************************************************/
106368 -ZSTD_STATIC unsigned BIT_highbit32(register U32 val) { return 31 - __builtin_clz(val); }
106370 -/*=====    Local Constants   =====*/
106371 -static const unsigned BIT_mask[] = {0,       1,       3,       7,      0xF,      0x1F,     0x3F,     0x7F,      0xFF,
106372 -                                   0x1FF,   0x3FF,   0x7FF,   0xFFF,    0x1FFF,   0x3FFF,   0x7FFF,   0xFFFF,    0x1FFFF,
106373 -                                   0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF, 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF}; /* up to 26 bits */
106375 -/*-**************************************************************
106376 -*  bitStream encoding
106377 -****************************************************************/
106378 -/*! BIT_initCStream() :
106379 - *  `dstCapacity` must be > sizeof(void*)
106380 - *  @return : 0 if success,
106381 -                         otherwise an error code (can be tested using ERR_isError() ) */
106382 -ZSTD_STATIC size_t BIT_initCStream(BIT_CStream_t *bitC, void *startPtr, size_t dstCapacity)
106384 -       bitC->bitContainer = 0;
106385 -       bitC->bitPos = 0;
106386 -       bitC->startPtr = (char *)startPtr;
106387 -       bitC->ptr = bitC->startPtr;
106388 -       bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->ptr);
106389 -       if (dstCapacity <= sizeof(bitC->ptr))
106390 -               return ERROR(dstSize_tooSmall);
106391 -       return 0;
106394 -/*! BIT_addBits() :
106395 -       can add up to 26 bits into `bitC`.
106396 -       Does not check for register overflow ! */
106397 -ZSTD_STATIC void BIT_addBits(BIT_CStream_t *bitC, size_t value, unsigned nbBits)
106399 -       bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos;
106400 -       bitC->bitPos += nbBits;
106403 -/*! BIT_addBitsFast() :
106404 - *  works only if `value` is _clean_, meaning all high bits above nbBits are 0 */
106405 -ZSTD_STATIC void BIT_addBitsFast(BIT_CStream_t *bitC, size_t value, unsigned nbBits)
106407 -       bitC->bitContainer |= value << bitC->bitPos;
106408 -       bitC->bitPos += nbBits;
106411 -/*! BIT_flushBitsFast() :
106412 - *  unsafe version; does not check buffer overflow */
106413 -ZSTD_STATIC void BIT_flushBitsFast(BIT_CStream_t *bitC)
106415 -       size_t const nbBytes = bitC->bitPos >> 3;
106416 -       ZSTD_writeLEST(bitC->ptr, bitC->bitContainer);
106417 -       bitC->ptr += nbBytes;
106418 -       bitC->bitPos &= 7;
106419 -       bitC->bitContainer >>= nbBytes * 8; /* if bitPos >= sizeof(bitContainer)*8 --> undefined behavior */
106422 -/*! BIT_flushBits() :
106423 - *  safe version; check for buffer overflow, and prevents it.
106424 - *  note : does not signal buffer overflow. This will be revealed later on using BIT_closeCStream() */
106425 -ZSTD_STATIC void BIT_flushBits(BIT_CStream_t *bitC)
106427 -       size_t const nbBytes = bitC->bitPos >> 3;
106428 -       ZSTD_writeLEST(bitC->ptr, bitC->bitContainer);
106429 -       bitC->ptr += nbBytes;
106430 -       if (bitC->ptr > bitC->endPtr)
106431 -               bitC->ptr = bitC->endPtr;
106432 -       bitC->bitPos &= 7;
106433 -       bitC->bitContainer >>= nbBytes * 8; /* if bitPos >= sizeof(bitContainer)*8 --> undefined behavior */
106436 -/*! BIT_closeCStream() :
106437 - *  @return : size of CStream, in bytes,
106438 -                         or 0 if it could not fit into dstBuffer */
106439 -ZSTD_STATIC size_t BIT_closeCStream(BIT_CStream_t *bitC)
106441 -       BIT_addBitsFast(bitC, 1, 1); /* endMark */
106442 -       BIT_flushBits(bitC);
106444 -       if (bitC->ptr >= bitC->endPtr)
106445 -               return 0; /* doesn't fit within authorized budget : cancel */
106447 -       return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);
106450 -/*-********************************************************
106451 -* bitStream decoding
106452 -**********************************************************/
106453 -/*! BIT_initDStream() :
106454 -*   Initialize a BIT_DStream_t.
106455 -*   `bitD` : a pointer to an already allocated BIT_DStream_t structure.
106456 -*   `srcSize` must be the *exact* size of the bitStream, in bytes.
106457 -*   @return : size of stream (== srcSize) or an errorCode if a problem is detected
106459 -ZSTD_STATIC size_t BIT_initDStream(BIT_DStream_t *bitD, const void *srcBuffer, size_t srcSize)
106461 -       if (srcSize < 1) {
106462 -               memset(bitD, 0, sizeof(*bitD));
106463 -               return ERROR(srcSize_wrong);
106464 -       }
106466 -       if (srcSize >= sizeof(bitD->bitContainer)) { /* normal case */
106467 -               bitD->start = (const char *)srcBuffer;
106468 -               bitD->ptr = (const char *)srcBuffer + srcSize - sizeof(bitD->bitContainer);
106469 -               bitD->bitContainer = ZSTD_readLEST(bitD->ptr);
106470 -               {
106471 -                       BYTE const lastByte = ((const BYTE *)srcBuffer)[srcSize - 1];
106472 -                       bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; /* ensures bitsConsumed is always set */
106473 -                       if (lastByte == 0)
106474 -                               return ERROR(GENERIC); /* endMark not present */
106475 -               }
106476 -       } else {
106477 -               bitD->start = (const char *)srcBuffer;
106478 -               bitD->ptr = bitD->start;
106479 -               bitD->bitContainer = *(const BYTE *)(bitD->start);
106480 -               switch (srcSize) {
106481 -               case 7: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[6]) << (sizeof(bitD->bitContainer) * 8 - 16);
106482 -                       fallthrough;
106483 -               case 6: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[5]) << (sizeof(bitD->bitContainer) * 8 - 24);
106484 -                       fallthrough;
106485 -               case 5: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[4]) << (sizeof(bitD->bitContainer) * 8 - 32);
106486 -                       fallthrough;
106487 -               case 4: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[3]) << 24;
106488 -                       fallthrough;
106489 -               case 3: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[2]) << 16;
106490 -                       fallthrough;
106491 -               case 2: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[1]) << 8;
106492 -                       fallthrough;
106493 -               default:;
106494 -               }
106495 -               {
106496 -                       BYTE const lastByte = ((const BYTE *)srcBuffer)[srcSize - 1];
106497 -                       bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
106498 -                       if (lastByte == 0)
106499 -                               return ERROR(GENERIC); /* endMark not present */
106500 -               }
106501 -               bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize) * 8;
106502 -       }
106504 -       return srcSize;
106507 -ZSTD_STATIC size_t BIT_getUpperBits(size_t bitContainer, U32 const start) { return bitContainer >> start; }
106509 -ZSTD_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits) { return (bitContainer >> start) & BIT_mask[nbBits]; }
106511 -ZSTD_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits) { return bitContainer & BIT_mask[nbBits]; }
106513 -/*! BIT_lookBits() :
106514 - *  Provides next n bits from local register.
106515 - *  local register is not modified.
106516 - *  On 32-bits, maxNbBits==24.
106517 - *  On 64-bits, maxNbBits==56.
106518 - *  @return : value extracted
106519 - */
106520 -ZSTD_STATIC size_t BIT_lookBits(const BIT_DStream_t *bitD, U32 nbBits)
106522 -       U32 const bitMask = sizeof(bitD->bitContainer) * 8 - 1;
106523 -       return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask - nbBits) & bitMask);
106526 -/*! BIT_lookBitsFast() :
106527 -*   unsafe version; only works only if nbBits >= 1 */
106528 -ZSTD_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t *bitD, U32 nbBits)
106530 -       U32 const bitMask = sizeof(bitD->bitContainer) * 8 - 1;
106531 -       return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask + 1) - nbBits) & bitMask);
106534 -ZSTD_STATIC void BIT_skipBits(BIT_DStream_t *bitD, U32 nbBits) { bitD->bitsConsumed += nbBits; }
106536 -/*! BIT_readBits() :
106537 - *  Read (consume) next n bits from local register and update.
106538 - *  Pay attention to not read more than nbBits contained into local register.
106539 - *  @return : extracted value.
106540 - */
106541 -ZSTD_STATIC size_t BIT_readBits(BIT_DStream_t *bitD, U32 nbBits)
106543 -       size_t const value = BIT_lookBits(bitD, nbBits);
106544 -       BIT_skipBits(bitD, nbBits);
106545 -       return value;
106548 -/*! BIT_readBitsFast() :
106549 -*   unsafe version; only works only if nbBits >= 1 */
106550 -ZSTD_STATIC size_t BIT_readBitsFast(BIT_DStream_t *bitD, U32 nbBits)
106552 -       size_t const value = BIT_lookBitsFast(bitD, nbBits);
106553 -       BIT_skipBits(bitD, nbBits);
106554 -       return value;
106557 -/*! BIT_reloadDStream() :
106558 -*   Refill `bitD` from buffer previously set in BIT_initDStream() .
106559 -*   This function is safe, it guarantees it will not read beyond src buffer.
106560 -*   @return : status of `BIT_DStream_t` internal register.
106561 -                         if status == BIT_DStream_unfinished, internal register is filled with >= (sizeof(bitD->bitContainer)*8 - 7) bits */
106562 -ZSTD_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t *bitD)
106564 -       if (bitD->bitsConsumed > (sizeof(bitD->bitContainer) * 8)) /* should not happen => corruption detected */
106565 -               return BIT_DStream_overflow;
106567 -       if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) {
106568 -               bitD->ptr -= bitD->bitsConsumed >> 3;
106569 -               bitD->bitsConsumed &= 7;
106570 -               bitD->bitContainer = ZSTD_readLEST(bitD->ptr);
106571 -               return BIT_DStream_unfinished;
106572 -       }
106573 -       if (bitD->ptr == bitD->start) {
106574 -               if (bitD->bitsConsumed < sizeof(bitD->bitContainer) * 8)
106575 -                       return BIT_DStream_endOfBuffer;
106576 -               return BIT_DStream_completed;
106577 -       }
106578 -       {
106579 -               U32 nbBytes = bitD->bitsConsumed >> 3;
106580 -               BIT_DStream_status result = BIT_DStream_unfinished;
106581 -               if (bitD->ptr - nbBytes < bitD->start) {
106582 -                       nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */
106583 -                       result = BIT_DStream_endOfBuffer;
106584 -               }
106585 -               bitD->ptr -= nbBytes;
106586 -               bitD->bitsConsumed -= nbBytes * 8;
106587 -               bitD->bitContainer = ZSTD_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */
106588 -               return result;
106589 -       }
106592 -/*! BIT_endOfDStream() :
106593 -*   @return Tells if DStream has exactly reached its end (all bits consumed).
106595 -ZSTD_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t *DStream)
106597 -       return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer) * 8));
106600 -#endif /* BITSTREAM_H_MODULE */
106601 diff --git a/lib/zstd/common/bitstream.h b/lib/zstd/common/bitstream.h
106602 new file mode 100644
106603 index 000000000000..2d6c95b4f40c
106604 --- /dev/null
106605 +++ b/lib/zstd/common/bitstream.h
106606 @@ -0,0 +1,437 @@
106607 +/* ******************************************************************
106608 + * bitstream
106609 + * Part of FSE library
106610 + * Copyright (c) Yann Collet, Facebook, Inc.
106612 + * You can contact the author at :
106613 + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
106615 + * This source code is licensed under both the BSD-style license (found in the
106616 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
106617 + * in the COPYING file in the root directory of this source tree).
106618 + * You may select, at your option, one of the above-listed licenses.
106619 +****************************************************************** */
106620 +#ifndef BITSTREAM_H_MODULE
106621 +#define BITSTREAM_H_MODULE
106624 +*  This API consists of small unitary functions, which must be inlined for best performance.
106625 +*  Since link-time-optimization is not available for all compilers,
106626 +*  these functions are defined into a .h to be included.
106629 +/*-****************************************
106630 +*  Dependencies
106631 +******************************************/
106632 +#include "mem.h"            /* unaligned access routines */
106633 +#include "compiler.h"       /* UNLIKELY() */
106634 +#include "debug.h"          /* assert(), DEBUGLOG(), RAWLOG() */
106635 +#include "error_private.h"  /* error codes and messages */
106638 +/*=========================================
106639 +*  Target specific
106640 +=========================================*/
106642 +#define STREAM_ACCUMULATOR_MIN_32  25
106643 +#define STREAM_ACCUMULATOR_MIN_64  57
106644 +#define STREAM_ACCUMULATOR_MIN    ((U32)(MEM_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64))
106647 +/*-******************************************
106648 +*  bitStream encoding API (write forward)
106649 +********************************************/
106650 +/* bitStream can mix input from multiple sources.
106651 + * A critical property of these streams is that they encode and decode in **reverse** direction.
106652 + * So the first bit sequence you add will be the last to be read, like a LIFO stack.
106653 + */
106654 +typedef struct {
106655 +    size_t bitContainer;
106656 +    unsigned bitPos;
106657 +    char*  startPtr;
106658 +    char*  ptr;
106659 +    char*  endPtr;
106660 +} BIT_CStream_t;
106662 +MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* dstBuffer, size_t dstCapacity);
106663 +MEM_STATIC void   BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
106664 +MEM_STATIC void   BIT_flushBits(BIT_CStream_t* bitC);
106665 +MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC);
106667 +/* Start with initCStream, providing the size of buffer to write into.
106668 +*  bitStream will never write outside of this buffer.
106669 +*  `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code.
106671 +*  bits are first added to a local register.
106672 +*  Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems.
106673 +*  Writing data into memory is an explicit operation, performed by the flushBits function.
106674 +*  Hence keep track how many bits are potentially stored into local register to avoid register overflow.
106675 +*  After a flushBits, a maximum of 7 bits might still be stored into local register.
106677 +*  Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers.
106679 +*  Last operation is to close the bitStream.
106680 +*  The function returns the final size of CStream in bytes.
106681 +*  If data couldn't fit into `dstBuffer`, it will return a 0 ( == not storable)
106685 +/*-********************************************
106686 +*  bitStream decoding API (read backward)
106687 +**********************************************/
106688 +typedef struct {
106689 +    size_t   bitContainer;
106690 +    unsigned bitsConsumed;
106691 +    const char* ptr;
106692 +    const char* start;
106693 +    const char* limitPtr;
106694 +} BIT_DStream_t;
106696 +typedef enum { BIT_DStream_unfinished = 0,
106697 +               BIT_DStream_endOfBuffer = 1,
106698 +               BIT_DStream_completed = 2,
106699 +               BIT_DStream_overflow = 3 } BIT_DStream_status;  /* result of BIT_reloadDStream() */
106700 +               /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
106702 +MEM_STATIC size_t   BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
106703 +MEM_STATIC size_t   BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
106704 +MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
106705 +MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);
106708 +/* Start by invoking BIT_initDStream().
106709 +*  A chunk of the bitStream is then stored into a local register.
106710 +*  Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
106711 +*  You can then retrieve bitFields stored into the local register, **in reverse order**.
106712 +*  Local register is explicitly reloaded from memory by the BIT_reloadDStream() method.
106713 +*  A reload guarantee a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished.
106714 +*  Otherwise, it can be less than that, so proceed accordingly.
106715 +*  Checking if DStream has reached its end can be performed with BIT_endOfDStream().
106719 +/*-****************************************
106720 +*  unsafe API
106721 +******************************************/
106722 +MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
106723 +/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */
106725 +MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC);
106726 +/* unsafe version; does not check buffer overflow */
106728 +MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
106729 +/* faster, but works only if nbBits >= 1 */
106733 +/*-**************************************************************
106734 +*  Internal functions
106735 +****************************************************************/
106736 +MEM_STATIC unsigned BIT_highbit32 (U32 val)
106738 +    assert(val != 0);
106739 +    {
106740 +#   if (__GNUC__ >= 3)   /* Use GCC Intrinsic */
106741 +        return __builtin_clz (val) ^ 31;
106742 +#   else   /* Software version */
106743 +        static const unsigned DeBruijnClz[32] = { 0,  9,  1, 10, 13, 21,  2, 29,
106744 +                                                 11, 14, 16, 18, 22, 25,  3, 30,
106745 +                                                  8, 12, 20, 28, 15, 17, 24,  7,
106746 +                                                 19, 27, 23,  6, 26,  5,  4, 31 };
106747 +        U32 v = val;
106748 +        v |= v >> 1;
106749 +        v |= v >> 2;
106750 +        v |= v >> 4;
106751 +        v |= v >> 8;
106752 +        v |= v >> 16;
106753 +        return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
106754 +#   endif
106755 +    }
106758 +/*=====    Local Constants   =====*/
106759 +static const unsigned BIT_mask[] = {
106760 +    0,          1,         3,         7,         0xF,       0x1F,
106761 +    0x3F,       0x7F,      0xFF,      0x1FF,     0x3FF,     0x7FF,
106762 +    0xFFF,      0x1FFF,    0x3FFF,    0x7FFF,    0xFFFF,    0x1FFFF,
106763 +    0x3FFFF,    0x7FFFF,   0xFFFFF,   0x1FFFFF,  0x3FFFFF,  0x7FFFFF,
106764 +    0xFFFFFF,   0x1FFFFFF, 0x3FFFFFF, 0x7FFFFFF, 0xFFFFFFF, 0x1FFFFFFF,
106765 +    0x3FFFFFFF, 0x7FFFFFFF}; /* up to 31 bits */
106766 +#define BIT_MASK_SIZE (sizeof(BIT_mask) / sizeof(BIT_mask[0]))
106768 +/*-**************************************************************
106769 +*  bitStream encoding
106770 +****************************************************************/
106771 +/*! BIT_initCStream() :
106772 + *  `dstCapacity` must be > sizeof(size_t)
106773 + *  @return : 0 if success,
106774 + *            otherwise an error code (can be tested using ERR_isError()) */
106775 +MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC,
106776 +                                  void* startPtr, size_t dstCapacity)
106778 +    bitC->bitContainer = 0;
106779 +    bitC->bitPos = 0;
106780 +    bitC->startPtr = (char*)startPtr;
106781 +    bitC->ptr = bitC->startPtr;
106782 +    bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer);
106783 +    if (dstCapacity <= sizeof(bitC->bitContainer)) return ERROR(dstSize_tooSmall);
106784 +    return 0;
106787 +/*! BIT_addBits() :
106788 + *  can add up to 31 bits into `bitC`.
106789 + *  Note : does not check for register overflow ! */
106790 +MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,
106791 +                            size_t value, unsigned nbBits)
106793 +    DEBUG_STATIC_ASSERT(BIT_MASK_SIZE == 32);
106794 +    assert(nbBits < BIT_MASK_SIZE);
106795 +    assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
106796 +    bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos;
106797 +    bitC->bitPos += nbBits;
106800 +/*! BIT_addBitsFast() :
106801 + *  works only if `value` is _clean_,
106802 + *  meaning all high bits above nbBits are 0 */
106803 +MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC,
106804 +                                size_t value, unsigned nbBits)
106806 +    assert((value>>nbBits) == 0);
106807 +    assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
106808 +    bitC->bitContainer |= value << bitC->bitPos;
106809 +    bitC->bitPos += nbBits;
106812 +/*! BIT_flushBitsFast() :
106813 + *  assumption : bitContainer has not overflowed
106814 + *  unsafe version; does not check buffer overflow */
106815 +MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC)
106817 +    size_t const nbBytes = bitC->bitPos >> 3;
106818 +    assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
106819 +    assert(bitC->ptr <= bitC->endPtr);
106820 +    MEM_writeLEST(bitC->ptr, bitC->bitContainer);
106821 +    bitC->ptr += nbBytes;
106822 +    bitC->bitPos &= 7;
106823 +    bitC->bitContainer >>= nbBytes*8;
106826 +/*! BIT_flushBits() :
106827 + *  assumption : bitContainer has not overflowed
106828 + *  safe version; check for buffer overflow, and prevents it.
106829 + *  note : does not signal buffer overflow.
106830 + *  overflow will be revealed later on using BIT_closeCStream() */
106831 +MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC)
106833 +    size_t const nbBytes = bitC->bitPos >> 3;
106834 +    assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
106835 +    assert(bitC->ptr <= bitC->endPtr);
106836 +    MEM_writeLEST(bitC->ptr, bitC->bitContainer);
106837 +    bitC->ptr += nbBytes;
106838 +    if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
106839 +    bitC->bitPos &= 7;
106840 +    bitC->bitContainer >>= nbBytes*8;
106843 +/*! BIT_closeCStream() :
106844 + *  @return : size of CStream, in bytes,
106845 + *            or 0 if it could not fit into dstBuffer */
106846 +MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC)
106848 +    BIT_addBitsFast(bitC, 1, 1);   /* endMark */
106849 +    BIT_flushBits(bitC);
106850 +    if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */
106851 +    return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);
106855 +/*-********************************************************
106856 +*  bitStream decoding
106857 +**********************************************************/
106858 +/*! BIT_initDStream() :
106859 + *  Initialize a BIT_DStream_t.
106860 + * `bitD` : a pointer to an already allocated BIT_DStream_t structure.
106861 + * `srcSize` must be the *exact* size of the bitStream, in bytes.
106862 + * @return : size of stream (== srcSize), or an errorCode if a problem is detected
106863 + */
106864 +MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
106866 +    if (srcSize < 1) { ZSTD_memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
106868 +    bitD->start = (const char*)srcBuffer;
106869 +    bitD->limitPtr = bitD->start + sizeof(bitD->bitContainer);
106871 +    if (srcSize >=  sizeof(bitD->bitContainer)) {  /* normal case */
106872 +        bitD->ptr   = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);
106873 +        bitD->bitContainer = MEM_readLEST(bitD->ptr);
106874 +        { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
106875 +          bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;  /* ensures bitsConsumed is always set */
106876 +          if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
106877 +    } else {
106878 +        bitD->ptr   = bitD->start;
106879 +        bitD->bitContainer = *(const BYTE*)(bitD->start);
106880 +        switch(srcSize)
106881 +        {
106882 +        case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
106883 +                /* fall-through */
106885 +        case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
106886 +                /* fall-through */
106888 +        case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
106889 +                /* fall-through */
106891 +        case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;
106892 +                /* fall-through */
106894 +        case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;
106895 +                /* fall-through */
106897 +        case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) <<  8;
106898 +                /* fall-through */
106900 +        default: break;
106901 +        }
106902 +        {   BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
106903 +            bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
106904 +            if (lastByte == 0) return ERROR(corruption_detected);  /* endMark not present */
106905 +        }
106906 +        bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;
106907 +    }
106909 +    return srcSize;
106912 +MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getUpperBits(size_t bitContainer, U32 const start)
106914 +    return bitContainer >> start;
106917 +MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits)
106919 +    U32 const regMask = sizeof(bitContainer)*8 - 1;
106920 +    /* if start > regMask, bitstream is corrupted, and result is undefined */
106921 +    assert(nbBits < BIT_MASK_SIZE);
106922 +    return (bitContainer >> (start & regMask)) & BIT_mask[nbBits];
106925 +MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
106927 +    assert(nbBits < BIT_MASK_SIZE);
106928 +    return bitContainer & BIT_mask[nbBits];
106931 +/*! BIT_lookBits() :
106932 + *  Provides next n bits from local register.
106933 + *  local register is not modified.
106934 + *  On 32-bits, maxNbBits==24.
106935 + *  On 64-bits, maxNbBits==56.
106936 + * @return : value extracted */
106937 +MEM_STATIC  FORCE_INLINE_ATTR size_t BIT_lookBits(const BIT_DStream_t*  bitD, U32 nbBits)
106939 +    /* arbitrate between double-shift and shift+mask */
106940 +#if 1
106941 +    /* if bitD->bitsConsumed + nbBits > sizeof(bitD->bitContainer)*8,
106942 +     * bitstream is likely corrupted, and result is undefined */
106943 +    return BIT_getMiddleBits(bitD->bitContainer, (sizeof(bitD->bitContainer)*8) - bitD->bitsConsumed - nbBits, nbBits);
106944 +#else
106945 +    /* this code path is slower on my os-x laptop */
106946 +    U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
106947 +    return ((bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> 1) >> ((regMask-nbBits) & regMask);
106948 +#endif
106951 +/*! BIT_lookBitsFast() :
106952 + *  unsafe version; only works if nbBits >= 1 */
106953 +MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits)
106955 +    U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
106956 +    assert(nbBits >= 1);
106957 +    return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask);
106960 +MEM_STATIC FORCE_INLINE_ATTR void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
106962 +    bitD->bitsConsumed += nbBits;
106965 +/*! BIT_readBits() :
106966 + *  Read (consume) next n bits from local register and update.
106967 + *  Pay attention to not read more than nbBits contained into local register.
106968 + * @return : extracted value. */
106969 +MEM_STATIC FORCE_INLINE_ATTR size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits)
106971 +    size_t const value = BIT_lookBits(bitD, nbBits);
106972 +    BIT_skipBits(bitD, nbBits);
106973 +    return value;
106976 +/*! BIT_readBitsFast() :
106977 + *  unsafe version; only works only if nbBits >= 1 */
106978 +MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits)
106980 +    size_t const value = BIT_lookBitsFast(bitD, nbBits);
106981 +    assert(nbBits >= 1);
106982 +    BIT_skipBits(bitD, nbBits);
106983 +    return value;
106986 +/*! BIT_reloadDStreamFast() :
106987 + *  Similar to BIT_reloadDStream(), but with two differences:
106988 + *  1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold!
106989 + *  2. Returns BIT_DStream_overflow when bitD->ptr < bitD->limitPtr, at this
106990 + *     point you must use BIT_reloadDStream() to reload.
106991 + */
106992 +MEM_STATIC BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD)
106994 +    if (UNLIKELY(bitD->ptr < bitD->limitPtr))
106995 +        return BIT_DStream_overflow;
106996 +    assert(bitD->bitsConsumed <= sizeof(bitD->bitContainer)*8);
106997 +    bitD->ptr -= bitD->bitsConsumed >> 3;
106998 +    bitD->bitsConsumed &= 7;
106999 +    bitD->bitContainer = MEM_readLEST(bitD->ptr);
107000 +    return BIT_DStream_unfinished;
107003 +/*! BIT_reloadDStream() :
107004 + *  Refill `bitD` from buffer previously set in BIT_initDStream() .
107005 + *  This function is safe, it guarantees it will not read beyond src buffer.
107006 + * @return : status of `BIT_DStream_t` internal register.
107007 + *           when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */
107008 +MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
107010 +    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* overflow detected, like end of stream */
107011 +        return BIT_DStream_overflow;
107013 +    if (bitD->ptr >= bitD->limitPtr) {
107014 +        return BIT_reloadDStreamFast(bitD);
107015 +    }
107016 +    if (bitD->ptr == bitD->start) {
107017 +        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
107018 +        return BIT_DStream_completed;
107019 +    }
107020 +    /* start < ptr < limitPtr */
107021 +    {   U32 nbBytes = bitD->bitsConsumed >> 3;
107022 +        BIT_DStream_status result = BIT_DStream_unfinished;
107023 +        if (bitD->ptr - nbBytes < bitD->start) {
107024 +            nbBytes = (U32)(bitD->ptr - bitD->start);  /* ptr > start */
107025 +            result = BIT_DStream_endOfBuffer;
107026 +        }
107027 +        bitD->ptr -= nbBytes;
107028 +        bitD->bitsConsumed -= nbBytes*8;
107029 +        bitD->bitContainer = MEM_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD->bitContainer), otherwise bitD->ptr == bitD->start */
107030 +        return result;
107031 +    }
107034 +/*! BIT_endOfDStream() :
107035 + * @return : 1 if DStream has _exactly_ reached its end (all bits consumed).
107036 + */
107037 +MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
107039 +    return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
107043 +#endif /* BITSTREAM_H_MODULE */
107044 diff --git a/lib/zstd/common/compiler.h b/lib/zstd/common/compiler.h
107045 new file mode 100644
107046 index 000000000000..9269b58a93e2
107047 --- /dev/null
107048 +++ b/lib/zstd/common/compiler.h
107049 @@ -0,0 +1,151 @@
107051 + * Copyright (c) Yann Collet, Facebook, Inc.
107052 + * All rights reserved.
107054 + * This source code is licensed under both the BSD-style license (found in the
107055 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
107056 + * in the COPYING file in the root directory of this source tree).
107057 + * You may select, at your option, one of the above-listed licenses.
107058 + */
107060 +#ifndef ZSTD_COMPILER_H
107061 +#define ZSTD_COMPILER_H
107063 +/*-*******************************************************
107064 +*  Compiler specifics
107065 +*********************************************************/
107066 +/* force inlining */
107068 +#if (defined(__GNUC__) && !defined(__STRICT_ANSI__)) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
107069 +#  define INLINE_KEYWORD inline
107070 +#else
107071 +#  define INLINE_KEYWORD
107072 +#endif
107074 +#define FORCE_INLINE_ATTR __attribute__((always_inline))
107078 +  On MSVC qsort requires that functions passed into it use the __cdecl calling conversion(CC).
107079 +  This explictly marks such functions as __cdecl so that the code will still compile
107080 +  if a CC other than __cdecl has been made the default.
107082 +#define WIN_CDECL
107085 + * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant
107086 + * parameters. They must be inlined for the compiler to eliminate the constant
107087 + * branches.
107088 + */
107089 +#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
107091 + * HINT_INLINE is used to help the compiler generate better code. It is *not*
107092 + * used for "templates", so it can be tweaked based on the compilers
107093 + * performance.
107095 + * gcc-4.8 and gcc-4.9 have been shown to benefit from leaving off the
107096 + * always_inline attribute.
107098 + * clang up to 5.0.0 (trunk) benefit tremendously from the always_inline
107099 + * attribute.
107100 + */
107101 +#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5
107102 +#  define HINT_INLINE static INLINE_KEYWORD
107103 +#else
107104 +#  define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR
107105 +#endif
107107 +/* UNUSED_ATTR tells the compiler it is okay if the function is unused. */
107108 +#define UNUSED_ATTR __attribute__((unused))
107110 +/* force no inlining */
107111 +#define FORCE_NOINLINE static __attribute__((__noinline__))
107114 +/* target attribute */
107115 +#ifndef __has_attribute
107116 +  #define __has_attribute(x) 0  /* Compatibility with non-clang compilers. */
107117 +#endif
107118 +#define TARGET_ATTRIBUTE(target) __attribute__((__target__(target)))
107120 +/* Enable runtime BMI2 dispatch based on the CPU.
107121 + * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default.
107122 + */
107123 +#ifndef DYNAMIC_BMI2
107124 +  #if ((defined(__clang__) && __has_attribute(__target__)) \
107125 +      || (defined(__GNUC__) \
107126 +          && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \
107127 +      && (defined(__x86_64__) || defined(_M_X86)) \
107128 +      && !defined(__BMI2__)
107129 +  #  define DYNAMIC_BMI2 1
107130 +  #else
107131 +  #  define DYNAMIC_BMI2 0
107132 +  #endif
107133 +#endif
107135 +/* prefetch
107136 + * can be disabled, by declaring NO_PREFETCH build macro */
107137 +#if ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
107138 +#  define PREFETCH_L1(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
107139 +#  define PREFETCH_L2(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 2 /* locality */)
107140 +#elif defined(__aarch64__)
107141 +#  define PREFETCH_L1(ptr)  __asm__ __volatile__("prfm pldl1keep, %0" ::"Q"(*(ptr)))
107142 +#  define PREFETCH_L2(ptr)  __asm__ __volatile__("prfm pldl2keep, %0" ::"Q"(*(ptr)))
107143 +#else
107144 +#  define PREFETCH_L1(ptr) (void)(ptr)  /* disabled */
107145 +#  define PREFETCH_L2(ptr) (void)(ptr)  /* disabled */
107146 +#endif  /* NO_PREFETCH */
107148 +#define CACHELINE_SIZE 64
107150 +#define PREFETCH_AREA(p, s)  {            \
107151 +    const char* const _ptr = (const char*)(p);  \
107152 +    size_t const _size = (size_t)(s);     \
107153 +    size_t _pos;                          \
107154 +    for (_pos=0; _pos<_size; _pos+=CACHELINE_SIZE) {  \
107155 +        PREFETCH_L2(_ptr + _pos);         \
107156 +    }                                     \
107159 +/* vectorization
107160 + * older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax */
107161 +#if !defined(__INTEL_COMPILER) && !defined(__clang__) && defined(__GNUC__)
107162 +#  if (__GNUC__ == 4 && __GNUC_MINOR__ > 3) || (__GNUC__ >= 5)
107163 +#    define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize")))
107164 +#  else
107165 +#    define DONT_VECTORIZE _Pragma("GCC optimize(\"no-tree-vectorize\")")
107166 +#  endif
107167 +#else
107168 +#  define DONT_VECTORIZE
107169 +#endif
107171 +/* Tell the compiler that a branch is likely or unlikely.
107172 + * Only use these macros if it causes the compiler to generate better code.
107173 + * If you can remove a LIKELY/UNLIKELY annotation without speed changes in gcc
107174 + * and clang, please do.
107175 + */
107176 +#define LIKELY(x) (__builtin_expect((x), 1))
107177 +#define UNLIKELY(x) (__builtin_expect((x), 0))
107179 +/* disable warnings */
107181 +/*Like DYNAMIC_BMI2 but for compile time determination of BMI2 support*/
107184 +/* compat. with non-clang compilers */
107185 +#ifndef __has_builtin
107186 +#  define __has_builtin(x) 0
107187 +#endif
107189 +/* compat. with non-clang compilers */
107190 +#ifndef __has_feature
107191 +#  define __has_feature(x) 0
107192 +#endif
107194 +/* detects whether we are being compiled under msan */
107197 +/* detects whether we are being compiled under asan */
107200 +#endif /* ZSTD_COMPILER_H */
107201 diff --git a/lib/zstd/common/cpu.h b/lib/zstd/common/cpu.h
107202 new file mode 100644
107203 index 000000000000..0202d94076a3
107204 --- /dev/null
107205 +++ b/lib/zstd/common/cpu.h
107206 @@ -0,0 +1,194 @@
107208 + * Copyright (c) Facebook, Inc.
107209 + * All rights reserved.
107211 + * This source code is licensed under both the BSD-style license (found in the
107212 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
107213 + * in the COPYING file in the root directory of this source tree).
107214 + * You may select, at your option, one of the above-listed licenses.
107215 + */
107217 +#ifndef ZSTD_COMMON_CPU_H
107218 +#define ZSTD_COMMON_CPU_H
107221 + * Implementation taken from folly/CpuId.h
107222 + * https://github.com/facebook/folly/blob/master/folly/CpuId.h
107223 + */
107225 +#include "mem.h"
107228 +typedef struct {
107229 +    U32 f1c;
107230 +    U32 f1d;
107231 +    U32 f7b;
107232 +    U32 f7c;
107233 +} ZSTD_cpuid_t;
107235 +MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid(void) {
107236 +    U32 f1c = 0;
107237 +    U32 f1d = 0;
107238 +    U32 f7b = 0;
107239 +    U32 f7c = 0;
107240 +#if defined(__i386__) && defined(__PIC__) && !defined(__clang__) && defined(__GNUC__)
107241 +    /* The following block like the normal cpuid branch below, but gcc
107242 +     * reserves ebx for use of its pic register so we must specially
107243 +     * handle the save and restore to avoid clobbering the register
107244 +     */
107245 +    U32 n;
107246 +    __asm__(
107247 +        "pushl %%ebx\n\t"
107248 +        "cpuid\n\t"
107249 +        "popl %%ebx\n\t"
107250 +        : "=a"(n)
107251 +        : "a"(0)
107252 +        : "ecx", "edx");
107253 +    if (n >= 1) {
107254 +      U32 f1a;
107255 +      __asm__(
107256 +          "pushl %%ebx\n\t"
107257 +          "cpuid\n\t"
107258 +          "popl %%ebx\n\t"
107259 +          : "=a"(f1a), "=c"(f1c), "=d"(f1d)
107260 +          : "a"(1));
107261 +    }
107262 +    if (n >= 7) {
107263 +      __asm__(
107264 +          "pushl %%ebx\n\t"
107265 +          "cpuid\n\t"
107266 +          "movl %%ebx, %%eax\n\t"
107267 +          "popl %%ebx"
107268 +          : "=a"(f7b), "=c"(f7c)
107269 +          : "a"(7), "c"(0)
107270 +          : "edx");
107271 +    }
107272 +#elif defined(__x86_64__) || defined(_M_X64) || defined(__i386__)
107273 +    U32 n;
107274 +    __asm__("cpuid" : "=a"(n) : "a"(0) : "ebx", "ecx", "edx");
107275 +    if (n >= 1) {
107276 +      U32 f1a;
107277 +      __asm__("cpuid" : "=a"(f1a), "=c"(f1c), "=d"(f1d) : "a"(1) : "ebx");
107278 +    }
107279 +    if (n >= 7) {
107280 +      U32 f7a;
107281 +      __asm__("cpuid"
107282 +              : "=a"(f7a), "=b"(f7b), "=c"(f7c)
107283 +              : "a"(7), "c"(0)
107284 +              : "edx");
107285 +    }
107286 +#endif
107287 +    {
107288 +        ZSTD_cpuid_t cpuid;
107289 +        cpuid.f1c = f1c;
107290 +        cpuid.f1d = f1d;
107291 +        cpuid.f7b = f7b;
107292 +        cpuid.f7c = f7c;
107293 +        return cpuid;
107294 +    }
107297 +#define X(name, r, bit)                                                        \
107298 +  MEM_STATIC int ZSTD_cpuid_##name(ZSTD_cpuid_t const cpuid) {                 \
107299 +    return ((cpuid.r) & (1U << bit)) != 0;                                     \
107300 +  }
107302 +/* cpuid(1): Processor Info and Feature Bits. */
107303 +#define C(name, bit) X(name, f1c, bit)
107304 +  C(sse3, 0)
107305 +  C(pclmuldq, 1)
107306 +  C(dtes64, 2)
107307 +  C(monitor, 3)
107308 +  C(dscpl, 4)
107309 +  C(vmx, 5)
107310 +  C(smx, 6)
107311 +  C(eist, 7)
107312 +  C(tm2, 8)
107313 +  C(ssse3, 9)
107314 +  C(cnxtid, 10)
107315 +  C(fma, 12)
107316 +  C(cx16, 13)
107317 +  C(xtpr, 14)
107318 +  C(pdcm, 15)
107319 +  C(pcid, 17)
107320 +  C(dca, 18)
107321 +  C(sse41, 19)
107322 +  C(sse42, 20)
107323 +  C(x2apic, 21)
107324 +  C(movbe, 22)
107325 +  C(popcnt, 23)
107326 +  C(tscdeadline, 24)
107327 +  C(aes, 25)
107328 +  C(xsave, 26)
107329 +  C(osxsave, 27)
107330 +  C(avx, 28)
107331 +  C(f16c, 29)
107332 +  C(rdrand, 30)
107333 +#undef C
107334 +#define D(name, bit) X(name, f1d, bit)
107335 +  D(fpu, 0)
107336 +  D(vme, 1)
107337 +  D(de, 2)
107338 +  D(pse, 3)
107339 +  D(tsc, 4)
107340 +  D(msr, 5)
107341 +  D(pae, 6)
107342 +  D(mce, 7)
107343 +  D(cx8, 8)
107344 +  D(apic, 9)
107345 +  D(sep, 11)
107346 +  D(mtrr, 12)
107347 +  D(pge, 13)
107348 +  D(mca, 14)
107349 +  D(cmov, 15)
107350 +  D(pat, 16)
107351 +  D(pse36, 17)
107352 +  D(psn, 18)
107353 +  D(clfsh, 19)
107354 +  D(ds, 21)
107355 +  D(acpi, 22)
107356 +  D(mmx, 23)
107357 +  D(fxsr, 24)
107358 +  D(sse, 25)
107359 +  D(sse2, 26)
107360 +  D(ss, 27)
107361 +  D(htt, 28)
107362 +  D(tm, 29)
107363 +  D(pbe, 31)
107364 +#undef D
107366 +/* cpuid(7): Extended Features. */
107367 +#define B(name, bit) X(name, f7b, bit)
107368 +  B(bmi1, 3)
107369 +  B(hle, 4)
107370 +  B(avx2, 5)
107371 +  B(smep, 7)
107372 +  B(bmi2, 8)
107373 +  B(erms, 9)
107374 +  B(invpcid, 10)
107375 +  B(rtm, 11)
107376 +  B(mpx, 14)
107377 +  B(avx512f, 16)
107378 +  B(avx512dq, 17)
107379 +  B(rdseed, 18)
107380 +  B(adx, 19)
107381 +  B(smap, 20)
107382 +  B(avx512ifma, 21)
107383 +  B(pcommit, 22)
107384 +  B(clflushopt, 23)
107385 +  B(clwb, 24)
107386 +  B(avx512pf, 26)
107387 +  B(avx512er, 27)
107388 +  B(avx512cd, 28)
107389 +  B(sha, 29)
107390 +  B(avx512bw, 30)
107391 +  B(avx512vl, 31)
107392 +#undef B
107393 +#define C(name, bit) X(name, f7c, bit)
107394 +  C(prefetchwt1, 0)
107395 +  C(avx512vbmi, 1)
107396 +#undef C
107398 +#undef X
107400 +#endif /* ZSTD_COMMON_CPU_H */
107401 diff --git a/lib/zstd/common/debug.c b/lib/zstd/common/debug.c
107402 new file mode 100644
107403 index 000000000000..bb863c9ea616
107404 --- /dev/null
107405 +++ b/lib/zstd/common/debug.c
107406 @@ -0,0 +1,24 @@
107407 +/* ******************************************************************
107408 + * debug
107409 + * Part of FSE library
107410 + * Copyright (c) Yann Collet, Facebook, Inc.
107412 + * You can contact the author at :
107413 + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
107415 + * This source code is licensed under both the BSD-style license (found in the
107416 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
107417 + * in the COPYING file in the root directory of this source tree).
107418 + * You may select, at your option, one of the above-listed licenses.
107419 +****************************************************************** */
107423 + * This module only hosts one global variable
107424 + * which can be used to dynamically influence the verbosity of traces,
107425 + * such as DEBUGLOG and RAWLOG
107426 + */
107428 +#include "debug.h"
107430 +int g_debuglevel = DEBUGLEVEL;
107431 diff --git a/lib/zstd/common/debug.h b/lib/zstd/common/debug.h
107432 new file mode 100644
107433 index 000000000000..6dd88d1fbd02
107434 --- /dev/null
107435 +++ b/lib/zstd/common/debug.h
107436 @@ -0,0 +1,101 @@
107437 +/* ******************************************************************
107438 + * debug
107439 + * Part of FSE library
107440 + * Copyright (c) Yann Collet, Facebook, Inc.
107442 + * You can contact the author at :
107443 + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
107445 + * This source code is licensed under both the BSD-style license (found in the
107446 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
107447 + * in the COPYING file in the root directory of this source tree).
107448 + * You may select, at your option, one of the above-listed licenses.
107449 +****************************************************************** */
107453 + * The purpose of this header is to enable debug functions.
107454 + * They regroup assert(), DEBUGLOG() and RAWLOG() for run-time,
107455 + * and DEBUG_STATIC_ASSERT() for compile-time.
107457 + * By default, DEBUGLEVEL==0, which means run-time debug is disabled.
107459 + * Level 1 enables assert() only.
107460 + * Starting level 2, traces can be generated and pushed to stderr.
107461 + * The higher the level, the more verbose the traces.
107463 + * It's possible to dynamically adjust level using variable g_debug_level,
107464 + * which is only declared if DEBUGLEVEL>=2,
107465 + * and is a global variable, not multi-thread protected (use with care)
107466 + */
107468 +#ifndef DEBUG_H_12987983217
107469 +#define DEBUG_H_12987983217
107473 +/* static assert is triggered at compile time, leaving no runtime artefact.
107474 + * static assert only works with compile-time constants.
107475 + * Also, this variant can only be used inside a function. */
107476 +#define DEBUG_STATIC_ASSERT(c) (void)sizeof(char[(c) ? 1 : -1])
107479 +/* DEBUGLEVEL is expected to be defined externally,
107480 + * typically through compiler command line.
107481 + * Value must be a number. */
107482 +#ifndef DEBUGLEVEL
107483 +#  define DEBUGLEVEL 0
107484 +#endif
107487 +/* recommended values for DEBUGLEVEL :
107488 + * 0 : release mode, no debug, all run-time checks disabled
107489 + * 1 : enables assert() only, no display
107490 + * 2 : reserved, for currently active debug path
107491 + * 3 : events once per object lifetime (CCtx, CDict, etc.)
107492 + * 4 : events once per frame
107493 + * 5 : events once per block
107494 + * 6 : events once per sequence (verbose)
107495 + * 7+: events at every position (*very* verbose)
107497 + * It's generally inconvenient to output traces > 5.
107498 + * In which case, it's possible to selectively trigger high verbosity levels
107499 + * by modifying g_debug_level.
107500 + */
107502 +#if (DEBUGLEVEL>=1)
107503 +#  define ZSTD_DEPS_NEED_ASSERT
107504 +#  include "zstd_deps.h"
107505 +#else
107506 +#  ifndef assert   /* assert may be already defined, due to prior #include <assert.h> */
107507 +#    define assert(condition) ((void)0)   /* disable assert (default) */
107508 +#  endif
107509 +#endif
107511 +#if (DEBUGLEVEL>=2)
107512 +#  define ZSTD_DEPS_NEED_IO
107513 +#  include "zstd_deps.h"
107514 +extern int g_debuglevel; /* the variable is only declared,
107515 +                            it actually lives in debug.c,
107516 +                            and is shared by the whole process.
107517 +                            It's not thread-safe.
107518 +                            It's useful when enabling very verbose levels
107519 +                            on selective conditions (such as position in src) */
107521 +#  define RAWLOG(l, ...) {                                       \
107522 +                if (l<=g_debuglevel) {                           \
107523 +                    ZSTD_DEBUG_PRINT(__VA_ARGS__);               \
107524 +            }   }
107525 +#  define DEBUGLOG(l, ...) {                                     \
107526 +                if (l<=g_debuglevel) {                           \
107527 +                    ZSTD_DEBUG_PRINT(__FILE__ ": " __VA_ARGS__); \
107528 +                    ZSTD_DEBUG_PRINT(" \n");                     \
107529 +            }   }
107530 +#else
107531 +#  define RAWLOG(l, ...)      {}    /* disabled */
107532 +#  define DEBUGLOG(l, ...)    {}    /* disabled */
107533 +#endif
107537 +#endif /* DEBUG_H_12987983217 */
107538 diff --git a/lib/zstd/common/entropy_common.c b/lib/zstd/common/entropy_common.c
107539 new file mode 100644
107540 index 000000000000..53b47a2b52ff
107541 --- /dev/null
107542 +++ b/lib/zstd/common/entropy_common.c
107543 @@ -0,0 +1,357 @@
107544 +/* ******************************************************************
107545 + * Common functions of New Generation Entropy library
107546 + * Copyright (c) Yann Collet, Facebook, Inc.
107548 + *  You can contact the author at :
107549 + *  - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
107550 + *  - Public forum : https://groups.google.com/forum/#!forum/lz4c
107552 + * This source code is licensed under both the BSD-style license (found in the
107553 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
107554 + * in the COPYING file in the root directory of this source tree).
107555 + * You may select, at your option, one of the above-listed licenses.
107556 +****************************************************************** */
107558 +/* *************************************
107559 +*  Dependencies
107560 +***************************************/
107561 +#include "mem.h"
107562 +#include "error_private.h"       /* ERR_*, ERROR */
107563 +#define FSE_STATIC_LINKING_ONLY  /* FSE_MIN_TABLELOG */
107564 +#include "fse.h"
107565 +#define HUF_STATIC_LINKING_ONLY  /* HUF_TABLELOG_ABSOLUTEMAX */
107566 +#include "huf.h"
107569 +/*===   Version   ===*/
107570 +unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }
107573 +/*===   Error Management   ===*/
107574 +unsigned FSE_isError(size_t code) { return ERR_isError(code); }
107575 +const char* FSE_getErrorName(size_t code) { return ERR_getErrorName(code); }
107577 +unsigned HUF_isError(size_t code) { return ERR_isError(code); }
107578 +const char* HUF_getErrorName(size_t code) { return ERR_getErrorName(code); }
107581 +/*-**************************************************************
107582 +*  FSE NCount encoding-decoding
107583 +****************************************************************/
107584 +static U32 FSE_ctz(U32 val)
107586 +    assert(val != 0);
107587 +    {
107588 +#   if (__GNUC__ >= 3)   /* GCC Intrinsic */
107589 +        return __builtin_ctz(val);
107590 +#   else   /* Software version */
107591 +        U32 count = 0;
107592 +        while ((val & 1) == 0) {
107593 +            val >>= 1;
107594 +            ++count;
107595 +        }
107596 +        return count;
107597 +#   endif
107598 +    }
107601 +FORCE_INLINE_TEMPLATE
107602 +size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
107603 +                           const void* headerBuffer, size_t hbSize)
107605 +    const BYTE* const istart = (const BYTE*) headerBuffer;
107606 +    const BYTE* const iend = istart + hbSize;
107607 +    const BYTE* ip = istart;
107608 +    int nbBits;
107609 +    int remaining;
107610 +    int threshold;
107611 +    U32 bitStream;
107612 +    int bitCount;
107613 +    unsigned charnum = 0;
107614 +    unsigned const maxSV1 = *maxSVPtr + 1;
107615 +    int previous0 = 0;
107617 +    if (hbSize < 8) {
107618 +        /* This function only works when hbSize >= 8 */
107619 +        char buffer[8] = {0};
107620 +        ZSTD_memcpy(buffer, headerBuffer, hbSize);
107621 +        {   size_t const countSize = FSE_readNCount(normalizedCounter, maxSVPtr, tableLogPtr,
107622 +                                                    buffer, sizeof(buffer));
107623 +            if (FSE_isError(countSize)) return countSize;
107624 +            if (countSize > hbSize) return ERROR(corruption_detected);
107625 +            return countSize;
107626 +    }   }
107627 +    assert(hbSize >= 8);
107629 +    /* init */
107630 +    ZSTD_memset(normalizedCounter, 0, (*maxSVPtr+1) * sizeof(normalizedCounter[0]));   /* all symbols not present in NCount have a frequency of 0 */
107631 +    bitStream = MEM_readLE32(ip);
107632 +    nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog */
107633 +    if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
107634 +    bitStream >>= 4;
107635 +    bitCount = 4;
107636 +    *tableLogPtr = nbBits;
107637 +    remaining = (1<<nbBits)+1;
107638 +    threshold = 1<<nbBits;
107639 +    nbBits++;
107641 +    for (;;) {
107642 +        if (previous0) {
107643 +            /* Count the number of repeats. Each time the
107644 +             * 2-bit repeat code is 0b11 there is another
107645 +             * repeat.
107646 +             * Avoid UB by setting the high bit to 1.
107647 +             */
107648 +            int repeats = FSE_ctz(~bitStream | 0x80000000) >> 1;
107649 +            while (repeats >= 12) {
107650 +                charnum += 3 * 12;
107651 +                if (LIKELY(ip <= iend-7)) {
107652 +                    ip += 3;
107653 +                } else {
107654 +                    bitCount -= (int)(8 * (iend - 7 - ip));
107655 +                    bitCount &= 31;
107656 +                    ip = iend - 4;
107657 +                }
107658 +                bitStream = MEM_readLE32(ip) >> bitCount;
107659 +                repeats = FSE_ctz(~bitStream | 0x80000000) >> 1;
107660 +            }
107661 +            charnum += 3 * repeats;
107662 +            bitStream >>= 2 * repeats;
107663 +            bitCount += 2 * repeats;
107665 +            /* Add the final repeat which isn't 0b11. */
107666 +            assert((bitStream & 3) < 3);
107667 +            charnum += bitStream & 3;
107668 +            bitCount += 2;
107670 +            /* This is an error, but break and return an error
107671 +             * at the end, because returning out of a loop makes
107672 +             * it harder for the compiler to optimize.
107673 +             */
107674 +            if (charnum >= maxSV1) break;
107676 +            /* We don't need to set the normalized count to 0
107677 +             * because we already memset the whole buffer to 0.
107678 +             */
107680 +            if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
107681 +                assert((bitCount >> 3) <= 3); /* For first condition to work */
107682 +                ip += bitCount>>3;
107683 +                bitCount &= 7;
107684 +            } else {
107685 +                bitCount -= (int)(8 * (iend - 4 - ip));
107686 +                bitCount &= 31;
107687 +                ip = iend - 4;
107688 +            }
107689 +            bitStream = MEM_readLE32(ip) >> bitCount;
107690 +        }
107691 +        {
107692 +            int const max = (2*threshold-1) - remaining;
107693 +            int count;
107695 +            if ((bitStream & (threshold-1)) < (U32)max) {
107696 +                count = bitStream & (threshold-1);
107697 +                bitCount += nbBits-1;
107698 +            } else {
107699 +                count = bitStream & (2*threshold-1);
107700 +                if (count >= threshold) count -= max;
107701 +                bitCount += nbBits;
107702 +            }
107704 +            count--;   /* extra accuracy */
107705 +            /* When it matters (small blocks), this is a
107706 +             * predictable branch, because we don't use -1.
107707 +             */
107708 +            if (count >= 0) {
107709 +                remaining -= count;
107710 +            } else {
107711 +                assert(count == -1);
107712 +                remaining += count;
107713 +            }
107714 +            normalizedCounter[charnum++] = (short)count;
107715 +            previous0 = !count;
107717 +            assert(threshold > 1);
107718 +            if (remaining < threshold) {
107719 +                /* This branch can be folded into the
107720 +                 * threshold update condition because we
107721 +                 * know that threshold > 1.
107722 +                 */
107723 +                if (remaining <= 1) break;
107724 +                nbBits = BIT_highbit32(remaining) + 1;
107725 +                threshold = 1 << (nbBits - 1);
107726 +            }
107727 +            if (charnum >= maxSV1) break;
107729 +            if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
107730 +                ip += bitCount>>3;
107731 +                bitCount &= 7;
107732 +            } else {
107733 +                bitCount -= (int)(8 * (iend - 4 - ip));
107734 +                bitCount &= 31;
107735 +                ip = iend - 4;
107736 +            }
107737 +            bitStream = MEM_readLE32(ip) >> bitCount;
107738 +    }   }
107739 +    if (remaining != 1) return ERROR(corruption_detected);
107740 +    /* Only possible when there are too many zeros. */
107741 +    if (charnum > maxSV1) return ERROR(maxSymbolValue_tooSmall);
107742 +    if (bitCount > 32) return ERROR(corruption_detected);
107743 +    *maxSVPtr = charnum-1;
107745 +    ip += (bitCount+7)>>3;
107746 +    return ip-istart;
107749 +/* Avoids the FORCE_INLINE of the _body() function. */
107750 +static size_t FSE_readNCount_body_default(
107751 +        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
107752 +        const void* headerBuffer, size_t hbSize)
107754 +    return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
107757 +#if DYNAMIC_BMI2
107758 +TARGET_ATTRIBUTE("bmi2") static size_t FSE_readNCount_body_bmi2(
107759 +        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
107760 +        const void* headerBuffer, size_t hbSize)
107762 +    return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
107764 +#endif
107766 +size_t FSE_readNCount_bmi2(
107767 +        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
107768 +        const void* headerBuffer, size_t hbSize, int bmi2)
107770 +#if DYNAMIC_BMI2
107771 +    if (bmi2) {
107772 +        return FSE_readNCount_body_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
107773 +    }
107774 +#endif
107775 +    (void)bmi2;
107776 +    return FSE_readNCount_body_default(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
107779 +size_t FSE_readNCount(
107780 +        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
107781 +        const void* headerBuffer, size_t hbSize)
107783 +    return FSE_readNCount_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize, /* bmi2 */ 0);
107787 +/*! HUF_readStats() :
107788 +    Read compact Huffman tree, saved by HUF_writeCTable().
107789 +    `huffWeight` is destination buffer.
107790 +    `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
107791 +    @return : size read from `src` , or an error Code .
107792 +    Note : Needed by HUF_readCTable() and HUF_readDTableX?() .
107794 +size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
107795 +                     U32* nbSymbolsPtr, U32* tableLogPtr,
107796 +                     const void* src, size_t srcSize)
107798 +    U32 wksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
107799 +    return HUF_readStats_wksp(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, wksp, sizeof(wksp), /* bmi2 */ 0);
107802 +FORCE_INLINE_TEMPLATE size_t
107803 +HUF_readStats_body(BYTE* huffWeight, size_t hwSize, U32* rankStats,
107804 +                   U32* nbSymbolsPtr, U32* tableLogPtr,
107805 +                   const void* src, size_t srcSize,
107806 +                   void* workSpace, size_t wkspSize,
107807 +                   int bmi2)
107809 +    U32 weightTotal;
107810 +    const BYTE* ip = (const BYTE*) src;
107811 +    size_t iSize;
107812 +    size_t oSize;
107814 +    if (!srcSize) return ERROR(srcSize_wrong);
107815 +    iSize = ip[0];
107816 +    /* ZSTD_memset(huffWeight, 0, hwSize);   *//* is not necessary, even though some analyzer complain ... */
107818 +    if (iSize >= 128) {  /* special header */
107819 +        oSize = iSize - 127;
107820 +        iSize = ((oSize+1)/2);
107821 +        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
107822 +        if (oSize >= hwSize) return ERROR(corruption_detected);
107823 +        ip += 1;
107824 +        {   U32 n;
107825 +            for (n=0; n<oSize; n+=2) {
107826 +                huffWeight[n]   = ip[n/2] >> 4;
107827 +                huffWeight[n+1] = ip[n/2] & 15;
107828 +    }   }   }
107829 +    else  {   /* header compressed with FSE (normal case) */
107830 +        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
107831 +        /* max (hwSize-1) values decoded, as last one is implied */
107832 +        oSize = FSE_decompress_wksp_bmi2(huffWeight, hwSize-1, ip+1, iSize, 6, workSpace, wkspSize, bmi2);
107833 +        if (FSE_isError(oSize)) return oSize;
107834 +    }
107836 +    /* collect weight stats */
107837 +    ZSTD_memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
107838 +    weightTotal = 0;
107839 +    {   U32 n; for (n=0; n<oSize; n++) {
107840 +            if (huffWeight[n] >= HUF_TABLELOG_MAX) return ERROR(corruption_detected);
107841 +            rankStats[huffWeight[n]]++;
107842 +            weightTotal += (1 << huffWeight[n]) >> 1;
107843 +    }   }
107844 +    if (weightTotal == 0) return ERROR(corruption_detected);
107846 +    /* get last non-null symbol weight (implied, total must be 2^n) */
107847 +    {   U32 const tableLog = BIT_highbit32(weightTotal) + 1;
107848 +        if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
107849 +        *tableLogPtr = tableLog;
107850 +        /* determine last weight */
107851 +        {   U32 const total = 1 << tableLog;
107852 +            U32 const rest = total - weightTotal;
107853 +            U32 const verif = 1 << BIT_highbit32(rest);
107854 +            U32 const lastWeight = BIT_highbit32(rest) + 1;
107855 +            if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */
107856 +            huffWeight[oSize] = (BYTE)lastWeight;
107857 +            rankStats[lastWeight]++;
107858 +    }   }
107860 +    /* check tree construction validity */
107861 +    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);   /* by construction : at least 2 elts of rank 1, must be even */
107863 +    /* results */
107864 +    *nbSymbolsPtr = (U32)(oSize+1);
107865 +    return iSize+1;
107868 +/* Avoids the FORCE_INLINE of the _body() function. */
107869 +static size_t HUF_readStats_body_default(BYTE* huffWeight, size_t hwSize, U32* rankStats,
107870 +                     U32* nbSymbolsPtr, U32* tableLogPtr,
107871 +                     const void* src, size_t srcSize,
107872 +                     void* workSpace, size_t wkspSize)
107874 +    return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 0);
107877 +#if DYNAMIC_BMI2
107878 +static TARGET_ATTRIBUTE("bmi2") size_t HUF_readStats_body_bmi2(BYTE* huffWeight, size_t hwSize, U32* rankStats,
107879 +                     U32* nbSymbolsPtr, U32* tableLogPtr,
107880 +                     const void* src, size_t srcSize,
107881 +                     void* workSpace, size_t wkspSize)
107883 +    return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 1);
107885 +#endif
107887 +size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize, U32* rankStats,
107888 +                     U32* nbSymbolsPtr, U32* tableLogPtr,
107889 +                     const void* src, size_t srcSize,
107890 +                     void* workSpace, size_t wkspSize,
107891 +                     int bmi2)
107893 +#if DYNAMIC_BMI2
107894 +    if (bmi2) {
107895 +        return HUF_readStats_body_bmi2(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
107896 +    }
107897 +#endif
107898 +    (void)bmi2;
107899 +    return HUF_readStats_body_default(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
107901 diff --git a/lib/zstd/common/error_private.c b/lib/zstd/common/error_private.c
107902 new file mode 100644
107903 index 000000000000..6d1135f8c373
107904 --- /dev/null
107905 +++ b/lib/zstd/common/error_private.c
107906 @@ -0,0 +1,56 @@
107908 + * Copyright (c) Yann Collet, Facebook, Inc.
107909 + * All rights reserved.
107911 + * This source code is licensed under both the BSD-style license (found in the
107912 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
107913 + * in the COPYING file in the root directory of this source tree).
107914 + * You may select, at your option, one of the above-listed licenses.
107915 + */
107917 +/* The purpose of this file is to have a single list of error strings embedded in binary */
107919 +#include "error_private.h"
107921 +const char* ERR_getErrorString(ERR_enum code)
107923 +#ifdef ZSTD_STRIP_ERROR_STRINGS
107924 +    (void)code;
107925 +    return "Error strings stripped";
107926 +#else
107927 +    static const char* const notErrorCode = "Unspecified error code";
107928 +    switch( code )
107929 +    {
107930 +    case PREFIX(no_error): return "No error detected";
107931 +    case PREFIX(GENERIC):  return "Error (generic)";
107932 +    case PREFIX(prefix_unknown): return "Unknown frame descriptor";
107933 +    case PREFIX(version_unsupported): return "Version not supported";
107934 +    case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter";
107935 +    case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding";
107936 +    case PREFIX(corruption_detected): return "Corrupted block detected";
107937 +    case PREFIX(checksum_wrong): return "Restored data doesn't match checksum";
107938 +    case PREFIX(parameter_unsupported): return "Unsupported parameter";
107939 +    case PREFIX(parameter_outOfBound): return "Parameter is out of bound";
107940 +    case PREFIX(init_missing): return "Context should be init first";
107941 +    case PREFIX(memory_allocation): return "Allocation error : not enough memory";
107942 +    case PREFIX(workSpace_tooSmall): return "workSpace buffer is not large enough";
107943 +    case PREFIX(stage_wrong): return "Operation not authorized at current processing stage";
107944 +    case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported";
107945 +    case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large";
107946 +    case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small";
107947 +    case PREFIX(dictionary_corrupted): return "Dictionary is corrupted";
107948 +    case PREFIX(dictionary_wrong): return "Dictionary mismatch";
107949 +    case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples";
107950 +    case PREFIX(dstSize_tooSmall): return "Destination buffer is too small";
107951 +    case PREFIX(srcSize_wrong): return "Src size is incorrect";
107952 +    case PREFIX(dstBuffer_null): return "Operation on NULL destination buffer";
107953 +        /* following error codes are not stable and may be removed or changed in a future version */
107954 +    case PREFIX(frameIndex_tooLarge): return "Frame index is too large";
107955 +    case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking";
107956 +    case PREFIX(dstBuffer_wrong): return "Destination buffer is wrong";
107957 +    case PREFIX(srcBuffer_wrong): return "Source buffer is wrong";
107958 +    case PREFIX(maxCode):
107959 +    default: return notErrorCode;
107960 +    }
107961 +#endif
107963 diff --git a/lib/zstd/common/error_private.h b/lib/zstd/common/error_private.h
107964 new file mode 100644
107965 index 000000000000..d14e686adf95
107966 --- /dev/null
107967 +++ b/lib/zstd/common/error_private.h
107968 @@ -0,0 +1,66 @@
107970 + * Copyright (c) Yann Collet, Facebook, Inc.
107971 + * All rights reserved.
107973 + * This source code is licensed under both the BSD-style license (found in the
107974 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
107975 + * in the COPYING file in the root directory of this source tree).
107976 + * You may select, at your option, one of the above-listed licenses.
107977 + */
107979 +/* Note : this module is expected to remain private, do not expose it */
107981 +#ifndef ERROR_H_MODULE
107982 +#define ERROR_H_MODULE
107986 +/* ****************************************
107987 +*  Dependencies
107988 +******************************************/
107989 +#include "zstd_deps.h"    /* size_t */
107990 +#include <linux/zstd_errors.h>  /* enum list */
107993 +/* ****************************************
107994 +*  Compiler-specific
107995 +******************************************/
107996 +#define ERR_STATIC static __attribute__((unused))
107999 +/*-****************************************
108000 +*  Customization (error_public.h)
108001 +******************************************/
108002 +typedef ZSTD_ErrorCode ERR_enum;
108003 +#define PREFIX(name) ZSTD_error_##name
108006 +/*-****************************************
108007 +*  Error codes handling
108008 +******************************************/
108009 +#undef ERROR   /* already defined on Visual Studio */
108010 +#define ERROR(name) ZSTD_ERROR(name)
108011 +#define ZSTD_ERROR(name) ((size_t)-PREFIX(name))
108013 +ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
108015 +ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; return (ERR_enum) (0-code); }
108017 +/* check and forward error code */
108018 +#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
108019 +#define CHECK_F(f)   { CHECK_V_F(_var_err__, f); }
108022 +/*-****************************************
108023 +*  Error Strings
108024 +******************************************/
108026 +const char* ERR_getErrorString(ERR_enum code);   /* error_private.c */
108028 +ERR_STATIC const char* ERR_getErrorName(size_t code)
108030 +    return ERR_getErrorString(ERR_getErrorCode(code));
108034 +#endif /* ERROR_H_MODULE */
108035 diff --git a/lib/zstd/common/fse.h b/lib/zstd/common/fse.h
108036 new file mode 100644
108037 index 000000000000..477e642ffb41
108038 --- /dev/null
108039 +++ b/lib/zstd/common/fse.h
108040 @@ -0,0 +1,708 @@
108041 +/* ******************************************************************
108042 + * FSE : Finite State Entropy codec
108043 + * Public Prototypes declaration
108044 + * Copyright (c) Yann Collet, Facebook, Inc.
108046 + * You can contact the author at :
108047 + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
108049 + * This source code is licensed under both the BSD-style license (found in the
108050 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
108051 + * in the COPYING file in the root directory of this source tree).
108052 + * You may select, at your option, one of the above-listed licenses.
108053 +****************************************************************** */
108056 +#ifndef FSE_H
108057 +#define FSE_H
108060 +/*-*****************************************
108061 +*  Dependencies
108062 +******************************************/
108063 +#include "zstd_deps.h"    /* size_t, ptrdiff_t */
108066 +/*-*****************************************
108067 +*  FSE_PUBLIC_API : control library symbols visibility
108068 +******************************************/
108069 +#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
108070 +#  define FSE_PUBLIC_API __attribute__ ((visibility ("default")))
108071 +#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1)   /* Visual expected */
108072 +#  define FSE_PUBLIC_API __declspec(dllexport)
108073 +#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
108074 +#  define FSE_PUBLIC_API __declspec(dllimport) /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
108075 +#else
108076 +#  define FSE_PUBLIC_API
108077 +#endif
108079 +/*------   Version   ------*/
108080 +#define FSE_VERSION_MAJOR    0
108081 +#define FSE_VERSION_MINOR    9
108082 +#define FSE_VERSION_RELEASE  0
108084 +#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE
108085 +#define FSE_QUOTE(str) #str
108086 +#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str)
108087 +#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION)
108089 +#define FSE_VERSION_NUMBER  (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE)
108090 +FSE_PUBLIC_API unsigned FSE_versionNumber(void);   /**< library version number; to be used when checking dll version */
108093 +/*-****************************************
108094 +*  FSE simple functions
108095 +******************************************/
108096 +/*! FSE_compress() :
108097 +    Compress content of buffer 'src', of size 'srcSize', into destination buffer 'dst'.
108098 +    'dst' buffer must be already allocated. Compression runs faster is dstCapacity >= FSE_compressBound(srcSize).
108099 +    @return : size of compressed data (<= dstCapacity).
108100 +    Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
108101 +                     if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression instead.
108102 +                     if FSE_isError(return), compression failed (more details using FSE_getErrorName())
108104 +FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity,
108105 +                             const void* src, size_t srcSize);
108107 +/*! FSE_decompress():
108108 +    Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',
108109 +    into already allocated destination buffer 'dst', of size 'dstCapacity'.
108110 +    @return : size of regenerated data (<= maxDstSize),
108111 +              or an error code, which can be tested using FSE_isError() .
108113 +    ** Important ** : FSE_decompress() does not decompress non-compressible nor RLE data !!!
108114 +    Why ? : making this distinction requires a header.
108115 +    Header management is intentionally delegated to the user layer, which can better manage special cases.
108117 +FSE_PUBLIC_API size_t FSE_decompress(void* dst,  size_t dstCapacity,
108118 +                               const void* cSrc, size_t cSrcSize);
108121 +/*-*****************************************
108122 +*  Tool functions
108123 +******************************************/
108124 +FSE_PUBLIC_API size_t FSE_compressBound(size_t size);       /* maximum compressed size */
108126 +/* Error Management */
108127 +FSE_PUBLIC_API unsigned    FSE_isError(size_t code);        /* tells if a return value is an error code */
108128 +FSE_PUBLIC_API const char* FSE_getErrorName(size_t code);   /* provides error code string (useful for debugging) */
108131 +/*-*****************************************
108132 +*  FSE advanced functions
108133 +******************************************/
108134 +/*! FSE_compress2() :
108135 +    Same as FSE_compress(), but allows the selection of 'maxSymbolValue' and 'tableLog'
108136 +    Both parameters can be defined as '0' to mean : use default value
108137 +    @return : size of compressed data
108138 +    Special values : if return == 0, srcData is not compressible => Nothing is stored within cSrc !!!
108139 +                     if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression.
108140 +                     if FSE_isError(return), it's an error code.
108142 +FSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
108145 +/*-*****************************************
108146 +*  FSE detailed API
108147 +******************************************/
108149 +FSE_compress() does the following:
108150 +1. count symbol occurrence from source[] into table count[] (see hist.h)
108151 +2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)
108152 +3. save normalized counters to memory buffer using writeNCount()
108153 +4. build encoding table 'CTable' from normalized counters
108154 +5. encode the data stream using encoding table 'CTable'
108156 +FSE_decompress() does the following:
108157 +1. read normalized counters with readNCount()
108158 +2. build decoding table 'DTable' from normalized counters
108159 +3. decode the data stream using decoding table 'DTable'
108161 +The following API allows targeting specific sub-functions for advanced tasks.
108162 +For example, it's possible to compress several blocks using the same 'CTable',
108163 +or to save and provide normalized distribution using external method.
108166 +/* *** COMPRESSION *** */
108168 +/*! FSE_optimalTableLog():
108169 +    dynamically downsize 'tableLog' when conditions are met.
108170 +    It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.
108171 +    @return : recommended tableLog (necessarily <= 'maxTableLog') */
108172 +FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
108174 +/*! FSE_normalizeCount():
108175 +    normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)
108176 +    'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).
108177 +    useLowProbCount is a boolean parameter which trades off compressed size for
108178 +    faster header decoding. When it is set to 1, the compressed data will be slightly
108179 +    smaller. And when it is set to 0, FSE_readNCount() and FSE_buildDTable() will be
108180 +    faster. If you are compressing a small amount of data (< 2 KB) then useLowProbCount=0
108181 +    is a good default, since header deserialization makes a big speed difference.
108182 +    Otherwise, useLowProbCount=1 is a good default, since the speed difference is small.
108183 +    @return : tableLog,
108184 +              or an errorCode, which can be tested using FSE_isError() */
108185 +FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog,
108186 +                    const unsigned* count, size_t srcSize, unsigned maxSymbolValue, unsigned useLowProbCount);
108188 +/*! FSE_NCountWriteBound():
108189 +    Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'.
108190 +    Typically useful for allocation purpose. */
108191 +FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog);
108193 +/*! FSE_writeNCount():
108194 +    Compactly save 'normalizedCounter' into 'buffer'.
108195 +    @return : size of the compressed table,
108196 +              or an errorCode, which can be tested using FSE_isError(). */
108197 +FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize,
108198 +                                 const short* normalizedCounter,
108199 +                                 unsigned maxSymbolValue, unsigned tableLog);
108201 +/*! Constructor and Destructor of FSE_CTable.
108202 +    Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */
108203 +typedef unsigned FSE_CTable;   /* don't allocate that. It's only meant to be more restrictive than void* */
108204 +FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog);
108205 +FSE_PUBLIC_API void        FSE_freeCTable (FSE_CTable* ct);
108207 +/*! FSE_buildCTable():
108208 +    Builds `ct`, which must be already allocated, using FSE_createCTable().
108209 +    @return : 0, or an errorCode, which can be tested using FSE_isError() */
108210 +FSE_PUBLIC_API size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
108212 +/*! FSE_compress_usingCTable():
108213 +    Compress `src` using `ct` into `dst` which must be already allocated.
108214 +    @return : size of compressed data (<= `dstCapacity`),
108215 +              or 0 if compressed data could not fit into `dst`,
108216 +              or an errorCode, which can be tested using FSE_isError() */
108217 +FSE_PUBLIC_API size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct);
108220 +Tutorial :
108221 +----------
108222 +The first step is to count all symbols. FSE_count() does this job very fast.
108223 +Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells.
108224 +'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0]
108225 +maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value)
108226 +FSE_count() will return the number of occurrence of the most frequent symbol.
108227 +This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility.
108228 +If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
108230 +The next step is to normalize the frequencies.
108231 +FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'.
108232 +It also guarantees a minimum of 1 to any Symbol with frequency >= 1.
108233 +You can use 'tableLog'==0 to mean "use default tableLog value".
108234 +If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(),
108235 +which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default").
108237 +The result of FSE_normalizeCount() will be saved into a table,
108238 +called 'normalizedCounter', which is a table of signed short.
108239 +'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells.
108240 +The return value is tableLog if everything proceeded as expected.
108241 +It is 0 if there is a single symbol within distribution.
108242 +If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()).
108244 +'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount().
108245 +'buffer' must be already allocated.
108246 +For guaranteed success, buffer size must be at least FSE_headerBound().
108247 +The result of the function is the number of bytes written into 'buffer'.
108248 +If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small).
108250 +'normalizedCounter' can then be used to create the compression table 'CTable'.
108251 +The space required by 'CTable' must be already allocated, using FSE_createCTable().
108252 +You can then use FSE_buildCTable() to fill 'CTable'.
108253 +If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()).
108255 +'CTable' can then be used to compress 'src', with FSE_compress_usingCTable().
108256 +Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize'
108257 +The function returns the size of compressed data (without header), necessarily <= `dstCapacity`.
108258 +If it returns '0', compressed data could not fit into 'dst'.
108259 +If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
108263 +/* *** DECOMPRESSION *** */
108265 +/*! FSE_readNCount():
108266 +    Read compactly saved 'normalizedCounter' from 'rBuffer'.
108267 +    @return : size read from 'rBuffer',
108268 +              or an errorCode, which can be tested using FSE_isError().
108269 +              maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
108270 +FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter,
108271 +                           unsigned* maxSymbolValuePtr, unsigned* tableLogPtr,
108272 +                           const void* rBuffer, size_t rBuffSize);
108274 +/*! FSE_readNCount_bmi2():
108275 + * Same as FSE_readNCount() but pass bmi2=1 when your CPU supports BMI2 and 0 otherwise.
108276 + */
108277 +FSE_PUBLIC_API size_t FSE_readNCount_bmi2(short* normalizedCounter,
108278 +                           unsigned* maxSymbolValuePtr, unsigned* tableLogPtr,
108279 +                           const void* rBuffer, size_t rBuffSize, int bmi2);
108281 +/*! Constructor and Destructor of FSE_DTable.
108282 +    Note that its size depends on 'tableLog' */
108283 +typedef unsigned FSE_DTable;   /* don't allocate that. It's just a way to be more restrictive than void* */
108284 +FSE_PUBLIC_API FSE_DTable* FSE_createDTable(unsigned tableLog);
108285 +FSE_PUBLIC_API void        FSE_freeDTable(FSE_DTable* dt);
108287 +/*! FSE_buildDTable():
108288 +    Builds 'dt', which must be already allocated, using FSE_createDTable().
108289 +    return : 0, or an errorCode, which can be tested using FSE_isError() */
108290 +FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
108292 +/*! FSE_decompress_usingDTable():
108293 +    Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
108294 +    into `dst` which must be already allocated.
108295 +    @return : size of regenerated data (necessarily <= `dstCapacity`),
108296 +              or an errorCode, which can be tested using FSE_isError() */
108297 +FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt);
108300 +Tutorial :
108301 +----------
108302 +(Note : these functions only decompress FSE-compressed blocks.
108303 + If block is uncompressed, use memcpy() instead
108304 + If block is a single repeated byte, use memset() instead )
108306 +The first step is to obtain the normalized frequencies of symbols.
108307 +This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount().
108308 +'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
108309 +In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
108310 +or size the table to handle worst case situations (typically 256).
108311 +FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
108312 +The result of FSE_readNCount() is the number of bytes read from 'rBuffer'.
108313 +Note that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that.
108314 +If there is an error, the function will return an error code, which can be tested using FSE_isError().
108316 +The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'.
108317 +This is performed by the function FSE_buildDTable().
108318 +The space required by 'FSE_DTable' must be already allocated using FSE_createDTable().
108319 +If there is an error, the function will return an error code, which can be tested using FSE_isError().
108321 +`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable().
108322 +`cSrcSize` must be strictly correct, otherwise decompression will fail.
108323 +FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
108324 +If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
108327 +#endif  /* FSE_H */
108329 +#if !defined(FSE_H_FSE_STATIC_LINKING_ONLY)
108330 +#define FSE_H_FSE_STATIC_LINKING_ONLY
108332 +/* *** Dependency *** */
108333 +#include "bitstream.h"
108336 +/* *****************************************
108337 +*  Static allocation
108338 +*******************************************/
108339 +/* FSE buffer bounds */
108340 +#define FSE_NCOUNTBOUND 512
108341 +#define FSE_BLOCKBOUND(size) ((size) + ((size)>>7) + 4 /* fse states */ + sizeof(size_t) /* bitContainer */)
108342 +#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size))   /* Macro version, useful for static allocation */
108344 +/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */
108345 +#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue)   (1 + (1<<((maxTableLog)-1)) + (((maxSymbolValue)+1)*2))
108346 +#define FSE_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<(maxTableLog)))
108348 +/* or use the size to malloc() space directly. Pay attention to alignment restrictions though */
108349 +#define FSE_CTABLE_SIZE(maxTableLog, maxSymbolValue)   (FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(FSE_CTable))
108350 +#define FSE_DTABLE_SIZE(maxTableLog)                   (FSE_DTABLE_SIZE_U32(maxTableLog) * sizeof(FSE_DTable))
108353 +/* *****************************************
108354 + *  FSE advanced API
108355 + ***************************************** */
108357 +unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
108358 +/**< same as FSE_optimalTableLog(), which used `minus==2` */
108360 +/* FSE_compress_wksp() :
108361 + * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
108362 + * FSE_COMPRESS_WKSP_SIZE_U32() provides the minimum size required for `workSpace` as a table of FSE_CTable.
108363 + */
108364 +#define FSE_COMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue)   ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + ((maxTableLog > 12) ? (1 << (maxTableLog - 2)) : 1024) )
108365 +size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
108367 +size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits);
108368 +/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */
108370 +size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue);
108371 +/**< build a fake FSE_CTable, designed to compress always the same symbolValue */
108373 +/* FSE_buildCTable_wksp() :
108374 + * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
108375 + * `wkspSize` must be >= `FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog)` of `unsigned`.
108376 + */
108377 +#define FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog) (maxSymbolValue + 2 + (1ull << (tableLog - 2)))
108378 +#define FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) (sizeof(unsigned) * FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog))
108379 +size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
108381 +#define FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue) (sizeof(short) * (maxSymbolValue + 1) + (1ULL << maxTableLog) + 8)
108382 +#define FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ((FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue) + sizeof(unsigned) - 1) / sizeof(unsigned))
108383 +FSE_PUBLIC_API size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
108384 +/**< Same as FSE_buildDTable(), using an externally allocated `workspace` produced with `FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxSymbolValue)` */
108386 +size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
108387 +/**< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */
108389 +size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
108390 +/**< build a fake FSE_DTable, designed to always generate the same symbolValue */
108392 +#define FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) (FSE_DTABLE_SIZE_U32(maxTableLog) + FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) + (FSE_MAX_SYMBOL_VALUE + 1) / 2 + 1)
108393 +#define FSE_DECOMPRESS_WKSP_SIZE(maxTableLog, maxSymbolValue) (FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(unsigned))
108394 +size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize);
108395 +/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DECOMPRESS_WKSP_SIZE_U32(maxLog, maxSymbolValue)` */
108397 +size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2);
108398 +/**< Same as FSE_decompress_wksp() but with dynamic BMI2 support. Pass 1 if your CPU supports BMI2 or 0 if it doesn't. */
108400 +typedef enum {
108401 +   FSE_repeat_none,  /**< Cannot use the previous table */
108402 +   FSE_repeat_check, /**< Can use the previous table but it must be checked */
108403 +   FSE_repeat_valid  /**< Can use the previous table and it is assumed to be valid */
108404 + } FSE_repeat;
108406 +/* *****************************************
108407 +*  FSE symbol compression API
108408 +*******************************************/
108410 +   This API consists of small unitary functions, which highly benefit from being inlined.
108411 +   Hence their body are included in next section.
108413 +typedef struct {
108414 +    ptrdiff_t   value;
108415 +    const void* stateTable;
108416 +    const void* symbolTT;
108417 +    unsigned    stateLog;
108418 +} FSE_CState_t;
108420 +static void FSE_initCState(FSE_CState_t* CStatePtr, const FSE_CTable* ct);
108422 +static void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* CStatePtr, unsigned symbol);
108424 +static void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* CStatePtr);
108426 +/**<
108427 +These functions are inner components of FSE_compress_usingCTable().
108428 +They allow the creation of custom streams, mixing multiple tables and bit sources.
108430 +A key property to keep in mind is that encoding and decoding are done **in reverse direction**.
108431 +So the first symbol you will encode is the last you will decode, like a LIFO stack.
108433 +You will need a few variables to track your CStream. They are :
108435 +FSE_CTable    ct;         // Provided by FSE_buildCTable()
108436 +BIT_CStream_t bitStream;  // bitStream tracking structure
108437 +FSE_CState_t  state;      // State tracking structure (can have several)
108440 +The first thing to do is to init bitStream and state.
108441 +    size_t errorCode = BIT_initCStream(&bitStream, dstBuffer, maxDstSize);
108442 +    FSE_initCState(&state, ct);
108444 +Note that BIT_initCStream() can produce an error code, so its result should be tested, using FSE_isError();
108445 +You can then encode your input data, byte after byte.
108446 +FSE_encodeSymbol() outputs a maximum of 'tableLog' bits at a time.
108447 +Remember decoding will be done in reverse direction.
108448 +    FSE_encodeByte(&bitStream, &state, symbol);
108450 +At any time, you can also add any bit sequence.
108451 +Note : maximum allowed nbBits is 25, for compatibility with 32-bits decoders
108452 +    BIT_addBits(&bitStream, bitField, nbBits);
108454 +The above methods don't commit data to memory, they just store it into local register, for speed.
108455 +Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
108456 +Writing data to memory is a manual operation, performed by the flushBits function.
108457 +    BIT_flushBits(&bitStream);
108459 +Your last FSE encoding operation shall be to flush your last state value(s).
108460 +    FSE_flushState(&bitStream, &state);
108462 +Finally, you must close the bitStream.
108463 +The function returns the size of CStream in bytes.
108464 +If data couldn't fit into dstBuffer, it will return a 0 ( == not compressible)
108465 +If there is an error, it returns an errorCode (which can be tested using FSE_isError()).
108466 +    size_t size = BIT_closeCStream(&bitStream);
108470 +/* *****************************************
108471 +*  FSE symbol decompression API
108472 +*******************************************/
108473 +typedef struct {
108474 +    size_t      state;
108475 +    const void* table;   /* precise table may vary, depending on U16 */
108476 +} FSE_DState_t;
108479 +static void     FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);
108481 +static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
108483 +static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);
108485 +/**<
108486 +Let's now decompose FSE_decompress_usingDTable() into its unitary components.
108487 +You will decode FSE-encoded symbols from the bitStream,
108488 +and also any other bitFields you put in, **in reverse order**.
108490 +You will need a few variables to track your bitStream. They are :
108492 +BIT_DStream_t DStream;    // Stream context
108493 +FSE_DState_t  DState;     // State context. Multiple ones are possible
108494 +FSE_DTable*   DTablePtr;  // Decoding table, provided by FSE_buildDTable()
108496 +The first thing to do is to init the bitStream.
108497 +    errorCode = BIT_initDStream(&DStream, srcBuffer, srcSize);
108499 +You should then retrieve your initial state(s)
108500 +(in reverse flushing order if you have several ones) :
108501 +    errorCode = FSE_initDState(&DState, &DStream, DTablePtr);
108503 +You can then decode your data, symbol after symbol.
108504 +For information the maximum number of bits read by FSE_decodeSymbol() is 'tableLog'.
108505 +Keep in mind that symbols are decoded in reverse order, like a LIFO stack (last in, first out).
108506 +    unsigned char symbol = FSE_decodeSymbol(&DState, &DStream);
108508 +You can retrieve any bitfield you eventually stored into the bitStream (in reverse order)
108509 +Note : maximum allowed nbBits is 25, for 32-bits compatibility
108510 +    size_t bitField = BIT_readBits(&DStream, nbBits);
108512 +All above operations only read from local register (which size depends on size_t).
108513 +Refueling the register from memory is manually performed by the reload method.
108514 +    endSignal = FSE_reloadDStream(&DStream);
108516 +BIT_reloadDStream() result tells if there is still some more data to read from DStream.
108517 +BIT_DStream_unfinished : there is still some data left into the DStream.
108518 +BIT_DStream_endOfBuffer : Dstream reached end of buffer. Its container may no longer be completely filled.
108519 +BIT_DStream_completed : Dstream reached its exact end, corresponding in general to decompression completed.
108520 +BIT_DStream_tooFar : Dstream went too far. Decompression result is corrupted.
108522 +When reaching end of buffer (BIT_DStream_endOfBuffer), progress slowly, notably if you decode multiple symbols per loop,
108523 +to properly detect the exact end of stream.
108524 +After each decoded symbol, check if DStream is fully consumed using this simple test :
108525 +    BIT_reloadDStream(&DStream) >= BIT_DStream_completed
108527 +When it's done, verify decompression is fully completed, by checking both DStream and the relevant states.
108528 +Checking if DStream has reached its end is performed by :
108529 +    BIT_endOfDStream(&DStream);
108530 +Check also the states. There might be some symbols left there, if some high probability ones (>50%) are possible.
108531 +    FSE_endOfDState(&DState);
108535 +/* *****************************************
108536 +*  FSE unsafe API
108537 +*******************************************/
108538 +static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
108539 +/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
108542 +/* *****************************************
108543 +*  Implementation of inlined functions
108544 +*******************************************/
108545 +typedef struct {
108546 +    int deltaFindState;
108547 +    U32 deltaNbBits;
108548 +} FSE_symbolCompressionTransform; /* total 8 bytes */
108550 +MEM_STATIC void FSE_initCState(FSE_CState_t* statePtr, const FSE_CTable* ct)
108552 +    const void* ptr = ct;
108553 +    const U16* u16ptr = (const U16*) ptr;
108554 +    const U32 tableLog = MEM_read16(ptr);
108555 +    statePtr->value = (ptrdiff_t)1<<tableLog;
108556 +    statePtr->stateTable = u16ptr+2;
108557 +    statePtr->symbolTT = ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1);
108558 +    statePtr->stateLog = tableLog;
108562 +/*! FSE_initCState2() :
108563 +*   Same as FSE_initCState(), but the first symbol to include (which will be the last to be read)
108564 +*   uses the smallest state value possible, saving the cost of this symbol */
108565 +MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U32 symbol)
108567 +    FSE_initCState(statePtr, ct);
108568 +    {   const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
108569 +        const U16* stateTable = (const U16*)(statePtr->stateTable);
108570 +        U32 nbBitsOut  = (U32)((symbolTT.deltaNbBits + (1<<15)) >> 16);
108571 +        statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits;
108572 +        statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
108573 +    }
108576 +MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, unsigned symbol)
108578 +    FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
108579 +    const U16* const stateTable = (const U16*)(statePtr->stateTable);
108580 +    U32 const nbBitsOut  = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
108581 +    BIT_addBits(bitC, statePtr->value, nbBitsOut);
108582 +    statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
108585 +MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr)
108587 +    BIT_addBits(bitC, statePtr->value, statePtr->stateLog);
108588 +    BIT_flushBits(bitC);
108592 +/* FSE_getMaxNbBits() :
108593 + * Approximate maximum cost of a symbol, in bits.
108594 + * Fractional get rounded up (i.e : a symbol with a normalized frequency of 3 gives the same result as a frequency of 2)
108595 + * note 1 : assume symbolValue is valid (<= maxSymbolValue)
108596 + * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
108597 +MEM_STATIC U32 FSE_getMaxNbBits(const void* symbolTTPtr, U32 symbolValue)
108599 +    const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr;
108600 +    return (symbolTT[symbolValue].deltaNbBits + ((1<<16)-1)) >> 16;
108603 +/* FSE_bitCost() :
108604 + * Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits)
108605 + * note 1 : assume symbolValue is valid (<= maxSymbolValue)
108606 + * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */
108607 +MEM_STATIC U32 FSE_bitCost(const void* symbolTTPtr, U32 tableLog, U32 symbolValue, U32 accuracyLog)
108609 +    const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr;
108610 +    U32 const minNbBits = symbolTT[symbolValue].deltaNbBits >> 16;
108611 +    U32 const threshold = (minNbBits+1) << 16;
108612 +    assert(tableLog < 16);
108613 +    assert(accuracyLog < 31-tableLog);  /* ensure enough room for renormalization double shift */
108614 +    {   U32 const tableSize = 1 << tableLog;
108615 +        U32 const deltaFromThreshold = threshold - (symbolTT[symbolValue].deltaNbBits + tableSize);
108616 +        U32 const normalizedDeltaFromThreshold = (deltaFromThreshold << accuracyLog) >> tableLog;   /* linear interpolation (very approximate) */
108617 +        U32 const bitMultiplier = 1 << accuracyLog;
108618 +        assert(symbolTT[symbolValue].deltaNbBits + tableSize <= threshold);
108619 +        assert(normalizedDeltaFromThreshold <= bitMultiplier);
108620 +        return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold;
108621 +    }
108625 +/* ======    Decompression    ====== */
108627 +typedef struct {
108628 +    U16 tableLog;
108629 +    U16 fastMode;
108630 +} FSE_DTableHeader;   /* sizeof U32 */
108632 +typedef struct
108634 +    unsigned short newState;
108635 +    unsigned char  symbol;
108636 +    unsigned char  nbBits;
108637 +} FSE_decode_t;   /* size == U32 */
108639 +MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
108641 +    const void* ptr = dt;
108642 +    const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr;
108643 +    DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
108644 +    BIT_reloadDStream(bitD);
108645 +    DStatePtr->table = dt + 1;
108648 +MEM_STATIC BYTE FSE_peekSymbol(const FSE_DState_t* DStatePtr)
108650 +    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
108651 +    return DInfo.symbol;
108654 +MEM_STATIC void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
108656 +    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
108657 +    U32 const nbBits = DInfo.nbBits;
108658 +    size_t const lowBits = BIT_readBits(bitD, nbBits);
108659 +    DStatePtr->state = DInfo.newState + lowBits;
108662 +MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
108664 +    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
108665 +    U32 const nbBits = DInfo.nbBits;
108666 +    BYTE const symbol = DInfo.symbol;
108667 +    size_t const lowBits = BIT_readBits(bitD, nbBits);
108669 +    DStatePtr->state = DInfo.newState + lowBits;
108670 +    return symbol;
108673 +/*! FSE_decodeSymbolFast() :
108674 +    unsafe, only works if no symbol has a probability > 50% */
108675 +MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
108677 +    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
108678 +    U32 const nbBits = DInfo.nbBits;
108679 +    BYTE const symbol = DInfo.symbol;
108680 +    size_t const lowBits = BIT_readBitsFast(bitD, nbBits);
108682 +    DStatePtr->state = DInfo.newState + lowBits;
108683 +    return symbol;
108686 +MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
108688 +    return DStatePtr->state == 0;
108693 +#ifndef FSE_COMMONDEFS_ONLY
108695 +/* **************************************************************
108696 +*  Tuning parameters
108697 +****************************************************************/
108698 +/*!MEMORY_USAGE :
108699 +*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
108700 +*  Increasing memory usage improves compression ratio
108701 +*  Reduced memory usage can improve speed, due to cache effect
108702 +*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
108703 +#ifndef FSE_MAX_MEMORY_USAGE
108704 +#  define FSE_MAX_MEMORY_USAGE 14
108705 +#endif
108706 +#ifndef FSE_DEFAULT_MEMORY_USAGE
108707 +#  define FSE_DEFAULT_MEMORY_USAGE 13
108708 +#endif
108709 +#if (FSE_DEFAULT_MEMORY_USAGE > FSE_MAX_MEMORY_USAGE)
108710 +#  error "FSE_DEFAULT_MEMORY_USAGE must be <= FSE_MAX_MEMORY_USAGE"
108711 +#endif
108713 +/*!FSE_MAX_SYMBOL_VALUE :
108714 +*  Maximum symbol value authorized.
108715 +*  Required for proper stack allocation */
108716 +#ifndef FSE_MAX_SYMBOL_VALUE
108717 +#  define FSE_MAX_SYMBOL_VALUE 255
108718 +#endif
108720 +/* **************************************************************
108721 +*  template functions type & suffix
108722 +****************************************************************/
108723 +#define FSE_FUNCTION_TYPE BYTE
108724 +#define FSE_FUNCTION_EXTENSION
108725 +#define FSE_DECODE_TYPE FSE_decode_t
108728 +#endif   /* !FSE_COMMONDEFS_ONLY */
108731 +/* ***************************************************************
108732 +*  Constants
108733 +*****************************************************************/
108734 +#define FSE_MAX_TABLELOG  (FSE_MAX_MEMORY_USAGE-2)
108735 +#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
108736 +#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
108737 +#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
108738 +#define FSE_MIN_TABLELOG 5
108740 +#define FSE_TABLELOG_ABSOLUTE_MAX 15
108741 +#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
108742 +#  error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
108743 +#endif
108745 +#define FSE_TABLESTEP(tableSize) (((tableSize)>>1) + ((tableSize)>>3) + 3)
108748 +#endif /* FSE_STATIC_LINKING_ONLY */
108749 diff --git a/lib/zstd/common/fse_decompress.c b/lib/zstd/common/fse_decompress.c
108750 new file mode 100644
108751 index 000000000000..2c8bbe3e4c14
108752 --- /dev/null
108753 +++ b/lib/zstd/common/fse_decompress.c
108754 @@ -0,0 +1,390 @@
108755 +/* ******************************************************************
108756 + * FSE : Finite State Entropy decoder
108757 + * Copyright (c) Yann Collet, Facebook, Inc.
108759 + *  You can contact the author at :
108760 + *  - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
108761 + *  - Public forum : https://groups.google.com/forum/#!forum/lz4c
108763 + * This source code is licensed under both the BSD-style license (found in the
108764 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
108765 + * in the COPYING file in the root directory of this source tree).
108766 + * You may select, at your option, one of the above-listed licenses.
108767 +****************************************************************** */
108770 +/* **************************************************************
108771 +*  Includes
108772 +****************************************************************/
108773 +#include "debug.h"      /* assert */
108774 +#include "bitstream.h"
108775 +#include "compiler.h"
108776 +#define FSE_STATIC_LINKING_ONLY
108777 +#include "fse.h"
108778 +#include "error_private.h"
108779 +#define ZSTD_DEPS_NEED_MALLOC
108780 +#include "zstd_deps.h"
108783 +/* **************************************************************
108784 +*  Error Management
108785 +****************************************************************/
108786 +#define FSE_isError ERR_isError
108787 +#define FSE_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)   /* use only *after* variable declarations */
108790 +/* **************************************************************
108791 +*  Templates
108792 +****************************************************************/
108794 +  designed to be included
108795 +  for type-specific functions (template emulation in C)
108796 +  Objective is to write these functions only once, for improved maintenance
108799 +/* safety checks */
108800 +#ifndef FSE_FUNCTION_EXTENSION
108801 +#  error "FSE_FUNCTION_EXTENSION must be defined"
108802 +#endif
108803 +#ifndef FSE_FUNCTION_TYPE
108804 +#  error "FSE_FUNCTION_TYPE must be defined"
108805 +#endif
108807 +/* Function names */
108808 +#define FSE_CAT(X,Y) X##Y
108809 +#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
108810 +#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
108813 +/* Function templates */
108814 +FSE_DTable* FSE_createDTable (unsigned tableLog)
108816 +    if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
108817 +    return (FSE_DTable*)ZSTD_malloc( FSE_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );
108820 +void FSE_freeDTable (FSE_DTable* dt)
108822 +    ZSTD_free(dt);
108825 +static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
108827 +    void* const tdPtr = dt+1;   /* because *dt is unsigned, 32-bits aligned on 32-bits */
108828 +    FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr);
108829 +    U16* symbolNext = (U16*)workSpace;
108830 +    BYTE* spread = (BYTE*)(symbolNext + maxSymbolValue + 1);
108832 +    U32 const maxSV1 = maxSymbolValue + 1;
108833 +    U32 const tableSize = 1 << tableLog;
108834 +    U32 highThreshold = tableSize-1;
108836 +    /* Sanity Checks */
108837 +    if (FSE_BUILD_DTABLE_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(maxSymbolValue_tooLarge);
108838 +    if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
108839 +    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
108841 +    /* Init, lay down lowprob symbols */
108842 +    {   FSE_DTableHeader DTableH;
108843 +        DTableH.tableLog = (U16)tableLog;
108844 +        DTableH.fastMode = 1;
108845 +        {   S16 const largeLimit= (S16)(1 << (tableLog-1));
108846 +            U32 s;
108847 +            for (s=0; s<maxSV1; s++) {
108848 +                if (normalizedCounter[s]==-1) {
108849 +                    tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
108850 +                    symbolNext[s] = 1;
108851 +                } else {
108852 +                    if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
108853 +                    symbolNext[s] = normalizedCounter[s];
108854 +        }   }   }
108855 +        ZSTD_memcpy(dt, &DTableH, sizeof(DTableH));
108856 +    }
108858 +    /* Spread symbols */
108859 +    if (highThreshold == tableSize - 1) {
108860 +        size_t const tableMask = tableSize-1;
108861 +        size_t const step = FSE_TABLESTEP(tableSize);
108862 +        /* First lay down the symbols in order.
108863 +         * We use a uint64_t to lay down 8 bytes at a time. This reduces branch
108864 +         * misses since small blocks generally have small table logs, so nearly
108865 +         * all symbols have counts <= 8. We ensure we have 8 bytes at the end of
108866 +         * our buffer to handle the over-write.
108867 +         */
108868 +        {
108869 +            U64 const add = 0x0101010101010101ull;
108870 +            size_t pos = 0;
108871 +            U64 sv = 0;
108872 +            U32 s;
108873 +            for (s=0; s<maxSV1; ++s, sv += add) {
108874 +                int i;
108875 +                int const n = normalizedCounter[s];
108876 +                MEM_write64(spread + pos, sv);
108877 +                for (i = 8; i < n; i += 8) {
108878 +                    MEM_write64(spread + pos + i, sv);
108879 +                }
108880 +                pos += n;
108881 +            }
108882 +        }
108883 +        /* Now we spread those positions across the table.
108884 +         * The benefit of doing it in two stages is that we avoid the the
108885 +         * variable size inner loop, which caused lots of branch misses.
108886 +         * Now we can run through all the positions without any branch misses.
108887 +         * We unroll the loop twice, since that is what emperically worked best.
108888 +         */
108889 +        {
108890 +            size_t position = 0;
108891 +            size_t s;
108892 +            size_t const unroll = 2;
108893 +            assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
108894 +            for (s = 0; s < (size_t)tableSize; s += unroll) {
108895 +                size_t u;
108896 +                for (u = 0; u < unroll; ++u) {
108897 +                    size_t const uPosition = (position + (u * step)) & tableMask;
108898 +                    tableDecode[uPosition].symbol = spread[s + u];
108899 +                }
108900 +                position = (position + (unroll * step)) & tableMask;
108901 +            }
108902 +            assert(position == 0);
108903 +        }
108904 +    } else {
108905 +        U32 const tableMask = tableSize-1;
108906 +        U32 const step = FSE_TABLESTEP(tableSize);
108907 +        U32 s, position = 0;
108908 +        for (s=0; s<maxSV1; s++) {
108909 +            int i;
108910 +            for (i=0; i<normalizedCounter[s]; i++) {
108911 +                tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
108912 +                position = (position + step) & tableMask;
108913 +                while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
108914 +        }   }
108915 +        if (position!=0) return ERROR(GENERIC);   /* position must reach all cells once, otherwise normalizedCounter is incorrect */
108916 +    }
108918 +    /* Build Decoding table */
108919 +    {   U32 u;
108920 +        for (u=0; u<tableSize; u++) {
108921 +            FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);
108922 +            U32 const nextState = symbolNext[symbol]++;
108923 +            tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
108924 +            tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
108925 +    }   }
108927 +    return 0;
108930 +size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
108932 +    return FSE_buildDTable_internal(dt, normalizedCounter, maxSymbolValue, tableLog, workSpace, wkspSize);
108936 +#ifndef FSE_COMMONDEFS_ONLY
108938 +/*-*******************************************************
108939 +*  Decompression (Byte symbols)
108940 +*********************************************************/
108941 +size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
108943 +    void* ptr = dt;
108944 +    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
108945 +    void* dPtr = dt + 1;
108946 +    FSE_decode_t* const cell = (FSE_decode_t*)dPtr;
108948 +    DTableH->tableLog = 0;
108949 +    DTableH->fastMode = 0;
108951 +    cell->newState = 0;
108952 +    cell->symbol = symbolValue;
108953 +    cell->nbBits = 0;
108955 +    return 0;
108959 +size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
108961 +    void* ptr = dt;
108962 +    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
108963 +    void* dPtr = dt + 1;
108964 +    FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr;
108965 +    const unsigned tableSize = 1 << nbBits;
108966 +    const unsigned tableMask = tableSize - 1;
108967 +    const unsigned maxSV1 = tableMask+1;
108968 +    unsigned s;
108970 +    /* Sanity checks */
108971 +    if (nbBits < 1) return ERROR(GENERIC);         /* min size */
108973 +    /* Build Decoding Table */
108974 +    DTableH->tableLog = (U16)nbBits;
108975 +    DTableH->fastMode = 1;
108976 +    for (s=0; s<maxSV1; s++) {
108977 +        dinfo[s].newState = 0;
108978 +        dinfo[s].symbol = (BYTE)s;
108979 +        dinfo[s].nbBits = (BYTE)nbBits;
108980 +    }
108982 +    return 0;
108985 +FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic(
108986 +          void* dst, size_t maxDstSize,
108987 +    const void* cSrc, size_t cSrcSize,
108988 +    const FSE_DTable* dt, const unsigned fast)
108990 +    BYTE* const ostart = (BYTE*) dst;
108991 +    BYTE* op = ostart;
108992 +    BYTE* const omax = op + maxDstSize;
108993 +    BYTE* const olimit = omax-3;
108995 +    BIT_DStream_t bitD;
108996 +    FSE_DState_t state1;
108997 +    FSE_DState_t state2;
108999 +    /* Init */
109000 +    CHECK_F(BIT_initDStream(&bitD, cSrc, cSrcSize));
109002 +    FSE_initDState(&state1, &bitD, dt);
109003 +    FSE_initDState(&state2, &bitD, dt);
109005 +#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
109007 +    /* 4 symbols per loop */
109008 +    for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) & (op<olimit) ; op+=4) {
109009 +        op[0] = FSE_GETSYMBOL(&state1);
109011 +        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
109012 +            BIT_reloadDStream(&bitD);
109014 +        op[1] = FSE_GETSYMBOL(&state2);
109016 +        if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
109017 +            { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }
109019 +        op[2] = FSE_GETSYMBOL(&state1);
109021 +        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
109022 +            BIT_reloadDStream(&bitD);
109024 +        op[3] = FSE_GETSYMBOL(&state2);
109025 +    }
109027 +    /* tail */
109028 +    /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */
109029 +    while (1) {
109030 +        if (op>(omax-2)) return ERROR(dstSize_tooSmall);
109031 +        *op++ = FSE_GETSYMBOL(&state1);
109032 +        if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
109033 +            *op++ = FSE_GETSYMBOL(&state2);
109034 +            break;
109035 +        }
109037 +        if (op>(omax-2)) return ERROR(dstSize_tooSmall);
109038 +        *op++ = FSE_GETSYMBOL(&state2);
109039 +        if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
109040 +            *op++ = FSE_GETSYMBOL(&state1);
109041 +            break;
109042 +    }   }
109044 +    return op-ostart;
109048 +size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
109049 +                            const void* cSrc, size_t cSrcSize,
109050 +                            const FSE_DTable* dt)
109052 +    const void* ptr = dt;
109053 +    const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
109054 +    const U32 fastMode = DTableH->fastMode;
109056 +    /* select fast mode (static) */
109057 +    if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
109058 +    return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
109062 +size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
109064 +    return FSE_decompress_wksp_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, /* bmi2 */ 0);
109067 +typedef struct {
109068 +    short ncount[FSE_MAX_SYMBOL_VALUE + 1];
109069 +    FSE_DTable dtable[1]; /* Dynamically sized */
109070 +} FSE_DecompressWksp;
109073 +FORCE_INLINE_TEMPLATE size_t FSE_decompress_wksp_body(
109074 +        void* dst, size_t dstCapacity,
109075 +        const void* cSrc, size_t cSrcSize,
109076 +        unsigned maxLog, void* workSpace, size_t wkspSize,
109077 +        int bmi2)
109079 +    const BYTE* const istart = (const BYTE*)cSrc;
109080 +    const BYTE* ip = istart;
109081 +    unsigned tableLog;
109082 +    unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
109083 +    FSE_DecompressWksp* const wksp = (FSE_DecompressWksp*)workSpace;
109085 +    DEBUG_STATIC_ASSERT((FSE_MAX_SYMBOL_VALUE + 1) % 2 == 0);
109086 +    if (wkspSize < sizeof(*wksp)) return ERROR(GENERIC);
109088 +    /* normal FSE decoding mode */
109089 +    {
109090 +        size_t const NCountLength = FSE_readNCount_bmi2(wksp->ncount, &maxSymbolValue, &tableLog, istart, cSrcSize, bmi2);
109091 +        if (FSE_isError(NCountLength)) return NCountLength;
109092 +        if (tableLog > maxLog) return ERROR(tableLog_tooLarge);
109093 +        assert(NCountLength <= cSrcSize);
109094 +        ip += NCountLength;
109095 +        cSrcSize -= NCountLength;
109096 +    }
109098 +    if (FSE_DECOMPRESS_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(tableLog_tooLarge);
109099 +    workSpace = wksp->dtable + FSE_DTABLE_SIZE_U32(tableLog);
109100 +    wkspSize -= sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog);
109102 +    CHECK_F( FSE_buildDTable_internal(wksp->dtable, wksp->ncount, maxSymbolValue, tableLog, workSpace, wkspSize) );
109104 +    {
109105 +        const void* ptr = wksp->dtable;
109106 +        const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
109107 +        const U32 fastMode = DTableH->fastMode;
109109 +        /* select fast mode (static) */
109110 +        if (fastMode) return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 1);
109111 +        return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 0);
109112 +    }
109115 +/* Avoids the FORCE_INLINE of the _body() function. */
109116 +static size_t FSE_decompress_wksp_body_default(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
109118 +    return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 0);
109121 +#if DYNAMIC_BMI2
109122 +TARGET_ATTRIBUTE("bmi2") static size_t FSE_decompress_wksp_body_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
109124 +    return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 1);
109126 +#endif
109128 +size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2)
109130 +#if DYNAMIC_BMI2
109131 +    if (bmi2) {
109132 +        return FSE_decompress_wksp_body_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
109133 +    }
109134 +#endif
109135 +    (void)bmi2;
109136 +    return FSE_decompress_wksp_body_default(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
109140 +typedef FSE_DTable DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
109144 +#endif   /* FSE_COMMONDEFS_ONLY */
109145 diff --git a/lib/zstd/common/huf.h b/lib/zstd/common/huf.h
109146 new file mode 100644
109147 index 000000000000..b5dbd386c5e6
109148 --- /dev/null
109149 +++ b/lib/zstd/common/huf.h
109150 @@ -0,0 +1,355 @@
109151 +/* ******************************************************************
109152 + * huff0 huffman codec,
109153 + * part of Finite State Entropy library
109154 + * Copyright (c) Yann Collet, Facebook, Inc.
109156 + * You can contact the author at :
109157 + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
109159 + * This source code is licensed under both the BSD-style license (found in the
109160 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
109161 + * in the COPYING file in the root directory of this source tree).
109162 + * You may select, at your option, one of the above-listed licenses.
109163 +****************************************************************** */
109166 +#ifndef HUF_H_298734234
109167 +#define HUF_H_298734234
109169 +/* *** Dependencies *** */
109170 +#include "zstd_deps.h"    /* size_t */
109173 +/* *** library symbols visibility *** */
109174 +/* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual,
109175 + *        HUF symbols remain "private" (internal symbols for library only).
109176 + *        Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */
109177 +#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
109178 +#  define HUF_PUBLIC_API __attribute__ ((visibility ("default")))
109179 +#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1)   /* Visual expected */
109180 +#  define HUF_PUBLIC_API __declspec(dllexport)
109181 +#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
109182 +#  define HUF_PUBLIC_API __declspec(dllimport)  /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */
109183 +#else
109184 +#  define HUF_PUBLIC_API
109185 +#endif
109188 +/* ========================== */
109189 +/* ***  simple functions  *** */
109190 +/* ========================== */
109192 +/** HUF_compress() :
109193 + *  Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'.
109194 + * 'dst' buffer must be already allocated.
109195 + *  Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize).
109196 + * `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB.
109197 + * @return : size of compressed data (<= `dstCapacity`).
109198 + *  Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
109199 + *                   if HUF_isError(return), compression failed (more details using HUF_getErrorName())
109200 + */
109201 +HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity,
109202 +                             const void* src, size_t srcSize);
109204 +/** HUF_decompress() :
109205 + *  Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',
109206 + *  into already allocated buffer 'dst', of minimum size 'dstSize'.
109207 + * `originalSize` : **must** be the ***exact*** size of original (uncompressed) data.
109208 + *  Note : in contrast with FSE, HUF_decompress can regenerate
109209 + *         RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
109210 + *         because it knows size to regenerate (originalSize).
109211 + * @return : size of regenerated data (== originalSize),
109212 + *           or an error code, which can be tested using HUF_isError()
109213 + */
109214 +HUF_PUBLIC_API size_t HUF_decompress(void* dst,  size_t originalSize,
109215 +                               const void* cSrc, size_t cSrcSize);
109218 +/* ***   Tool functions *** */
109219 +#define HUF_BLOCKSIZE_MAX (128 * 1024)                  /**< maximum input size for a single block compressed with HUF_compress */
109220 +HUF_PUBLIC_API size_t HUF_compressBound(size_t size);   /**< maximum compressed size (worst case) */
109222 +/* Error Management */
109223 +HUF_PUBLIC_API unsigned    HUF_isError(size_t code);       /**< tells if a return value is an error code */
109224 +HUF_PUBLIC_API const char* HUF_getErrorName(size_t code);  /**< provides error code string (useful for debugging) */
109227 +/* ***   Advanced function   *** */
109229 +/** HUF_compress2() :
109230 + *  Same as HUF_compress(), but offers control over `maxSymbolValue` and `tableLog`.
109231 + * `maxSymbolValue` must be <= HUF_SYMBOLVALUE_MAX .
109232 + * `tableLog` must be `<= HUF_TABLELOG_MAX` . */
109233 +HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity,
109234 +                               const void* src, size_t srcSize,
109235 +                               unsigned maxSymbolValue, unsigned tableLog);
109237 +/** HUF_compress4X_wksp() :
109238 + *  Same as HUF_compress2(), but uses externally allocated `workSpace`.
109239 + * `workspace` must have minimum alignment of 4, and be at least as large as HUF_WORKSPACE_SIZE */
109240 +#define HUF_WORKSPACE_SIZE ((6 << 10) + 256)
109241 +#define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32))
109242 +HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,
109243 +                                     const void* src, size_t srcSize,
109244 +                                     unsigned maxSymbolValue, unsigned tableLog,
109245 +                                     void* workSpace, size_t wkspSize);
109247 +#endif   /* HUF_H_298734234 */
109249 +/* ******************************************************************
109250 + *  WARNING !!
109251 + *  The following section contains advanced and experimental definitions
109252 + *  which shall never be used in the context of a dynamic library,
109253 + *  because they are not guaranteed to remain stable in the future.
109254 + *  Only consider them in association with static linking.
109255 + * *****************************************************************/
109256 +#if !defined(HUF_H_HUF_STATIC_LINKING_ONLY)
109257 +#define HUF_H_HUF_STATIC_LINKING_ONLY
109259 +/* *** Dependencies *** */
109260 +#include "mem.h"   /* U32 */
109261 +#define FSE_STATIC_LINKING_ONLY
109262 +#include "fse.h"
109265 +/* *** Constants *** */
109266 +#define HUF_TABLELOG_MAX      12      /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
109267 +#define HUF_TABLELOG_DEFAULT  11      /* default tableLog value when none specified */
109268 +#define HUF_SYMBOLVALUE_MAX  255
109270 +#define HUF_TABLELOG_ABSOLUTEMAX  15  /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
109271 +#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
109272 +#  error "HUF_TABLELOG_MAX is too large !"
109273 +#endif
109276 +/* ****************************************
109277 +*  Static allocation
109278 +******************************************/
109279 +/* HUF buffer bounds */
109280 +#define HUF_CTABLEBOUND 129
109281 +#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8)   /* only true when incompressible is pre-filtered with fast heuristic */
109282 +#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size))   /* Macro version, useful for static allocation */
109284 +/* static allocation of HUF's Compression Table */
109285 +/* this is a private definition, just exposed for allocation and strict aliasing purpose. never EVER access its members directly */
109286 +struct HUF_CElt_s {
109287 +  U16  val;
109288 +  BYTE nbBits;
109289 +};   /* typedef'd to HUF_CElt */
109290 +typedef struct HUF_CElt_s HUF_CElt;   /* consider it an incomplete type */
109291 +#define HUF_CTABLE_SIZE_U32(maxSymbolValue)   ((maxSymbolValue)+1)   /* Use tables of U32, for proper alignment */
109292 +#define HUF_CTABLE_SIZE(maxSymbolValue)       (HUF_CTABLE_SIZE_U32(maxSymbolValue) * sizeof(U32))
109293 +#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \
109294 +    HUF_CElt name[HUF_CTABLE_SIZE_U32(maxSymbolValue)] /* no final ; */
109296 +/* static allocation of HUF's DTable */
109297 +typedef U32 HUF_DTable;
109298 +#define HUF_DTABLE_SIZE(maxTableLog)   (1 + (1<<(maxTableLog)))
109299 +#define HUF_CREATE_STATIC_DTABLEX1(DTable, maxTableLog) \
109300 +        HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) }
109301 +#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
109302 +        HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) }
109305 +/* ****************************************
109306 +*  Advanced decompression functions
109307 +******************************************/
109308 +size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
109309 +#ifndef HUF_FORCE_DECOMPRESS_X1
109310 +size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
109311 +#endif
109313 +size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< decodes RLE and uncompressed */
109314 +size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */
109315 +size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< considers RLE and uncompressed as errors */
109316 +size_t HUF_decompress4X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
109317 +size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< single-symbol decoder */
109318 +#ifndef HUF_FORCE_DECOMPRESS_X1
109319 +size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
109320 +size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< double-symbols decoder */
109321 +#endif
109324 +/* ****************************************
109325 + *  HUF detailed API
109326 + * ****************************************/
109328 +/*! HUF_compress() does the following:
109329 + *  1. count symbol occurrence from source[] into table count[] using FSE_count() (exposed within "fse.h")
109330 + *  2. (optional) refine tableLog using HUF_optimalTableLog()
109331 + *  3. build Huffman table from count using HUF_buildCTable()
109332 + *  4. save Huffman table to memory buffer using HUF_writeCTable()
109333 + *  5. encode the data stream using HUF_compress4X_usingCTable()
109335 + *  The following API allows targeting specific sub-functions for advanced tasks.
109336 + *  For example, it's possible to compress several blocks using the same 'CTable',
109337 + *  or to save and regenerate 'CTable' using external methods.
109338 + */
109339 +unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
109340 +size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits);   /* @return : maxNbBits; CTable and count can overlap. In which case, CTable will overwrite count content */
109341 +size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog);
109342 +size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, void* workspace, size_t workspaceSize);
109343 +size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
109344 +size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
109345 +int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
109347 +typedef enum {
109348 +   HUF_repeat_none,  /**< Cannot use the previous table */
109349 +   HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */
109350 +   HUF_repeat_valid  /**< Can use the previous table and it is assumed to be valid */
109351 + } HUF_repeat;
109352 +/** HUF_compress4X_repeat() :
109353 + *  Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
109354 + *  If it uses hufTable it does not modify hufTable or repeat.
109355 + *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
109356 + *  If preferRepeat then the old table will always be used if valid. */
109357 +size_t HUF_compress4X_repeat(void* dst, size_t dstSize,
109358 +                       const void* src, size_t srcSize,
109359 +                       unsigned maxSymbolValue, unsigned tableLog,
109360 +                       void* workSpace, size_t wkspSize,    /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
109361 +                       HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);
109363 +/** HUF_buildCTable_wksp() :
109364 + *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
109365 + * `workSpace` must be aligned on 4-bytes boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE.
109366 + */
109367 +#define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1)
109368 +#define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned))
109369 +size_t HUF_buildCTable_wksp (HUF_CElt* tree,
109370 +                       const unsigned* count, U32 maxSymbolValue, U32 maxNbBits,
109371 +                             void* workSpace, size_t wkspSize);
109373 +/*! HUF_readStats() :
109374 + *  Read compact Huffman tree, saved by HUF_writeCTable().
109375 + * `huffWeight` is destination buffer.
109376 + * @return : size read from `src` , or an error Code .
109377 + *  Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */
109378 +size_t HUF_readStats(BYTE* huffWeight, size_t hwSize,
109379 +                     U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
109380 +                     const void* src, size_t srcSize);
109382 +/*! HUF_readStats_wksp() :
109383 + * Same as HUF_readStats() but takes an external workspace which must be
109384 + * 4-byte aligned and its size must be >= HUF_READ_STATS_WORKSPACE_SIZE.
109385 + * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
109386 + */
109387 +#define HUF_READ_STATS_WORKSPACE_SIZE_U32 FSE_DECOMPRESS_WKSP_SIZE_U32(6, HUF_TABLELOG_MAX-1)
109388 +#define HUF_READ_STATS_WORKSPACE_SIZE (HUF_READ_STATS_WORKSPACE_SIZE_U32 * sizeof(unsigned))
109389 +size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize,
109390 +                          U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
109391 +                          const void* src, size_t srcSize,
109392 +                          void* workspace, size_t wkspSize,
109393 +                          int bmi2);
109395 +/** HUF_readCTable() :
109396 + *  Loading a CTable saved with HUF_writeCTable() */
109397 +size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned *hasZeroWeights);
109399 +/** HUF_getNbBits() :
109400 + *  Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX
109401 + *  Note 1 : is not inlined, as HUF_CElt definition is private
109402 + *  Note 2 : const void* used, so that it can provide a statically allocated table as argument (which uses type U32) */
109403 +U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue);
109406 + * HUF_decompress() does the following:
109407 + * 1. select the decompression algorithm (X1, X2) based on pre-computed heuristics
109408 + * 2. build Huffman table from save, using HUF_readDTableX?()
109409 + * 3. decode 1 or 4 segments in parallel using HUF_decompress?X?_usingDTable()
109410 + */
109412 +/** HUF_selectDecoder() :
109413 + *  Tells which decoder is likely to decode faster,
109414 + *  based on a set of pre-computed metrics.
109415 + * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .
109416 + *  Assumption : 0 < dstSize <= 128 KB */
109417 +U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize);
109420 + *  The minimum workspace size for the `workSpace` used in
109421 + *  HUF_readDTableX1_wksp() and HUF_readDTableX2_wksp().
109423 + *  The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when
109424 + *  HUF_TABLE_LOG_MAX=12 to ~1850 bytes when HUF_TABLE_LOG_MAX=15.
109425 + *  Buffer overflow errors may potentially occur if code modifications result in
109426 + *  a required workspace size greater than that specified in the following
109427 + *  macro.
109428 + */
109429 +#define HUF_DECOMPRESS_WORKSPACE_SIZE ((2 << 10) + (1 << 9))
109430 +#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))
109432 +#ifndef HUF_FORCE_DECOMPRESS_X2
109433 +size_t HUF_readDTableX1 (HUF_DTable* DTable, const void* src, size_t srcSize);
109434 +size_t HUF_readDTableX1_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
109435 +#endif
109436 +#ifndef HUF_FORCE_DECOMPRESS_X1
109437 +size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize);
109438 +size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
109439 +#endif
109441 +size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
109442 +#ifndef HUF_FORCE_DECOMPRESS_X2
109443 +size_t HUF_decompress4X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
109444 +#endif
109445 +#ifndef HUF_FORCE_DECOMPRESS_X1
109446 +size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
109447 +#endif
109450 +/* ====================== */
109451 +/* single stream variants */
109452 +/* ====================== */
109454 +size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
109455 +size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);  /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
109456 +size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
109457 +/** HUF_compress1X_repeat() :
109458 + *  Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
109459 + *  If it uses hufTable it does not modify hufTable or repeat.
109460 + *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
109461 + *  If preferRepeat then the old table will always be used if valid. */
109462 +size_t HUF_compress1X_repeat(void* dst, size_t dstSize,
109463 +                       const void* src, size_t srcSize,
109464 +                       unsigned maxSymbolValue, unsigned tableLog,
109465 +                       void* workSpace, size_t wkspSize,   /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
109466 +                       HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);
109468 +size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
109469 +#ifndef HUF_FORCE_DECOMPRESS_X1
109470 +size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbol decoder */
109471 +#endif
109473 +size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
109474 +size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);
109475 +#ifndef HUF_FORCE_DECOMPRESS_X2
109476 +size_t HUF_decompress1X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
109477 +size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< single-symbol decoder */
109478 +#endif
109479 +#ifndef HUF_FORCE_DECOMPRESS_X1
109480 +size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
109481 +size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< double-symbols decoder */
109482 +#endif
109484 +size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);   /**< automatic selection of sing or double symbol decoder, based on DTable */
109485 +#ifndef HUF_FORCE_DECOMPRESS_X2
109486 +size_t HUF_decompress1X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
109487 +#endif
109488 +#ifndef HUF_FORCE_DECOMPRESS_X1
109489 +size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
109490 +#endif
109492 +/* BMI2 variants.
109493 + * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
109494 + */
109495 +size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
109496 +#ifndef HUF_FORCE_DECOMPRESS_X2
109497 +size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
109498 +#endif
109499 +size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
109500 +size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
109501 +#ifndef HUF_FORCE_DECOMPRESS_X2
109502 +size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2);
109503 +#endif
109505 +#endif /* HUF_STATIC_LINKING_ONLY */
109506 diff --git a/lib/zstd/common/mem.h b/lib/zstd/common/mem.h
109507 new file mode 100644
109508 index 000000000000..4b5db5756a6f
109509 --- /dev/null
109510 +++ b/lib/zstd/common/mem.h
109511 @@ -0,0 +1,259 @@
109512 +/* SPDX-License-Identifier: GPL-2.0-only */
109514 + * Copyright (c) Yann Collet, Facebook, Inc.
109515 + * All rights reserved.
109517 + * This source code is licensed under both the BSD-style license (found in the
109518 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
109519 + * in the COPYING file in the root directory of this source tree).
109520 + * You may select, at your option, one of the above-listed licenses.
109521 + */
109523 +#ifndef MEM_H_MODULE
109524 +#define MEM_H_MODULE
109526 +/*-****************************************
109527 +*  Dependencies
109528 +******************************************/
109529 +#include <asm/unaligned.h>  /* get_unaligned, put_unaligned* */
109530 +#include <linux/compiler.h>  /* inline */
109531 +#include <linux/swab.h>  /* swab32, swab64 */
109532 +#include <linux/types.h>  /* size_t, ptrdiff_t */
109533 +#include "debug.h"  /* DEBUG_STATIC_ASSERT */
109535 +/*-****************************************
109536 +*  Compiler specifics
109537 +******************************************/
109538 +#define MEM_STATIC static inline
109540 +/*-**************************************************************
109541 +*  Basic Types
109542 +*****************************************************************/
109543 +typedef uint8_t  BYTE;
109544 +typedef uint16_t U16;
109545 +typedef int16_t  S16;
109546 +typedef uint32_t U32;
109547 +typedef int32_t  S32;
109548 +typedef uint64_t U64;
109549 +typedef int64_t  S64;
109551 +/*-**************************************************************
109552 +*  Memory I/O API
109553 +*****************************************************************/
109554 +/*=== Static platform detection ===*/
109555 +MEM_STATIC unsigned MEM_32bits(void);
109556 +MEM_STATIC unsigned MEM_64bits(void);
109557 +MEM_STATIC unsigned MEM_isLittleEndian(void);
109559 +/*=== Native unaligned read/write ===*/
109560 +MEM_STATIC U16 MEM_read16(const void* memPtr);
109561 +MEM_STATIC U32 MEM_read32(const void* memPtr);
109562 +MEM_STATIC U64 MEM_read64(const void* memPtr);
109563 +MEM_STATIC size_t MEM_readST(const void* memPtr);
109565 +MEM_STATIC void MEM_write16(void* memPtr, U16 value);
109566 +MEM_STATIC void MEM_write32(void* memPtr, U32 value);
109567 +MEM_STATIC void MEM_write64(void* memPtr, U64 value);
109569 +/*=== Little endian unaligned read/write ===*/
109570 +MEM_STATIC U16 MEM_readLE16(const void* memPtr);
109571 +MEM_STATIC U32 MEM_readLE24(const void* memPtr);
109572 +MEM_STATIC U32 MEM_readLE32(const void* memPtr);
109573 +MEM_STATIC U64 MEM_readLE64(const void* memPtr);
109574 +MEM_STATIC size_t MEM_readLEST(const void* memPtr);
109576 +MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val);
109577 +MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val);
109578 +MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32);
109579 +MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64);
109580 +MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val);
109582 +/*=== Big endian unaligned read/write ===*/
109583 +MEM_STATIC U32 MEM_readBE32(const void* memPtr);
109584 +MEM_STATIC U64 MEM_readBE64(const void* memPtr);
109585 +MEM_STATIC size_t MEM_readBEST(const void* memPtr);
109587 +MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32);
109588 +MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64);
109589 +MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val);
109591 +/*=== Byteswap ===*/
109592 +MEM_STATIC U32 MEM_swap32(U32 in);
109593 +MEM_STATIC U64 MEM_swap64(U64 in);
109594 +MEM_STATIC size_t MEM_swapST(size_t in);
109596 +/*-**************************************************************
109597 +*  Memory I/O Implementation
109598 +*****************************************************************/
109599 +MEM_STATIC unsigned MEM_32bits(void)
109601 +    return sizeof(size_t) == 4;
109604 +MEM_STATIC unsigned MEM_64bits(void)
109606 +    return sizeof(size_t) == 8;
109609 +#if defined(__LITTLE_ENDIAN)
109610 +#define MEM_LITTLE_ENDIAN 1
109611 +#else
109612 +#define MEM_LITTLE_ENDIAN 0
109613 +#endif
109615 +MEM_STATIC unsigned MEM_isLittleEndian(void)
109617 +    return MEM_LITTLE_ENDIAN;
109620 +MEM_STATIC U16 MEM_read16(const void *memPtr)
109622 +    return get_unaligned((const U16 *)memPtr);
109625 +MEM_STATIC U32 MEM_read32(const void *memPtr)
109627 +    return get_unaligned((const U32 *)memPtr);
109630 +MEM_STATIC U64 MEM_read64(const void *memPtr)
109632 +    return get_unaligned((const U64 *)memPtr);
109635 +MEM_STATIC size_t MEM_readST(const void *memPtr)
109637 +    return get_unaligned((const size_t *)memPtr);
109640 +MEM_STATIC void MEM_write16(void *memPtr, U16 value)
109642 +    put_unaligned(value, (U16 *)memPtr);
109645 +MEM_STATIC void MEM_write32(void *memPtr, U32 value)
109647 +    put_unaligned(value, (U32 *)memPtr);
109650 +MEM_STATIC void MEM_write64(void *memPtr, U64 value)
109652 +    put_unaligned(value, (U64 *)memPtr);
109655 +/*=== Little endian r/w ===*/
109657 +MEM_STATIC U16 MEM_readLE16(const void *memPtr)
109659 +    return get_unaligned_le16(memPtr);
109662 +MEM_STATIC void MEM_writeLE16(void *memPtr, U16 val)
109664 +    put_unaligned_le16(val, memPtr);
109667 +MEM_STATIC U32 MEM_readLE24(const void *memPtr)
109669 +    return MEM_readLE16(memPtr) + (((const BYTE *)memPtr)[2] << 16);
109672 +MEM_STATIC void MEM_writeLE24(void *memPtr, U32 val)
109674 +       MEM_writeLE16(memPtr, (U16)val);
109675 +       ((BYTE *)memPtr)[2] = (BYTE)(val >> 16);
109678 +MEM_STATIC U32 MEM_readLE32(const void *memPtr)
109680 +    return get_unaligned_le32(memPtr);
109683 +MEM_STATIC void MEM_writeLE32(void *memPtr, U32 val32)
109685 +    put_unaligned_le32(val32, memPtr);
109688 +MEM_STATIC U64 MEM_readLE64(const void *memPtr)
109690 +    return get_unaligned_le64(memPtr);
109693 +MEM_STATIC void MEM_writeLE64(void *memPtr, U64 val64)
109695 +    put_unaligned_le64(val64, memPtr);
109698 +MEM_STATIC size_t MEM_readLEST(const void *memPtr)
109700 +       if (MEM_32bits())
109701 +               return (size_t)MEM_readLE32(memPtr);
109702 +       else
109703 +               return (size_t)MEM_readLE64(memPtr);
109706 +MEM_STATIC void MEM_writeLEST(void *memPtr, size_t val)
109708 +       if (MEM_32bits())
109709 +               MEM_writeLE32(memPtr, (U32)val);
109710 +       else
109711 +               MEM_writeLE64(memPtr, (U64)val);
109714 +/*=== Big endian r/w ===*/
109716 +MEM_STATIC U32 MEM_readBE32(const void *memPtr)
109718 +    return get_unaligned_be32(memPtr);
109721 +MEM_STATIC void MEM_writeBE32(void *memPtr, U32 val32)
109723 +    put_unaligned_be32(val32, memPtr);
109726 +MEM_STATIC U64 MEM_readBE64(const void *memPtr)
109728 +    return get_unaligned_be64(memPtr);
109731 +MEM_STATIC void MEM_writeBE64(void *memPtr, U64 val64)
109733 +    put_unaligned_be64(val64, memPtr);
109736 +MEM_STATIC size_t MEM_readBEST(const void *memPtr)
109738 +       if (MEM_32bits())
109739 +               return (size_t)MEM_readBE32(memPtr);
109740 +       else
109741 +               return (size_t)MEM_readBE64(memPtr);
109744 +MEM_STATIC void MEM_writeBEST(void *memPtr, size_t val)
109746 +       if (MEM_32bits())
109747 +               MEM_writeBE32(memPtr, (U32)val);
109748 +       else
109749 +               MEM_writeBE64(memPtr, (U64)val);
109752 +MEM_STATIC U32 MEM_swap32(U32 in)
109754 +    return swab32(in);
109757 +MEM_STATIC U64 MEM_swap64(U64 in)
109759 +    return swab64(in);
109762 +MEM_STATIC size_t MEM_swapST(size_t in)
109764 +    if (MEM_32bits())
109765 +        return (size_t)MEM_swap32((U32)in);
109766 +    else
109767 +        return (size_t)MEM_swap64((U64)in);
109770 +#endif /* MEM_H_MODULE */
109771 diff --git a/lib/zstd/common/zstd_common.c b/lib/zstd/common/zstd_common.c
109772 new file mode 100644
109773 index 000000000000..3d7e35b309b5
109774 --- /dev/null
109775 +++ b/lib/zstd/common/zstd_common.c
109776 @@ -0,0 +1,83 @@
109778 + * Copyright (c) Yann Collet, Facebook, Inc.
109779 + * All rights reserved.
109781 + * This source code is licensed under both the BSD-style license (found in the
109782 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
109783 + * in the COPYING file in the root directory of this source tree).
109784 + * You may select, at your option, one of the above-listed licenses.
109785 + */
109789 +/*-*************************************
109790 +*  Dependencies
109791 +***************************************/
109792 +#define ZSTD_DEPS_NEED_MALLOC
109793 +#include "zstd_deps.h"   /* ZSTD_malloc, ZSTD_calloc, ZSTD_free, ZSTD_memset */
109794 +#include "error_private.h"
109795 +#include "zstd_internal.h"
109798 +/*-****************************************
109799 +*  Version
109800 +******************************************/
109801 +unsigned ZSTD_versionNumber(void) { return ZSTD_VERSION_NUMBER; }
109803 +const char* ZSTD_versionString(void) { return ZSTD_VERSION_STRING; }
109806 +/*-****************************************
109807 +*  ZSTD Error Management
109808 +******************************************/
109809 +#undef ZSTD_isError   /* defined within zstd_internal.h */
109810 +/*! ZSTD_isError() :
109811 + *  tells if a return value is an error code
109812 + *  symbol is required for external callers */
109813 +unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }
109815 +/*! ZSTD_getErrorName() :
109816 + *  provides error code string from function result (useful for debugging) */
109817 +const char* ZSTD_getErrorName(size_t code) { return ERR_getErrorName(code); }
109819 +/*! ZSTD_getError() :
109820 + *  convert a `size_t` function result into a proper ZSTD_errorCode enum */
109821 +ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); }
109823 +/*! ZSTD_getErrorString() :
109824 + *  provides error code string from enum */
109825 +const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); }
109829 +/*=**************************************************************
109830 +*  Custom allocator
109831 +****************************************************************/
109832 +void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem)
109834 +    if (customMem.customAlloc)
109835 +        return customMem.customAlloc(customMem.opaque, size);
109836 +    return ZSTD_malloc(size);
109839 +void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem)
109841 +    if (customMem.customAlloc) {
109842 +        /* calloc implemented as malloc+memset;
109843 +         * not as efficient as calloc, but next best guess for custom malloc */
109844 +        void* const ptr = customMem.customAlloc(customMem.opaque, size);
109845 +        ZSTD_memset(ptr, 0, size);
109846 +        return ptr;
109847 +    }
109848 +    return ZSTD_calloc(1, size);
109851 +void ZSTD_customFree(void* ptr, ZSTD_customMem customMem)
109853 +    if (ptr!=NULL) {
109854 +        if (customMem.customFree)
109855 +            customMem.customFree(customMem.opaque, ptr);
109856 +        else
109857 +            ZSTD_free(ptr);
109858 +    }
109860 diff --git a/lib/zstd/common/zstd_deps.h b/lib/zstd/common/zstd_deps.h
109861 new file mode 100644
109862 index 000000000000..853b72426215
109863 --- /dev/null
109864 +++ b/lib/zstd/common/zstd_deps.h
109865 @@ -0,0 +1,125 @@
109866 +/* SPDX-License-Identifier: GPL-2.0-only */
109868 + * Copyright (c) Facebook, Inc.
109869 + * All rights reserved.
109871 + * This source code is licensed under both the BSD-style license (found in the
109872 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
109873 + * in the COPYING file in the root directory of this source tree).
109874 + * You may select, at your option, one of the above-listed licenses.
109875 + */
109878 + * This file provides common libc dependencies that zstd requires.
109879 + * The purpose is to allow replacing this file with a custom implementation
109880 + * to compile zstd without libc support.
109881 + */
109883 +/* Need:
109884 + * NULL
109885 + * INT_MAX
109886 + * UINT_MAX
109887 + * ZSTD_memcpy()
109888 + * ZSTD_memset()
109889 + * ZSTD_memmove()
109890 + */
109891 +#ifndef ZSTD_DEPS_COMMON
109892 +#define ZSTD_DEPS_COMMON
109894 +#include <linux/limits.h>
109895 +#include <linux/stddef.h>
109897 +#define ZSTD_memcpy(d,s,n) __builtin_memcpy((d),(s),(n))
109898 +#define ZSTD_memmove(d,s,n) __builtin_memmove((d),(s),(n))
109899 +#define ZSTD_memset(d,s,n) __builtin_memset((d),(s),(n))
109901 +#endif /* ZSTD_DEPS_COMMON */
109904 + * Define malloc as always failing. That means the user must
109905 + * either use ZSTD_customMem or statically allocate memory.
109906 + * Need:
109907 + * ZSTD_malloc()
109908 + * ZSTD_free()
109909 + * ZSTD_calloc()
109910 + */
109911 +#ifdef ZSTD_DEPS_NEED_MALLOC
109912 +#ifndef ZSTD_DEPS_MALLOC
109913 +#define ZSTD_DEPS_MALLOC
109915 +#define ZSTD_malloc(s) ({ (void)(s); NULL; })
109916 +#define ZSTD_free(p) ((void)(p))
109917 +#define ZSTD_calloc(n,s) ({ (void)(n); (void)(s); NULL; })
109919 +#endif /* ZSTD_DEPS_MALLOC */
109920 +#endif /* ZSTD_DEPS_NEED_MALLOC */
109923 + * Provides 64-bit math support.
109924 + * Need:
109925 + * U64 ZSTD_div64(U64 dividend, U32 divisor)
109926 + */
109927 +#ifdef ZSTD_DEPS_NEED_MATH64
109928 +#ifndef ZSTD_DEPS_MATH64
109929 +#define ZSTD_DEPS_MATH64
109931 +#include <linux/math64.h>
109933 +static uint64_t ZSTD_div64(uint64_t dividend, uint32_t divisor) {
109934 +  return div_u64(dividend, divisor);
109937 +#endif /* ZSTD_DEPS_MATH64 */
109938 +#endif /* ZSTD_DEPS_NEED_MATH64 */
109941 + * This is only requested when DEBUGLEVEL >= 1, meaning
109942 + * it is disabled in production.
109943 + * Need:
109944 + * assert()
109945 + */
109946 +#ifdef ZSTD_DEPS_NEED_ASSERT
109947 +#ifndef ZSTD_DEPS_ASSERT
109948 +#define ZSTD_DEPS_ASSERT
109950 +#include <linux/kernel.h>
109952 +#define assert(x) WARN_ON((x))
109954 +#endif /* ZSTD_DEPS_ASSERT */
109955 +#endif /* ZSTD_DEPS_NEED_ASSERT */
109958 + * This is only requested when DEBUGLEVEL >= 2, meaning
109959 + * it is disabled in production.
109960 + * Need:
109961 + * ZSTD_DEBUG_PRINT()
109962 + */
109963 +#ifdef ZSTD_DEPS_NEED_IO
109964 +#ifndef ZSTD_DEPS_IO
109965 +#define ZSTD_DEPS_IO
109967 +#include <linux/printk.h>
109969 +#define ZSTD_DEBUG_PRINT(...) pr_debug(__VA_ARGS__)
109971 +#endif /* ZSTD_DEPS_IO */
109972 +#endif /* ZSTD_DEPS_NEED_IO */
109975 + * Only requested when MSAN is enabled.
109976 + * Need:
109977 + * intptr_t
109978 + */
109979 +#ifdef ZSTD_DEPS_NEED_STDINT
109980 +#ifndef ZSTD_DEPS_STDINT
109981 +#define ZSTD_DEPS_STDINT
109984 + * The Linux Kernel doesn't provide intptr_t, only uintptr_t, which
109985 + * is an unsigned long.
109986 + */
109987 +typedef long intptr_t;
109989 +#endif /* ZSTD_DEPS_STDINT */
109990 +#endif /* ZSTD_DEPS_NEED_STDINT */
109991 diff --git a/lib/zstd/common/zstd_internal.h b/lib/zstd/common/zstd_internal.h
109992 new file mode 100644
109993 index 000000000000..1f939cbe05ed
109994 --- /dev/null
109995 +++ b/lib/zstd/common/zstd_internal.h
109996 @@ -0,0 +1,450 @@
109998 + * Copyright (c) Yann Collet, Facebook, Inc.
109999 + * All rights reserved.
110001 + * This source code is licensed under both the BSD-style license (found in the
110002 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
110003 + * in the COPYING file in the root directory of this source tree).
110004 + * You may select, at your option, one of the above-listed licenses.
110005 + */
110007 +#ifndef ZSTD_CCOMMON_H_MODULE
110008 +#define ZSTD_CCOMMON_H_MODULE
110010 +/* this module contains definitions which must be identical
110011 + * across compression, decompression and dictBuilder.
110012 + * It also contains a few functions useful to at least 2 of them
110013 + * and which benefit from being inlined */
110015 +/*-*************************************
110016 +*  Dependencies
110017 +***************************************/
110018 +#include "compiler.h"
110019 +#include "mem.h"
110020 +#include "debug.h"                 /* assert, DEBUGLOG, RAWLOG, g_debuglevel */
110021 +#include "error_private.h"
110022 +#define ZSTD_STATIC_LINKING_ONLY
110023 +#include <linux/zstd.h>
110024 +#define FSE_STATIC_LINKING_ONLY
110025 +#include "fse.h"
110026 +#define HUF_STATIC_LINKING_ONLY
110027 +#include "huf.h"
110028 +#include <linux/xxhash.h>                /* XXH_reset, update, digest */
110029 +#define ZSTD_TRACE 0
110032 +/* ---- static assert (debug) --- */
110033 +#define ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)
110034 +#define ZSTD_isError ERR_isError   /* for inlining */
110035 +#define FSE_isError  ERR_isError
110036 +#define HUF_isError  ERR_isError
110039 +/*-*************************************
110040 +*  shared macros
110041 +***************************************/
110042 +#undef MIN
110043 +#undef MAX
110044 +#define MIN(a,b) ((a)<(b) ? (a) : (b))
110045 +#define MAX(a,b) ((a)>(b) ? (a) : (b))
110048 + * Ignore: this is an internal helper.
110050 + * This is a helper function to help force C99-correctness during compilation.
110051 + * Under strict compilation modes, variadic macro arguments can't be empty.
110052 + * However, variadic function arguments can be. Using a function therefore lets
110053 + * us statically check that at least one (string) argument was passed,
110054 + * independent of the compilation flags.
110055 + */
110056 +static INLINE_KEYWORD UNUSED_ATTR
110057 +void _force_has_format_string(const char *format, ...) {
110058 +  (void)format;
110062 + * Ignore: this is an internal helper.
110064 + * We want to force this function invocation to be syntactically correct, but
110065 + * we don't want to force runtime evaluation of its arguments.
110066 + */
110067 +#define _FORCE_HAS_FORMAT_STRING(...) \
110068 +  if (0) { \
110069 +    _force_has_format_string(__VA_ARGS__); \
110070 +  }
110073 + * Return the specified error if the condition evaluates to true.
110075 + * In debug modes, prints additional information.
110076 + * In order to do that (particularly, printing the conditional that failed),
110077 + * this can't just wrap RETURN_ERROR().
110078 + */
110079 +#define RETURN_ERROR_IF(cond, err, ...) \
110080 +  if (cond) { \
110081 +    RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", \
110082 +           __FILE__, __LINE__, ZSTD_QUOTE(cond), ZSTD_QUOTE(ERROR(err))); \
110083 +    _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
110084 +    RAWLOG(3, ": " __VA_ARGS__); \
110085 +    RAWLOG(3, "\n"); \
110086 +    return ERROR(err); \
110087 +  }
110090 + * Unconditionally return the specified error.
110092 + * In debug modes, prints additional information.
110093 + */
110094 +#define RETURN_ERROR(err, ...) \
110095 +  do { \
110096 +    RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", \
110097 +           __FILE__, __LINE__, ZSTD_QUOTE(ERROR(err))); \
110098 +    _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
110099 +    RAWLOG(3, ": " __VA_ARGS__); \
110100 +    RAWLOG(3, "\n"); \
110101 +    return ERROR(err); \
110102 +  } while(0);
110105 + * If the provided expression evaluates to an error code, returns that error code.
110107 + * In debug modes, prints additional information.
110108 + */
110109 +#define FORWARD_IF_ERROR(err, ...) \
110110 +  do { \
110111 +    size_t const err_code = (err); \
110112 +    if (ERR_isError(err_code)) { \
110113 +      RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", \
110114 +             __FILE__, __LINE__, ZSTD_QUOTE(err), ERR_getErrorName(err_code)); \
110115 +      _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
110116 +      RAWLOG(3, ": " __VA_ARGS__); \
110117 +      RAWLOG(3, "\n"); \
110118 +      return err_code; \
110119 +    } \
110120 +  } while(0);
110123 +/*-*************************************
110124 +*  Common constants
110125 +***************************************/
110126 +#define ZSTD_OPT_NUM    (1<<12)
110128 +#define ZSTD_REP_NUM      3                 /* number of repcodes */
110129 +#define ZSTD_REP_MOVE     (ZSTD_REP_NUM-1)
110130 +static UNUSED_ATTR const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };
110132 +#define KB *(1 <<10)
110133 +#define MB *(1 <<20)
110134 +#define GB *(1U<<30)
110136 +#define BIT7 128
110137 +#define BIT6  64
110138 +#define BIT5  32
110139 +#define BIT4  16
110140 +#define BIT1   2
110141 +#define BIT0   1
110143 +#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
110144 +static UNUSED_ATTR const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 };
110145 +static UNUSED_ATTR const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };
110147 +#define ZSTD_FRAMEIDSIZE 4   /* magic number size */
110149 +#define ZSTD_BLOCKHEADERSIZE 3   /* C standard doesn't allow `static const` variable to be init using another `static const` variable */
110150 +static UNUSED_ATTR const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
110151 +typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;
110153 +#define ZSTD_FRAMECHECKSUMSIZE 4
110155 +#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
110156 +#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */)   /* for a non-null block */
110158 +#define HufLog 12
110159 +typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e;
110161 +#define LONGNBSEQ 0x7F00
110163 +#define MINMATCH 3
110165 +#define Litbits  8
110166 +#define MaxLit ((1<<Litbits) - 1)
110167 +#define MaxML   52
110168 +#define MaxLL   35
110169 +#define DefaultMaxOff 28
110170 +#define MaxOff  31
110171 +#define MaxSeq MAX(MaxLL, MaxML)   /* Assumption : MaxOff < MaxLL,MaxML */
110172 +#define MLFSELog    9
110173 +#define LLFSELog    9
110174 +#define OffFSELog   8
110175 +#define MaxFSELog  MAX(MAX(MLFSELog, LLFSELog), OffFSELog)
110177 +#define ZSTD_MAX_HUF_HEADER_SIZE 128 /* header + <= 127 byte tree description */
110178 +/* Each table cannot take more than #symbols * FSELog bits */
110179 +#define ZSTD_MAX_FSE_HEADERS_SIZE (((MaxML + 1) * MLFSELog + (MaxLL + 1) * LLFSELog + (MaxOff + 1) * OffFSELog + 7) / 8)
110181 +static UNUSED_ATTR const U32 LL_bits[MaxLL+1] = {
110182 +     0, 0, 0, 0, 0, 0, 0, 0,
110183 +     0, 0, 0, 0, 0, 0, 0, 0,
110184 +     1, 1, 1, 1, 2, 2, 3, 3,
110185 +     4, 6, 7, 8, 9,10,11,12,
110186 +    13,14,15,16
110188 +static UNUSED_ATTR const S16 LL_defaultNorm[MaxLL+1] = {
110189 +     4, 3, 2, 2, 2, 2, 2, 2,
110190 +     2, 2, 2, 2, 2, 1, 1, 1,
110191 +     2, 2, 2, 2, 2, 2, 2, 2,
110192 +     2, 3, 2, 1, 1, 1, 1, 1,
110193 +    -1,-1,-1,-1
110195 +#define LL_DEFAULTNORMLOG 6  /* for static allocation */
110196 +static UNUSED_ATTR const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;
110198 +static UNUSED_ATTR const U32 ML_bits[MaxML+1] = {
110199 +     0, 0, 0, 0, 0, 0, 0, 0,
110200 +     0, 0, 0, 0, 0, 0, 0, 0,
110201 +     0, 0, 0, 0, 0, 0, 0, 0,
110202 +     0, 0, 0, 0, 0, 0, 0, 0,
110203 +     1, 1, 1, 1, 2, 2, 3, 3,
110204 +     4, 4, 5, 7, 8, 9,10,11,
110205 +    12,13,14,15,16
110207 +static UNUSED_ATTR const S16 ML_defaultNorm[MaxML+1] = {
110208 +     1, 4, 3, 2, 2, 2, 2, 2,
110209 +     2, 1, 1, 1, 1, 1, 1, 1,
110210 +     1, 1, 1, 1, 1, 1, 1, 1,
110211 +     1, 1, 1, 1, 1, 1, 1, 1,
110212 +     1, 1, 1, 1, 1, 1, 1, 1,
110213 +     1, 1, 1, 1, 1, 1,-1,-1,
110214 +    -1,-1,-1,-1,-1
110216 +#define ML_DEFAULTNORMLOG 6  /* for static allocation */
110217 +static UNUSED_ATTR const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;
110219 +static UNUSED_ATTR const S16 OF_defaultNorm[DefaultMaxOff+1] = {
110220 +     1, 1, 1, 1, 1, 1, 2, 2,
110221 +     2, 1, 1, 1, 1, 1, 1, 1,
110222 +     1, 1, 1, 1, 1, 1, 1, 1,
110223 +    -1,-1,-1,-1,-1
110225 +#define OF_DEFAULTNORMLOG 5  /* for static allocation */
110226 +static UNUSED_ATTR const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;
110229 +/*-*******************************************
110230 +*  Shared functions to include for inlining
110231 +*********************************************/
110232 +static void ZSTD_copy8(void* dst, const void* src) {
110233 +    ZSTD_memcpy(dst, src, 8);
110236 +#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
110237 +static void ZSTD_copy16(void* dst, const void* src) {
110238 +    ZSTD_memcpy(dst, src, 16);
110240 +#define COPY16(d,s) { ZSTD_copy16(d,s); d+=16; s+=16; }
110242 +#define WILDCOPY_OVERLENGTH 32
110243 +#define WILDCOPY_VECLEN 16
110245 +typedef enum {
110246 +    ZSTD_no_overlap,
110247 +    ZSTD_overlap_src_before_dst
110248 +    /*  ZSTD_overlap_dst_before_src, */
110249 +} ZSTD_overlap_e;
110251 +/*! ZSTD_wildcopy() :
110252 + *  Custom version of ZSTD_memcpy(), can over read/write up to WILDCOPY_OVERLENGTH bytes (if length==0)
110253 + *  @param ovtype controls the overlap detection
110254 + *         - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
110255 + *         - ZSTD_overlap_src_before_dst: The src and dst may overlap, but they MUST be at least 8 bytes apart.
110256 + *           The src buffer must be before the dst buffer.
110257 + */
110258 +MEM_STATIC FORCE_INLINE_ATTR
110259 +void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e const ovtype)
110261 +    ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src;
110262 +    const BYTE* ip = (const BYTE*)src;
110263 +    BYTE* op = (BYTE*)dst;
110264 +    BYTE* const oend = op + length;
110266 +    assert(diff >= 8 || (ovtype == ZSTD_no_overlap && diff <= -WILDCOPY_VECLEN));
110268 +    if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) {
110269 +        /* Handle short offset copies. */
110270 +        do {
110271 +            COPY8(op, ip)
110272 +        } while (op < oend);
110273 +    } else {
110274 +        assert(diff >= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN);
110275 +        /* Separate out the first COPY16() call because the copy length is
110276 +         * almost certain to be short, so the branches have different
110277 +         * probabilities. Since it is almost certain to be short, only do
110278 +         * one COPY16() in the first call. Then, do two calls per loop since
110279 +         * at that point it is more likely to have a high trip count.
110280 +         */
110281 +#ifdef __aarch64__
110282 +        do {
110283 +            COPY16(op, ip);
110284 +        }
110285 +        while (op < oend);
110286 +#else
110287 +        ZSTD_copy16(op, ip);
110288 +        if (16 >= length) return;
110289 +        op += 16;
110290 +        ip += 16;
110291 +        do {
110292 +            COPY16(op, ip);
110293 +            COPY16(op, ip);
110294 +        }
110295 +        while (op < oend);
110296 +#endif
110297 +    }
110300 +MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
110302 +    size_t const length = MIN(dstCapacity, srcSize);
110303 +    if (length > 0) {
110304 +        ZSTD_memcpy(dst, src, length);
110305 +    }
110306 +    return length;
110309 +/* define "workspace is too large" as this number of times larger than needed */
110310 +#define ZSTD_WORKSPACETOOLARGE_FACTOR 3
110312 +/* when workspace is continuously too large
110313 + * during at least this number of times,
110314 + * context's memory usage is considered wasteful,
110315 + * because it's sized to handle a worst case scenario which rarely happens.
110316 + * In which case, resize it down to free some memory */
110317 +#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128
110319 +/* Controls whether the input/output buffer is buffered or stable. */
110320 +typedef enum {
110321 +    ZSTD_bm_buffered = 0,  /* Buffer the input/output */
110322 +    ZSTD_bm_stable = 1     /* ZSTD_inBuffer/ZSTD_outBuffer is stable */
110323 +} ZSTD_bufferMode_e;
110326 +/*-*******************************************
110327 +*  Private declarations
110328 +*********************************************/
110329 +typedef struct seqDef_s {
110330 +    U32 offset;         /* Offset code of the sequence */
110331 +    U16 litLength;
110332 +    U16 matchLength;
110333 +} seqDef;
110335 +typedef struct {
110336 +    seqDef* sequencesStart;
110337 +    seqDef* sequences;      /* ptr to end of sequences */
110338 +    BYTE* litStart;
110339 +    BYTE* lit;              /* ptr to end of literals */
110340 +    BYTE* llCode;
110341 +    BYTE* mlCode;
110342 +    BYTE* ofCode;
110343 +    size_t maxNbSeq;
110344 +    size_t maxNbLit;
110346 +    /* longLengthPos and longLengthID to allow us to represent either a single litLength or matchLength
110347 +     * in the seqStore that has a value larger than U16 (if it exists). To do so, we increment
110348 +     * the existing value of the litLength or matchLength by 0x10000.
110349 +     */
110350 +    U32   longLengthID;   /* 0 == no longLength; 1 == Represent the long literal; 2 == Represent the long match; */
110351 +    U32   longLengthPos;  /* Index of the sequence to apply long length modification to */
110352 +} seqStore_t;
110354 +typedef struct {
110355 +    U32 litLength;
110356 +    U32 matchLength;
110357 +} ZSTD_sequenceLength;
110360 + * Returns the ZSTD_sequenceLength for the given sequences. It handles the decoding of long sequences
110361 + * indicated by longLengthPos and longLengthID, and adds MINMATCH back to matchLength.
110362 + */
110363 +MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore, seqDef const* seq)
110365 +    ZSTD_sequenceLength seqLen;
110366 +    seqLen.litLength = seq->litLength;
110367 +    seqLen.matchLength = seq->matchLength + MINMATCH;
110368 +    if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) {
110369 +        if (seqStore->longLengthID == 1) {
110370 +            seqLen.litLength += 0xFFFF;
110371 +        }
110372 +        if (seqStore->longLengthID == 2) {
110373 +            seqLen.matchLength += 0xFFFF;
110374 +        }
110375 +    }
110376 +    return seqLen;
110380 + * Contains the compressed frame size and an upper-bound for the decompressed frame size.
110381 + * Note: before using `compressedSize`, check for errors using ZSTD_isError().
110382 + *       similarly, before using `decompressedBound`, check for errors using:
110383 + *          `decompressedBound != ZSTD_CONTENTSIZE_ERROR`
110384 + */
110385 +typedef struct {
110386 +    size_t compressedSize;
110387 +    unsigned long long decompressedBound;
110388 +} ZSTD_frameSizeInfo;   /* decompress & legacy */
110390 +const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx);   /* compress & dictBuilder */
110391 +void ZSTD_seqToCodes(const seqStore_t* seqStorePtr);   /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */
110393 +/* custom memory allocation functions */
110394 +void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem);
110395 +void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem);
110396 +void ZSTD_customFree(void* ptr, ZSTD_customMem customMem);
110399 +MEM_STATIC U32 ZSTD_highbit32(U32 val)   /* compress, dictBuilder, decodeCorpus */
110401 +    assert(val != 0);
110402 +    {
110403 +#   if (__GNUC__ >= 3)   /* GCC Intrinsic */
110404 +        return __builtin_clz (val) ^ 31;
110405 +#   else   /* Software version */
110406 +        static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
110407 +        U32 v = val;
110408 +        v |= v >> 1;
110409 +        v |= v >> 2;
110410 +        v |= v >> 4;
110411 +        v |= v >> 8;
110412 +        v |= v >> 16;
110413 +        return DeBruijnClz[(v * 0x07C4ACDDU) >> 27];
110414 +#   endif
110415 +    }
110419 +/* ZSTD_invalidateRepCodes() :
110420 + * ensures next compression will not use repcodes from previous block.
110421 + * Note : only works with regular variant;
110422 + *        do not use with extDict variant ! */
110423 +void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx);   /* zstdmt, adaptive_compression (shouldn't get this definition from here) */
110426 +typedef struct {
110427 +    blockType_e blockType;
110428 +    U32 lastBlock;
110429 +    U32 origSize;
110430 +} blockProperties_t;   /* declared here for decompress and fullbench */
110432 +/*! ZSTD_getcBlockSize() :
110433 + *  Provides the size of compressed block from block header `src` */
110434 +/* Used by: decompress, fullbench (does not get its definition from here) */
110435 +size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
110436 +                          blockProperties_t* bpPtr);
110438 +/*! ZSTD_decodeSeqHeaders() :
110439 + *  decode sequence header from src */
110440 +/* Used by: decompress, fullbench (does not get its definition from here) */
110441 +size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
110442 +                       const void* src, size_t srcSize);
110446 +#endif   /* ZSTD_CCOMMON_H_MODULE */
110447 diff --git a/lib/zstd/compress.c b/lib/zstd/compress.c
110448 deleted file mode 100644
110449 index b080264ed3ad..000000000000
110450 --- a/lib/zstd/compress.c
110451 +++ /dev/null
110452 @@ -1,3485 +0,0 @@
110454 - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
110455 - * All rights reserved.
110457 - * This source code is licensed under the BSD-style license found in the
110458 - * LICENSE file in the root directory of https://github.com/facebook/zstd.
110459 - * An additional grant of patent rights can be found in the PATENTS file in the
110460 - * same directory.
110462 - * This program is free software; you can redistribute it and/or modify it under
110463 - * the terms of the GNU General Public License version 2 as published by the
110464 - * Free Software Foundation. This program is dual-licensed; you may select
110465 - * either version 2 of the GNU General Public License ("GPL") or BSD license
110466 - * ("BSD").
110467 - */
110469 -/*-*************************************
110470 -*  Dependencies
110471 -***************************************/
110472 -#include "fse.h"
110473 -#include "huf.h"
110474 -#include "mem.h"
110475 -#include "zstd_internal.h" /* includes zstd.h */
110476 -#include <linux/kernel.h>
110477 -#include <linux/module.h>
110478 -#include <linux/string.h> /* memset */
110480 -/*-*************************************
110481 -*  Constants
110482 -***************************************/
110483 -static const U32 g_searchStrength = 8; /* control skip over incompressible data */
110484 -#define HASH_READ_SIZE 8
110485 -typedef enum { ZSTDcs_created = 0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
110487 -/*-*************************************
110488 -*  Helper functions
110489 -***************************************/
110490 -size_t ZSTD_compressBound(size_t srcSize) { return FSE_compressBound(srcSize) + 12; }
110492 -/*-*************************************
110493 -*  Sequence storage
110494 -***************************************/
110495 -static void ZSTD_resetSeqStore(seqStore_t *ssPtr)
110497 -       ssPtr->lit = ssPtr->litStart;
110498 -       ssPtr->sequences = ssPtr->sequencesStart;
110499 -       ssPtr->longLengthID = 0;
110502 -/*-*************************************
110503 -*  Context memory management
110504 -***************************************/
110505 -struct ZSTD_CCtx_s {
110506 -       const BYTE *nextSrc;  /* next block here to continue on curr prefix */
110507 -       const BYTE *base;     /* All regular indexes relative to this position */
110508 -       const BYTE *dictBase; /* extDict indexes relative to this position */
110509 -       U32 dictLimit;  /* below that point, need extDict */
110510 -       U32 lowLimit;    /* below that point, no more data */
110511 -       U32 nextToUpdate;     /* index from which to continue dictionary update */
110512 -       U32 nextToUpdate3;    /* index from which to continue dictionary update */
110513 -       U32 hashLog3;    /* dispatch table : larger == faster, more memory */
110514 -       U32 loadedDictEnd;    /* index of end of dictionary */
110515 -       U32 forceWindow;      /* force back-references to respect limit of 1<<wLog, even for dictionary */
110516 -       U32 forceRawDict;     /* Force loading dictionary in "content-only" mode (no header analysis) */
110517 -       ZSTD_compressionStage_e stage;
110518 -       U32 rep[ZSTD_REP_NUM];
110519 -       U32 repToConfirm[ZSTD_REP_NUM];
110520 -       U32 dictID;
110521 -       ZSTD_parameters params;
110522 -       void *workSpace;
110523 -       size_t workSpaceSize;
110524 -       size_t blockSize;
110525 -       U64 frameContentSize;
110526 -       struct xxh64_state xxhState;
110527 -       ZSTD_customMem customMem;
110529 -       seqStore_t seqStore; /* sequences storage ptrs */
110530 -       U32 *hashTable;
110531 -       U32 *hashTable3;
110532 -       U32 *chainTable;
110533 -       HUF_CElt *hufTable;
110534 -       U32 flagStaticTables;
110535 -       HUF_repeat flagStaticHufTable;
110536 -       FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
110537 -       FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
110538 -       FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
110539 -       unsigned tmpCounters[HUF_COMPRESS_WORKSPACE_SIZE_U32];
110542 -size_t ZSTD_CCtxWorkspaceBound(ZSTD_compressionParameters cParams)
110544 -       size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << cParams.windowLog);
110545 -       U32 const divider = (cParams.searchLength == 3) ? 3 : 4;
110546 -       size_t const maxNbSeq = blockSize / divider;
110547 -       size_t const tokenSpace = blockSize + 11 * maxNbSeq;
110548 -       size_t const chainSize = (cParams.strategy == ZSTD_fast) ? 0 : (1 << cParams.chainLog);
110549 -       size_t const hSize = ((size_t)1) << cParams.hashLog;
110550 -       U32 const hashLog3 = (cParams.searchLength > 3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, cParams.windowLog);
110551 -       size_t const h3Size = ((size_t)1) << hashLog3;
110552 -       size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
110553 -       size_t const optSpace =
110554 -           ((MaxML + 1) + (MaxLL + 1) + (MaxOff + 1) + (1 << Litbits)) * sizeof(U32) + (ZSTD_OPT_NUM + 1) * (sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t));
110555 -       size_t const workspaceSize = tableSpace + (256 * sizeof(U32)) /* huffTable */ + tokenSpace +
110556 -                                    (((cParams.strategy == ZSTD_btopt) || (cParams.strategy == ZSTD_btopt2)) ? optSpace : 0);
110558 -       return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_CCtx)) + ZSTD_ALIGN(workspaceSize);
110561 -static ZSTD_CCtx *ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
110563 -       ZSTD_CCtx *cctx;
110564 -       if (!customMem.customAlloc || !customMem.customFree)
110565 -               return NULL;
110566 -       cctx = (ZSTD_CCtx *)ZSTD_malloc(sizeof(ZSTD_CCtx), customMem);
110567 -       if (!cctx)
110568 -               return NULL;
110569 -       memset(cctx, 0, sizeof(ZSTD_CCtx));
110570 -       cctx->customMem = customMem;
110571 -       return cctx;
110574 -ZSTD_CCtx *ZSTD_initCCtx(void *workspace, size_t workspaceSize)
110576 -       ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
110577 -       ZSTD_CCtx *cctx = ZSTD_createCCtx_advanced(stackMem);
110578 -       if (cctx) {
110579 -               cctx->workSpace = ZSTD_stackAllocAll(cctx->customMem.opaque, &cctx->workSpaceSize);
110580 -       }
110581 -       return cctx;
110584 -size_t ZSTD_freeCCtx(ZSTD_CCtx *cctx)
110586 -       if (cctx == NULL)
110587 -               return 0; /* support free on NULL */
110588 -       ZSTD_free(cctx->workSpace, cctx->customMem);
110589 -       ZSTD_free(cctx, cctx->customMem);
110590 -       return 0; /* reserved as a potential error code in the future */
110593 -const seqStore_t *ZSTD_getSeqStore(const ZSTD_CCtx *ctx) /* hidden interface */ { return &(ctx->seqStore); }
110595 -static ZSTD_parameters ZSTD_getParamsFromCCtx(const ZSTD_CCtx *cctx) { return cctx->params; }
110597 -/** ZSTD_checkParams() :
110598 -       ensure param values remain within authorized range.
110599 -       @return : 0, or an error code if one value is beyond authorized range */
110600 -size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
110602 -#define CLAMPCHECK(val, min, max)                                       \
110603 -       {                                                               \
110604 -               if ((val < min) | (val > max))                          \
110605 -                       return ERROR(compressionParameter_unsupported); \
110606 -       }
110607 -       CLAMPCHECK(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
110608 -       CLAMPCHECK(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
110609 -       CLAMPCHECK(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
110610 -       CLAMPCHECK(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
110611 -       CLAMPCHECK(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
110612 -       CLAMPCHECK(cParams.targetLength, ZSTD_TARGETLENGTH_MIN, ZSTD_TARGETLENGTH_MAX);
110613 -       if ((U32)(cParams.strategy) > (U32)ZSTD_btopt2)
110614 -               return ERROR(compressionParameter_unsupported);
110615 -       return 0;
110618 -/** ZSTD_cycleLog() :
110619 - *  condition for correct operation : hashLog > 1 */
110620 -static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
110622 -       U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
110623 -       return hashLog - btScale;
110626 -/** ZSTD_adjustCParams() :
110627 -       optimize `cPar` for a given input (`srcSize` and `dictSize`).
110628 -       mostly downsizing to reduce memory consumption and initialization.
110629 -       Both `srcSize` and `dictSize` are optional (use 0 if unknown),
110630 -       but if both are 0, no optimization can be done.
110631 -       Note : cPar is considered validated at this stage. Use ZSTD_checkParams() to ensure that. */
110632 -ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize)
110634 -       if (srcSize + dictSize == 0)
110635 -               return cPar; /* no size information available : no adjustment */
110637 -       /* resize params, to use less memory when necessary */
110638 -       {
110639 -               U32 const minSrcSize = (srcSize == 0) ? 500 : 0;
110640 -               U64 const rSize = srcSize + dictSize + minSrcSize;
110641 -               if (rSize < ((U64)1 << ZSTD_WINDOWLOG_MAX)) {
110642 -                       U32 const srcLog = MAX(ZSTD_HASHLOG_MIN, ZSTD_highbit32((U32)(rSize)-1) + 1);
110643 -                       if (cPar.windowLog > srcLog)
110644 -                               cPar.windowLog = srcLog;
110645 -               }
110646 -       }
110647 -       if (cPar.hashLog > cPar.windowLog)
110648 -               cPar.hashLog = cPar.windowLog;
110649 -       {
110650 -               U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
110651 -               if (cycleLog > cPar.windowLog)
110652 -                       cPar.chainLog -= (cycleLog - cPar.windowLog);
110653 -       }
110655 -       if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
110656 -               cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* required for frame header */
110658 -       return cPar;
110661 -static U32 ZSTD_equivalentParams(ZSTD_parameters param1, ZSTD_parameters param2)
110663 -       return (param1.cParams.hashLog == param2.cParams.hashLog) & (param1.cParams.chainLog == param2.cParams.chainLog) &
110664 -              (param1.cParams.strategy == param2.cParams.strategy) & ((param1.cParams.searchLength == 3) == (param2.cParams.searchLength == 3));
110667 -/*! ZSTD_continueCCtx() :
110668 -       reuse CCtx without reset (note : requires no dictionary) */
110669 -static size_t ZSTD_continueCCtx(ZSTD_CCtx *cctx, ZSTD_parameters params, U64 frameContentSize)
110671 -       U32 const end = (U32)(cctx->nextSrc - cctx->base);
110672 -       cctx->params = params;
110673 -       cctx->frameContentSize = frameContentSize;
110674 -       cctx->lowLimit = end;
110675 -       cctx->dictLimit = end;
110676 -       cctx->nextToUpdate = end + 1;
110677 -       cctx->stage = ZSTDcs_init;
110678 -       cctx->dictID = 0;
110679 -       cctx->loadedDictEnd = 0;
110680 -       {
110681 -               int i;
110682 -               for (i = 0; i < ZSTD_REP_NUM; i++)
110683 -                       cctx->rep[i] = repStartValue[i];
110684 -       }
110685 -       cctx->seqStore.litLengthSum = 0; /* force reset of btopt stats */
110686 -       xxh64_reset(&cctx->xxhState, 0);
110687 -       return 0;
110690 -typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset, ZSTDcrp_fullReset } ZSTD_compResetPolicy_e;
110692 -/*! ZSTD_resetCCtx_advanced() :
110693 -       note : `params` must be validated */
110694 -static size_t ZSTD_resetCCtx_advanced(ZSTD_CCtx *zc, ZSTD_parameters params, U64 frameContentSize, ZSTD_compResetPolicy_e const crp)
110696 -       if (crp == ZSTDcrp_continue)
110697 -               if (ZSTD_equivalentParams(params, zc->params)) {
110698 -                       zc->flagStaticTables = 0;
110699 -                       zc->flagStaticHufTable = HUF_repeat_none;
110700 -                       return ZSTD_continueCCtx(zc, params, frameContentSize);
110701 -               }
110703 -       {
110704 -               size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << params.cParams.windowLog);
110705 -               U32 const divider = (params.cParams.searchLength == 3) ? 3 : 4;
110706 -               size_t const maxNbSeq = blockSize / divider;
110707 -               size_t const tokenSpace = blockSize + 11 * maxNbSeq;
110708 -               size_t const chainSize = (params.cParams.strategy == ZSTD_fast) ? 0 : (1 << params.cParams.chainLog);
110709 -               size_t const hSize = ((size_t)1) << params.cParams.hashLog;
110710 -               U32 const hashLog3 = (params.cParams.searchLength > 3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, params.cParams.windowLog);
110711 -               size_t const h3Size = ((size_t)1) << hashLog3;
110712 -               size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
110713 -               void *ptr;
110715 -               /* Check if workSpace is large enough, alloc a new one if needed */
110716 -               {
110717 -                       size_t const optSpace = ((MaxML + 1) + (MaxLL + 1) + (MaxOff + 1) + (1 << Litbits)) * sizeof(U32) +
110718 -                                               (ZSTD_OPT_NUM + 1) * (sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t));
110719 -                       size_t const neededSpace = tableSpace + (256 * sizeof(U32)) /* huffTable */ + tokenSpace +
110720 -                                                  (((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) ? optSpace : 0);
110721 -                       if (zc->workSpaceSize < neededSpace) {
110722 -                               ZSTD_free(zc->workSpace, zc->customMem);
110723 -                               zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem);
110724 -                               if (zc->workSpace == NULL)
110725 -                                       return ERROR(memory_allocation);
110726 -                               zc->workSpaceSize = neededSpace;
110727 -                       }
110728 -               }
110730 -               if (crp != ZSTDcrp_noMemset)
110731 -                       memset(zc->workSpace, 0, tableSpace); /* reset tables only */
110732 -               xxh64_reset(&zc->xxhState, 0);
110733 -               zc->hashLog3 = hashLog3;
110734 -               zc->hashTable = (U32 *)(zc->workSpace);
110735 -               zc->chainTable = zc->hashTable + hSize;
110736 -               zc->hashTable3 = zc->chainTable + chainSize;
110737 -               ptr = zc->hashTable3 + h3Size;
110738 -               zc->hufTable = (HUF_CElt *)ptr;
110739 -               zc->flagStaticTables = 0;
110740 -               zc->flagStaticHufTable = HUF_repeat_none;
110741 -               ptr = ((U32 *)ptr) + 256; /* note : HUF_CElt* is incomplete type, size is simulated using U32 */
110743 -               zc->nextToUpdate = 1;
110744 -               zc->nextSrc = NULL;
110745 -               zc->base = NULL;
110746 -               zc->dictBase = NULL;
110747 -               zc->dictLimit = 0;
110748 -               zc->lowLimit = 0;
110749 -               zc->params = params;
110750 -               zc->blockSize = blockSize;
110751 -               zc->frameContentSize = frameContentSize;
110752 -               {
110753 -                       int i;
110754 -                       for (i = 0; i < ZSTD_REP_NUM; i++)
110755 -                               zc->rep[i] = repStartValue[i];
110756 -               }
110758 -               if ((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) {
110759 -                       zc->seqStore.litFreq = (U32 *)ptr;
110760 -                       zc->seqStore.litLengthFreq = zc->seqStore.litFreq + (1 << Litbits);
110761 -                       zc->seqStore.matchLengthFreq = zc->seqStore.litLengthFreq + (MaxLL + 1);
110762 -                       zc->seqStore.offCodeFreq = zc->seqStore.matchLengthFreq + (MaxML + 1);
110763 -                       ptr = zc->seqStore.offCodeFreq + (MaxOff + 1);
110764 -                       zc->seqStore.matchTable = (ZSTD_match_t *)ptr;
110765 -                       ptr = zc->seqStore.matchTable + ZSTD_OPT_NUM + 1;
110766 -                       zc->seqStore.priceTable = (ZSTD_optimal_t *)ptr;
110767 -                       ptr = zc->seqStore.priceTable + ZSTD_OPT_NUM + 1;
110768 -                       zc->seqStore.litLengthSum = 0;
110769 -               }
110770 -               zc->seqStore.sequencesStart = (seqDef *)ptr;
110771 -               ptr = zc->seqStore.sequencesStart + maxNbSeq;
110772 -               zc->seqStore.llCode = (BYTE *)ptr;
110773 -               zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq;
110774 -               zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq;
110775 -               zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq;
110777 -               zc->stage = ZSTDcs_init;
110778 -               zc->dictID = 0;
110779 -               zc->loadedDictEnd = 0;
110781 -               return 0;
110782 -       }
110785 -/* ZSTD_invalidateRepCodes() :
110786 - * ensures next compression will not use repcodes from previous block.
110787 - * Note : only works with regular variant;
110788 - *        do not use with extDict variant ! */
110789 -void ZSTD_invalidateRepCodes(ZSTD_CCtx *cctx)
110791 -       int i;
110792 -       for (i = 0; i < ZSTD_REP_NUM; i++)
110793 -               cctx->rep[i] = 0;
110796 -/*! ZSTD_copyCCtx() :
110797 -*   Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
110798 -*   Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
110799 -*   @return : 0, or an error code */
110800 -size_t ZSTD_copyCCtx(ZSTD_CCtx *dstCCtx, const ZSTD_CCtx *srcCCtx, unsigned long long pledgedSrcSize)
110802 -       if (srcCCtx->stage != ZSTDcs_init)
110803 -               return ERROR(stage_wrong);
110805 -       memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
110806 -       {
110807 -               ZSTD_parameters params = srcCCtx->params;
110808 -               params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
110809 -               ZSTD_resetCCtx_advanced(dstCCtx, params, pledgedSrcSize, ZSTDcrp_noMemset);
110810 -       }
110812 -       /* copy tables */
110813 -       {
110814 -               size_t const chainSize = (srcCCtx->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << srcCCtx->params.cParams.chainLog);
110815 -               size_t const hSize = ((size_t)1) << srcCCtx->params.cParams.hashLog;
110816 -               size_t const h3Size = (size_t)1 << srcCCtx->hashLog3;
110817 -               size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
110818 -               memcpy(dstCCtx->workSpace, srcCCtx->workSpace, tableSpace);
110819 -       }
110821 -       /* copy dictionary offsets */
110822 -       dstCCtx->nextToUpdate = srcCCtx->nextToUpdate;
110823 -       dstCCtx->nextToUpdate3 = srcCCtx->nextToUpdate3;
110824 -       dstCCtx->nextSrc = srcCCtx->nextSrc;
110825 -       dstCCtx->base = srcCCtx->base;
110826 -       dstCCtx->dictBase = srcCCtx->dictBase;
110827 -       dstCCtx->dictLimit = srcCCtx->dictLimit;
110828 -       dstCCtx->lowLimit = srcCCtx->lowLimit;
110829 -       dstCCtx->loadedDictEnd = srcCCtx->loadedDictEnd;
110830 -       dstCCtx->dictID = srcCCtx->dictID;
110832 -       /* copy entropy tables */
110833 -       dstCCtx->flagStaticTables = srcCCtx->flagStaticTables;
110834 -       dstCCtx->flagStaticHufTable = srcCCtx->flagStaticHufTable;
110835 -       if (srcCCtx->flagStaticTables) {
110836 -               memcpy(dstCCtx->litlengthCTable, srcCCtx->litlengthCTable, sizeof(dstCCtx->litlengthCTable));
110837 -               memcpy(dstCCtx->matchlengthCTable, srcCCtx->matchlengthCTable, sizeof(dstCCtx->matchlengthCTable));
110838 -               memcpy(dstCCtx->offcodeCTable, srcCCtx->offcodeCTable, sizeof(dstCCtx->offcodeCTable));
110839 -       }
110840 -       if (srcCCtx->flagStaticHufTable) {
110841 -               memcpy(dstCCtx->hufTable, srcCCtx->hufTable, 256 * 4);
110842 -       }
110844 -       return 0;
110847 -/*! ZSTD_reduceTable() :
110848 -*   reduce table indexes by `reducerValue` */
110849 -static void ZSTD_reduceTable(U32 *const table, U32 const size, U32 const reducerValue)
110851 -       U32 u;
110852 -       for (u = 0; u < size; u++) {
110853 -               if (table[u] < reducerValue)
110854 -                       table[u] = 0;
110855 -               else
110856 -                       table[u] -= reducerValue;
110857 -       }
110860 -/*! ZSTD_reduceIndex() :
110861 -*   rescale all indexes to avoid future overflow (indexes are U32) */
110862 -static void ZSTD_reduceIndex(ZSTD_CCtx *zc, const U32 reducerValue)
110864 -       {
110865 -               U32 const hSize = 1 << zc->params.cParams.hashLog;
110866 -               ZSTD_reduceTable(zc->hashTable, hSize, reducerValue);
110867 -       }
110869 -       {
110870 -               U32 const chainSize = (zc->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << zc->params.cParams.chainLog);
110871 -               ZSTD_reduceTable(zc->chainTable, chainSize, reducerValue);
110872 -       }
110874 -       {
110875 -               U32 const h3Size = (zc->hashLog3) ? 1 << zc->hashLog3 : 0;
110876 -               ZSTD_reduceTable(zc->hashTable3, h3Size, reducerValue);
110877 -       }
110880 -/*-*******************************************************
110881 -*  Block entropic compression
110882 -*********************************************************/
110884 -/* See doc/zstd_compression_format.md for detailed format description */
110886 -size_t ZSTD_noCompressBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
110888 -       if (srcSize + ZSTD_blockHeaderSize > dstCapacity)
110889 -               return ERROR(dstSize_tooSmall);
110890 -       memcpy((BYTE *)dst + ZSTD_blockHeaderSize, src, srcSize);
110891 -       ZSTD_writeLE24(dst, (U32)(srcSize << 2) + (U32)bt_raw);
110892 -       return ZSTD_blockHeaderSize + srcSize;
110895 -static size_t ZSTD_noCompressLiterals(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
110897 -       BYTE *const ostart = (BYTE * const)dst;
110898 -       U32 const flSize = 1 + (srcSize > 31) + (srcSize > 4095);
110900 -       if (srcSize + flSize > dstCapacity)
110901 -               return ERROR(dstSize_tooSmall);
110903 -       switch (flSize) {
110904 -       case 1: /* 2 - 1 - 5 */ ostart[0] = (BYTE)((U32)set_basic + (srcSize << 3)); break;
110905 -       case 2: /* 2 - 2 - 12 */ ZSTD_writeLE16(ostart, (U16)((U32)set_basic + (1 << 2) + (srcSize << 4))); break;
110906 -       default: /*note : should not be necessary : flSize is within {1,2,3} */
110907 -       case 3: /* 2 - 2 - 20 */ ZSTD_writeLE32(ostart, (U32)((U32)set_basic + (3 << 2) + (srcSize << 4))); break;
110908 -       }
110910 -       memcpy(ostart + flSize, src, srcSize);
110911 -       return srcSize + flSize;
110914 -static size_t ZSTD_compressRleLiteralsBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
110916 -       BYTE *const ostart = (BYTE * const)dst;
110917 -       U32 const flSize = 1 + (srcSize > 31) + (srcSize > 4095);
110919 -       (void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */
110921 -       switch (flSize) {
110922 -       case 1: /* 2 - 1 - 5 */ ostart[0] = (BYTE)((U32)set_rle + (srcSize << 3)); break;
110923 -       case 2: /* 2 - 2 - 12 */ ZSTD_writeLE16(ostart, (U16)((U32)set_rle + (1 << 2) + (srcSize << 4))); break;
110924 -       default: /*note : should not be necessary : flSize is necessarily within {1,2,3} */
110925 -       case 3: /* 2 - 2 - 20 */ ZSTD_writeLE32(ostart, (U32)((U32)set_rle + (3 << 2) + (srcSize << 4))); break;
110926 -       }
110928 -       ostart[flSize] = *(const BYTE *)src;
110929 -       return flSize + 1;
110932 -static size_t ZSTD_minGain(size_t srcSize) { return (srcSize >> 6) + 2; }
110934 -static size_t ZSTD_compressLiterals(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
110936 -       size_t const minGain = ZSTD_minGain(srcSize);
110937 -       size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
110938 -       BYTE *const ostart = (BYTE *)dst;
110939 -       U32 singleStream = srcSize < 256;
110940 -       symbolEncodingType_e hType = set_compressed;
110941 -       size_t cLitSize;
110943 -/* small ? don't even attempt compression (speed opt) */
110944 -#define LITERAL_NOENTROPY 63
110945 -       {
110946 -               size_t const minLitSize = zc->flagStaticHufTable == HUF_repeat_valid ? 6 : LITERAL_NOENTROPY;
110947 -               if (srcSize <= minLitSize)
110948 -                       return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
110949 -       }
110951 -       if (dstCapacity < lhSize + 1)
110952 -               return ERROR(dstSize_tooSmall); /* not enough space for compression */
110953 -       {
110954 -               HUF_repeat repeat = zc->flagStaticHufTable;
110955 -               int const preferRepeat = zc->params.cParams.strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
110956 -               if (repeat == HUF_repeat_valid && lhSize == 3)
110957 -                       singleStream = 1;
110958 -               cLitSize = singleStream ? HUF_compress1X_repeat(ostart + lhSize, dstCapacity - lhSize, src, srcSize, 255, 11, zc->tmpCounters,
110959 -                                                               sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat)
110960 -                                       : HUF_compress4X_repeat(ostart + lhSize, dstCapacity - lhSize, src, srcSize, 255, 11, zc->tmpCounters,
110961 -                                                               sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat);
110962 -               if (repeat != HUF_repeat_none) {
110963 -                       hType = set_repeat;
110964 -               } /* reused the existing table */
110965 -               else {
110966 -                       zc->flagStaticHufTable = HUF_repeat_check;
110967 -               } /* now have a table to reuse */
110968 -       }
110970 -       if ((cLitSize == 0) | (cLitSize >= srcSize - minGain)) {
110971 -               zc->flagStaticHufTable = HUF_repeat_none;
110972 -               return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
110973 -       }
110974 -       if (cLitSize == 1) {
110975 -               zc->flagStaticHufTable = HUF_repeat_none;
110976 -               return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
110977 -       }
110979 -       /* Build header */
110980 -       switch (lhSize) {
110981 -       case 3: /* 2 - 2 - 10 - 10 */
110982 -       {
110983 -               U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 14);
110984 -               ZSTD_writeLE24(ostart, lhc);
110985 -               break;
110986 -       }
110987 -       case 4: /* 2 - 2 - 14 - 14 */
110988 -       {
110989 -               U32 const lhc = hType + (2 << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 18);
110990 -               ZSTD_writeLE32(ostart, lhc);
110991 -               break;
110992 -       }
110993 -       default: /* should not be necessary, lhSize is only {3,4,5} */
110994 -       case 5:  /* 2 - 2 - 18 - 18 */
110995 -       {
110996 -               U32 const lhc = hType + (3 << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 22);
110997 -               ZSTD_writeLE32(ostart, lhc);
110998 -               ostart[4] = (BYTE)(cLitSize >> 10);
110999 -               break;
111000 -       }
111001 -       }
111002 -       return lhSize + cLitSize;
111005 -static const BYTE LL_Code[64] = {0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 16, 16, 17, 17, 18, 18,
111006 -                                19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23,
111007 -                                23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24};
111009 -static const BYTE ML_Code[128] = {0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
111010 -                                 26, 27, 28, 29, 30, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, 38, 38, 38, 38,
111011 -                                 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
111012 -                                 40, 40, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 42, 42, 42, 42, 42, 42, 42, 42,
111013 -                                 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42};
111015 -void ZSTD_seqToCodes(const seqStore_t *seqStorePtr)
111017 -       BYTE const LL_deltaCode = 19;
111018 -       BYTE const ML_deltaCode = 36;
111019 -       const seqDef *const sequences = seqStorePtr->sequencesStart;
111020 -       BYTE *const llCodeTable = seqStorePtr->llCode;
111021 -       BYTE *const ofCodeTable = seqStorePtr->ofCode;
111022 -       BYTE *const mlCodeTable = seqStorePtr->mlCode;
111023 -       U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
111024 -       U32 u;
111025 -       for (u = 0; u < nbSeq; u++) {
111026 -               U32 const llv = sequences[u].litLength;
111027 -               U32 const mlv = sequences[u].matchLength;
111028 -               llCodeTable[u] = (llv > 63) ? (BYTE)ZSTD_highbit32(llv) + LL_deltaCode : LL_Code[llv];
111029 -               ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);
111030 -               mlCodeTable[u] = (mlv > 127) ? (BYTE)ZSTD_highbit32(mlv) + ML_deltaCode : ML_Code[mlv];
111031 -       }
111032 -       if (seqStorePtr->longLengthID == 1)
111033 -               llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
111034 -       if (seqStorePtr->longLengthID == 2)
111035 -               mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
111038 -ZSTD_STATIC size_t ZSTD_compressSequences_internal(ZSTD_CCtx *zc, void *dst, size_t dstCapacity)
111040 -       const int longOffsets = zc->params.cParams.windowLog > STREAM_ACCUMULATOR_MIN;
111041 -       const seqStore_t *seqStorePtr = &(zc->seqStore);
111042 -       FSE_CTable *CTable_LitLength = zc->litlengthCTable;
111043 -       FSE_CTable *CTable_OffsetBits = zc->offcodeCTable;
111044 -       FSE_CTable *CTable_MatchLength = zc->matchlengthCTable;
111045 -       U32 LLtype, Offtype, MLtype; /* compressed, raw or rle */
111046 -       const seqDef *const sequences = seqStorePtr->sequencesStart;
111047 -       const BYTE *const ofCodeTable = seqStorePtr->ofCode;
111048 -       const BYTE *const llCodeTable = seqStorePtr->llCode;
111049 -       const BYTE *const mlCodeTable = seqStorePtr->mlCode;
111050 -       BYTE *const ostart = (BYTE *)dst;
111051 -       BYTE *const oend = ostart + dstCapacity;
111052 -       BYTE *op = ostart;
111053 -       size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
111054 -       BYTE *seqHead;
111056 -       U32 *count;
111057 -       S16 *norm;
111058 -       U32 *workspace;
111059 -       size_t workspaceSize = sizeof(zc->tmpCounters);
111060 -       {
111061 -               size_t spaceUsed32 = 0;
111062 -               count = (U32 *)zc->tmpCounters + spaceUsed32;
111063 -               spaceUsed32 += MaxSeq + 1;
111064 -               norm = (S16 *)((U32 *)zc->tmpCounters + spaceUsed32);
111065 -               spaceUsed32 += ALIGN(sizeof(S16) * (MaxSeq + 1), sizeof(U32)) >> 2;
111067 -               workspace = (U32 *)zc->tmpCounters + spaceUsed32;
111068 -               workspaceSize -= (spaceUsed32 << 2);
111069 -       }
111071 -       /* Compress literals */
111072 -       {
111073 -               const BYTE *const literals = seqStorePtr->litStart;
111074 -               size_t const litSize = seqStorePtr->lit - literals;
111075 -               size_t const cSize = ZSTD_compressLiterals(zc, op, dstCapacity, literals, litSize);
111076 -               if (ZSTD_isError(cSize))
111077 -                       return cSize;
111078 -               op += cSize;
111079 -       }
111081 -       /* Sequences Header */
111082 -       if ((oend - op) < 3 /*max nbSeq Size*/ + 1 /*seqHead */)
111083 -               return ERROR(dstSize_tooSmall);
111084 -       if (nbSeq < 0x7F)
111085 -               *op++ = (BYTE)nbSeq;
111086 -       else if (nbSeq < LONGNBSEQ)
111087 -               op[0] = (BYTE)((nbSeq >> 8) + 0x80), op[1] = (BYTE)nbSeq, op += 2;
111088 -       else
111089 -               op[0] = 0xFF, ZSTD_writeLE16(op + 1, (U16)(nbSeq - LONGNBSEQ)), op += 3;
111090 -       if (nbSeq == 0)
111091 -               return op - ostart;
111093 -       /* seqHead : flags for FSE encoding type */
111094 -       seqHead = op++;
111096 -#define MIN_SEQ_FOR_DYNAMIC_FSE 64
111097 -#define MAX_SEQ_FOR_STATIC_FSE 1000
111099 -       /* convert length/distances into codes */
111100 -       ZSTD_seqToCodes(seqStorePtr);
111102 -       /* CTable for Literal Lengths */
111103 -       {
111104 -               U32 max = MaxLL;
111105 -               size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace);
111106 -               if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
111107 -                       *op++ = llCodeTable[0];
111108 -                       FSE_buildCTable_rle(CTable_LitLength, (BYTE)max);
111109 -                       LLtype = set_rle;
111110 -               } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
111111 -                       LLtype = set_repeat;
111112 -               } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (LL_defaultNormLog - 1)))) {
111113 -                       FSE_buildCTable_wksp(CTable_LitLength, LL_defaultNorm, MaxLL, LL_defaultNormLog, workspace, workspaceSize);
111114 -                       LLtype = set_basic;
111115 -               } else {
111116 -                       size_t nbSeq_1 = nbSeq;
111117 -                       const U32 tableLog = FSE_optimalTableLog(LLFSELog, nbSeq, max);
111118 -                       if (count[llCodeTable[nbSeq - 1]] > 1) {
111119 -                               count[llCodeTable[nbSeq - 1]]--;
111120 -                               nbSeq_1--;
111121 -                       }
111122 -                       FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
111123 -                       {
111124 -                               size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
111125 -                               if (FSE_isError(NCountSize))
111126 -                                       return NCountSize;
111127 -                               op += NCountSize;
111128 -                       }
111129 -                       FSE_buildCTable_wksp(CTable_LitLength, norm, max, tableLog, workspace, workspaceSize);
111130 -                       LLtype = set_compressed;
111131 -               }
111132 -       }
111134 -       /* CTable for Offsets */
111135 -       {
111136 -               U32 max = MaxOff;
111137 -               size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, workspace);
111138 -               if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
111139 -                       *op++ = ofCodeTable[0];
111140 -                       FSE_buildCTable_rle(CTable_OffsetBits, (BYTE)max);
111141 -                       Offtype = set_rle;
111142 -               } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
111143 -                       Offtype = set_repeat;
111144 -               } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (OF_defaultNormLog - 1)))) {
111145 -                       FSE_buildCTable_wksp(CTable_OffsetBits, OF_defaultNorm, MaxOff, OF_defaultNormLog, workspace, workspaceSize);
111146 -                       Offtype = set_basic;
111147 -               } else {
111148 -                       size_t nbSeq_1 = nbSeq;
111149 -                       const U32 tableLog = FSE_optimalTableLog(OffFSELog, nbSeq, max);
111150 -                       if (count[ofCodeTable[nbSeq - 1]] > 1) {
111151 -                               count[ofCodeTable[nbSeq - 1]]--;
111152 -                               nbSeq_1--;
111153 -                       }
111154 -                       FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
111155 -                       {
111156 -                               size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
111157 -                               if (FSE_isError(NCountSize))
111158 -                                       return NCountSize;
111159 -                               op += NCountSize;
111160 -                       }
111161 -                       FSE_buildCTable_wksp(CTable_OffsetBits, norm, max, tableLog, workspace, workspaceSize);
111162 -                       Offtype = set_compressed;
111163 -               }
111164 -       }
111166 -       /* CTable for MatchLengths */
111167 -       {
111168 -               U32 max = MaxML;
111169 -               size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace);
111170 -               if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
111171 -                       *op++ = *mlCodeTable;
111172 -                       FSE_buildCTable_rle(CTable_MatchLength, (BYTE)max);
111173 -                       MLtype = set_rle;
111174 -               } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
111175 -                       MLtype = set_repeat;
111176 -               } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (ML_defaultNormLog - 1)))) {
111177 -                       FSE_buildCTable_wksp(CTable_MatchLength, ML_defaultNorm, MaxML, ML_defaultNormLog, workspace, workspaceSize);
111178 -                       MLtype = set_basic;
111179 -               } else {
111180 -                       size_t nbSeq_1 = nbSeq;
111181 -                       const U32 tableLog = FSE_optimalTableLog(MLFSELog, nbSeq, max);
111182 -                       if (count[mlCodeTable[nbSeq - 1]] > 1) {
111183 -                               count[mlCodeTable[nbSeq - 1]]--;
111184 -                               nbSeq_1--;
111185 -                       }
111186 -                       FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
111187 -                       {
111188 -                               size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
111189 -                               if (FSE_isError(NCountSize))
111190 -                                       return NCountSize;
111191 -                               op += NCountSize;
111192 -                       }
111193 -                       FSE_buildCTable_wksp(CTable_MatchLength, norm, max, tableLog, workspace, workspaceSize);
111194 -                       MLtype = set_compressed;
111195 -               }
111196 -       }
111198 -       *seqHead = (BYTE)((LLtype << 6) + (Offtype << 4) + (MLtype << 2));
111199 -       zc->flagStaticTables = 0;
111201 -       /* Encoding Sequences */
111202 -       {
111203 -               BIT_CStream_t blockStream;
111204 -               FSE_CState_t stateMatchLength;
111205 -               FSE_CState_t stateOffsetBits;
111206 -               FSE_CState_t stateLitLength;
111208 -               CHECK_E(BIT_initCStream(&blockStream, op, oend - op), dstSize_tooSmall); /* not enough space remaining */
111210 -               /* first symbols */
111211 -               FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq - 1]);
111212 -               FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq - 1]);
111213 -               FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq - 1]);
111214 -               BIT_addBits(&blockStream, sequences[nbSeq - 1].litLength, LL_bits[llCodeTable[nbSeq - 1]]);
111215 -               if (ZSTD_32bits())
111216 -                       BIT_flushBits(&blockStream);
111217 -               BIT_addBits(&blockStream, sequences[nbSeq - 1].matchLength, ML_bits[mlCodeTable[nbSeq - 1]]);
111218 -               if (ZSTD_32bits())
111219 -                       BIT_flushBits(&blockStream);
111220 -               if (longOffsets) {
111221 -                       U32 const ofBits = ofCodeTable[nbSeq - 1];
111222 -                       int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN - 1);
111223 -                       if (extraBits) {
111224 -                               BIT_addBits(&blockStream, sequences[nbSeq - 1].offset, extraBits);
111225 -                               BIT_flushBits(&blockStream);
111226 -                       }
111227 -                       BIT_addBits(&blockStream, sequences[nbSeq - 1].offset >> extraBits, ofBits - extraBits);
111228 -               } else {
111229 -                       BIT_addBits(&blockStream, sequences[nbSeq - 1].offset, ofCodeTable[nbSeq - 1]);
111230 -               }
111231 -               BIT_flushBits(&blockStream);
111233 -               {
111234 -                       size_t n;
111235 -                       for (n = nbSeq - 2; n < nbSeq; n--) { /* intentional underflow */
111236 -                               BYTE const llCode = llCodeTable[n];
111237 -                               BYTE const ofCode = ofCodeTable[n];
111238 -                               BYTE const mlCode = mlCodeTable[n];
111239 -                               U32 const llBits = LL_bits[llCode];
111240 -                               U32 const ofBits = ofCode; /* 32b*/ /* 64b*/
111241 -                               U32 const mlBits = ML_bits[mlCode];
111242 -                               /* (7)*/                                                            /* (7)*/
111243 -                               FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode); /* 15 */  /* 15 */
111244 -                               FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode); /* 24 */ /* 24 */
111245 -                               if (ZSTD_32bits())
111246 -                                       BIT_flushBits(&blockStream);                              /* (7)*/
111247 -                               FSE_encodeSymbol(&blockStream, &stateLitLength, llCode); /* 16 */ /* 33 */
111248 -                               if (ZSTD_32bits() || (ofBits + mlBits + llBits >= 64 - 7 - (LLFSELog + MLFSELog + OffFSELog)))
111249 -                                       BIT_flushBits(&blockStream); /* (7)*/
111250 -                               BIT_addBits(&blockStream, sequences[n].litLength, llBits);
111251 -                               if (ZSTD_32bits() && ((llBits + mlBits) > 24))
111252 -                                       BIT_flushBits(&blockStream);
111253 -                               BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
111254 -                               if (ZSTD_32bits())
111255 -                                       BIT_flushBits(&blockStream); /* (7)*/
111256 -                               if (longOffsets) {
111257 -                                       int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN - 1);
111258 -                                       if (extraBits) {
111259 -                                               BIT_addBits(&blockStream, sequences[n].offset, extraBits);
111260 -                                               BIT_flushBits(&blockStream); /* (7)*/
111261 -                                       }
111262 -                                       BIT_addBits(&blockStream, sequences[n].offset >> extraBits, ofBits - extraBits); /* 31 */
111263 -                               } else {
111264 -                                       BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */
111265 -                               }
111266 -                               BIT_flushBits(&blockStream); /* (7)*/
111267 -                       }
111268 -               }
111270 -               FSE_flushCState(&blockStream, &stateMatchLength);
111271 -               FSE_flushCState(&blockStream, &stateOffsetBits);
111272 -               FSE_flushCState(&blockStream, &stateLitLength);
111274 -               {
111275 -                       size_t const streamSize = BIT_closeCStream(&blockStream);
111276 -                       if (streamSize == 0)
111277 -                               return ERROR(dstSize_tooSmall); /* not enough space */
111278 -                       op += streamSize;
111279 -               }
111280 -       }
111281 -       return op - ostart;
111284 -ZSTD_STATIC size_t ZSTD_compressSequences(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, size_t srcSize)
111286 -       size_t const cSize = ZSTD_compressSequences_internal(zc, dst, dstCapacity);
111287 -       size_t const minGain = ZSTD_minGain(srcSize);
111288 -       size_t const maxCSize = srcSize - minGain;
111289 -       /* If the srcSize <= dstCapacity, then there is enough space to write a
111290 -        * raw uncompressed block. Since we ran out of space, the block must not
111291 -        * be compressible, so fall back to a raw uncompressed block.
111292 -        */
111293 -       int const uncompressibleError = cSize == ERROR(dstSize_tooSmall) && srcSize <= dstCapacity;
111294 -       int i;
111296 -       if (ZSTD_isError(cSize) && !uncompressibleError)
111297 -               return cSize;
111298 -       if (cSize >= maxCSize || uncompressibleError) {
111299 -               zc->flagStaticHufTable = HUF_repeat_none;
111300 -               return 0;
111301 -       }
111302 -       /* confirm repcodes */
111303 -       for (i = 0; i < ZSTD_REP_NUM; i++)
111304 -               zc->rep[i] = zc->repToConfirm[i];
111305 -       return cSize;
111308 -/*! ZSTD_storeSeq() :
111309 -       Store a sequence (literal length, literals, offset code and match length code) into seqStore_t.
111310 -       `offsetCode` : distance to match, or 0 == repCode.
111311 -       `matchCode` : matchLength - MINMATCH
111313 -ZSTD_STATIC void ZSTD_storeSeq(seqStore_t *seqStorePtr, size_t litLength, const void *literals, U32 offsetCode, size_t matchCode)
111315 -       /* copy Literals */
111316 -       ZSTD_wildcopy(seqStorePtr->lit, literals, litLength);
111317 -       seqStorePtr->lit += litLength;
111319 -       /* literal Length */
111320 -       if (litLength > 0xFFFF) {
111321 -               seqStorePtr->longLengthID = 1;
111322 -               seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
111323 -       }
111324 -       seqStorePtr->sequences[0].litLength = (U16)litLength;
111326 -       /* match offset */
111327 -       seqStorePtr->sequences[0].offset = offsetCode + 1;
111329 -       /* match Length */
111330 -       if (matchCode > 0xFFFF) {
111331 -               seqStorePtr->longLengthID = 2;
111332 -               seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
111333 -       }
111334 -       seqStorePtr->sequences[0].matchLength = (U16)matchCode;
111336 -       seqStorePtr->sequences++;
111339 -/*-*************************************
111340 -*  Match length counter
111341 -***************************************/
111342 -static unsigned ZSTD_NbCommonBytes(register size_t val)
111344 -       if (ZSTD_isLittleEndian()) {
111345 -               if (ZSTD_64bits()) {
111346 -                       return (__builtin_ctzll((U64)val) >> 3);
111347 -               } else { /* 32 bits */
111348 -                       return (__builtin_ctz((U32)val) >> 3);
111349 -               }
111350 -       } else { /* Big Endian CPU */
111351 -               if (ZSTD_64bits()) {
111352 -                       return (__builtin_clzll(val) >> 3);
111353 -               } else { /* 32 bits */
111354 -                       return (__builtin_clz((U32)val) >> 3);
111355 -               }
111356 -       }
111359 -static size_t ZSTD_count(const BYTE *pIn, const BYTE *pMatch, const BYTE *const pInLimit)
111361 -       const BYTE *const pStart = pIn;
111362 -       const BYTE *const pInLoopLimit = pInLimit - (sizeof(size_t) - 1);
111364 -       while (pIn < pInLoopLimit) {
111365 -               size_t const diff = ZSTD_readST(pMatch) ^ ZSTD_readST(pIn);
111366 -               if (!diff) {
111367 -                       pIn += sizeof(size_t);
111368 -                       pMatch += sizeof(size_t);
111369 -                       continue;
111370 -               }
111371 -               pIn += ZSTD_NbCommonBytes(diff);
111372 -               return (size_t)(pIn - pStart);
111373 -       }
111374 -       if (ZSTD_64bits())
111375 -               if ((pIn < (pInLimit - 3)) && (ZSTD_read32(pMatch) == ZSTD_read32(pIn))) {
111376 -                       pIn += 4;
111377 -                       pMatch += 4;
111378 -               }
111379 -       if ((pIn < (pInLimit - 1)) && (ZSTD_read16(pMatch) == ZSTD_read16(pIn))) {
111380 -               pIn += 2;
111381 -               pMatch += 2;
111382 -       }
111383 -       if ((pIn < pInLimit) && (*pMatch == *pIn))
111384 -               pIn++;
111385 -       return (size_t)(pIn - pStart);
111388 -/** ZSTD_count_2segments() :
111389 -*   can count match length with `ip` & `match` in 2 different segments.
111390 -*   convention : on reaching mEnd, match count continue starting from iStart
111392 -static size_t ZSTD_count_2segments(const BYTE *ip, const BYTE *match, const BYTE *iEnd, const BYTE *mEnd, const BYTE *iStart)
111394 -       const BYTE *const vEnd = MIN(ip + (mEnd - match), iEnd);
111395 -       size_t const matchLength = ZSTD_count(ip, match, vEnd);
111396 -       if (match + matchLength != mEnd)
111397 -               return matchLength;
111398 -       return matchLength + ZSTD_count(ip + matchLength, iStart, iEnd);
111401 -/*-*************************************
111402 -*  Hashes
111403 -***************************************/
111404 -static const U32 prime3bytes = 506832829U;
111405 -static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32 - 24)) * prime3bytes) >> (32 - h); }
111406 -ZSTD_STATIC size_t ZSTD_hash3Ptr(const void *ptr, U32 h) { return ZSTD_hash3(ZSTD_readLE32(ptr), h); } /* only in zstd_opt.h */
111408 -static const U32 prime4bytes = 2654435761U;
111409 -static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32 - h); }
111410 -static size_t ZSTD_hash4Ptr(const void *ptr, U32 h) { return ZSTD_hash4(ZSTD_read32(ptr), h); }
111412 -static const U64 prime5bytes = 889523592379ULL;
111413 -static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64 - 40)) * prime5bytes) >> (64 - h)); }
111414 -static size_t ZSTD_hash5Ptr(const void *p, U32 h) { return ZSTD_hash5(ZSTD_readLE64(p), h); }
111416 -static const U64 prime6bytes = 227718039650203ULL;
111417 -static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64 - 48)) * prime6bytes) >> (64 - h)); }
111418 -static size_t ZSTD_hash6Ptr(const void *p, U32 h) { return ZSTD_hash6(ZSTD_readLE64(p), h); }
111420 -static const U64 prime7bytes = 58295818150454627ULL;
111421 -static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64 - 56)) * prime7bytes) >> (64 - h)); }
111422 -static size_t ZSTD_hash7Ptr(const void *p, U32 h) { return ZSTD_hash7(ZSTD_readLE64(p), h); }
111424 -static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
111425 -static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u)*prime8bytes) >> (64 - h)); }
111426 -static size_t ZSTD_hash8Ptr(const void *p, U32 h) { return ZSTD_hash8(ZSTD_readLE64(p), h); }
111428 -static size_t ZSTD_hashPtr(const void *p, U32 hBits, U32 mls)
111430 -       switch (mls) {
111431 -       // case 3: return ZSTD_hash3Ptr(p, hBits);
111432 -       default:
111433 -       case 4: return ZSTD_hash4Ptr(p, hBits);
111434 -       case 5: return ZSTD_hash5Ptr(p, hBits);
111435 -       case 6: return ZSTD_hash6Ptr(p, hBits);
111436 -       case 7: return ZSTD_hash7Ptr(p, hBits);
111437 -       case 8: return ZSTD_hash8Ptr(p, hBits);
111438 -       }
111441 -/*-*************************************
111442 -*  Fast Scan
111443 -***************************************/
111444 -static void ZSTD_fillHashTable(ZSTD_CCtx *zc, const void *end, const U32 mls)
111446 -       U32 *const hashTable = zc->hashTable;
111447 -       U32 const hBits = zc->params.cParams.hashLog;
111448 -       const BYTE *const base = zc->base;
111449 -       const BYTE *ip = base + zc->nextToUpdate;
111450 -       const BYTE *const iend = ((const BYTE *)end) - HASH_READ_SIZE;
111451 -       const size_t fastHashFillStep = 3;
111453 -       while (ip <= iend) {
111454 -               hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base);
111455 -               ip += fastHashFillStep;
111456 -       }
111459 -FORCE_INLINE
111460 -void ZSTD_compressBlock_fast_generic(ZSTD_CCtx *cctx, const void *src, size_t srcSize, const U32 mls)
111462 -       U32 *const hashTable = cctx->hashTable;
111463 -       U32 const hBits = cctx->params.cParams.hashLog;
111464 -       seqStore_t *seqStorePtr = &(cctx->seqStore);
111465 -       const BYTE *const base = cctx->base;
111466 -       const BYTE *const istart = (const BYTE *)src;
111467 -       const BYTE *ip = istart;
111468 -       const BYTE *anchor = istart;
111469 -       const U32 lowestIndex = cctx->dictLimit;
111470 -       const BYTE *const lowest = base + lowestIndex;
111471 -       const BYTE *const iend = istart + srcSize;
111472 -       const BYTE *const ilimit = iend - HASH_READ_SIZE;
111473 -       U32 offset_1 = cctx->rep[0], offset_2 = cctx->rep[1];
111474 -       U32 offsetSaved = 0;
111476 -       /* init */
111477 -       ip += (ip == lowest);
111478 -       {
111479 -               U32 const maxRep = (U32)(ip - lowest);
111480 -               if (offset_2 > maxRep)
111481 -                       offsetSaved = offset_2, offset_2 = 0;
111482 -               if (offset_1 > maxRep)
111483 -                       offsetSaved = offset_1, offset_1 = 0;
111484 -       }
111486 -       /* Main Search Loop */
111487 -       while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
111488 -               size_t mLength;
111489 -               size_t const h = ZSTD_hashPtr(ip, hBits, mls);
111490 -               U32 const curr = (U32)(ip - base);
111491 -               U32 const matchIndex = hashTable[h];
111492 -               const BYTE *match = base + matchIndex;
111493 -               hashTable[h] = curr; /* update hash table */
111495 -               if ((offset_1 > 0) & (ZSTD_read32(ip + 1 - offset_1) == ZSTD_read32(ip + 1))) {
111496 -                       mLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4;
111497 -                       ip++;
111498 -                       ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
111499 -               } else {
111500 -                       U32 offset;
111501 -                       if ((matchIndex <= lowestIndex) || (ZSTD_read32(match) != ZSTD_read32(ip))) {
111502 -                               ip += ((ip - anchor) >> g_searchStrength) + 1;
111503 -                               continue;
111504 -                       }
111505 -                       mLength = ZSTD_count(ip + 4, match + 4, iend) + 4;
111506 -                       offset = (U32)(ip - match);
111507 -                       while (((ip > anchor) & (match > lowest)) && (ip[-1] == match[-1])) {
111508 -                               ip--;
111509 -                               match--;
111510 -                               mLength++;
111511 -                       } /* catch up */
111512 -                       offset_2 = offset_1;
111513 -                       offset_1 = offset;
111515 -                       ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
111516 -               }
111518 -               /* match found */
111519 -               ip += mLength;
111520 -               anchor = ip;
111522 -               if (ip <= ilimit) {
111523 -                       /* Fill Table */
111524 -                       hashTable[ZSTD_hashPtr(base + curr + 2, hBits, mls)] = curr + 2; /* here because curr+2 could be > iend-8 */
111525 -                       hashTable[ZSTD_hashPtr(ip - 2, hBits, mls)] = (U32)(ip - 2 - base);
111526 -                       /* check immediate repcode */
111527 -                       while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
111528 -                               /* store sequence */
111529 -                               size_t const rLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4;
111530 -                               {
111531 -                                       U32 const tmpOff = offset_2;
111532 -                                       offset_2 = offset_1;
111533 -                                       offset_1 = tmpOff;
111534 -                               } /* swap offset_2 <=> offset_1 */
111535 -                               hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base);
111536 -                               ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength - MINMATCH);
111537 -                               ip += rLength;
111538 -                               anchor = ip;
111539 -                               continue; /* faster when present ... (?) */
111540 -                       }
111541 -               }
111542 -       }
111544 -       /* save reps for next block */
111545 -       cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
111546 -       cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
111548 -       /* Last Literals */
111549 -       {
111550 -               size_t const lastLLSize = iend - anchor;
111551 -               memcpy(seqStorePtr->lit, anchor, lastLLSize);
111552 -               seqStorePtr->lit += lastLLSize;
111553 -       }
111556 -static void ZSTD_compressBlock_fast(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
111558 -       const U32 mls = ctx->params.cParams.searchLength;
111559 -       switch (mls) {
111560 -       default: /* includes case 3 */
111561 -       case 4: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 4); return;
111562 -       case 5: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 5); return;
111563 -       case 6: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 6); return;
111564 -       case 7: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 7); return;
111565 -       }
111568 -static void ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 mls)
111570 -       U32 *hashTable = ctx->hashTable;
111571 -       const U32 hBits = ctx->params.cParams.hashLog;
111572 -       seqStore_t *seqStorePtr = &(ctx->seqStore);
111573 -       const BYTE *const base = ctx->base;
111574 -       const BYTE *const dictBase = ctx->dictBase;
111575 -       const BYTE *const istart = (const BYTE *)src;
111576 -       const BYTE *ip = istart;
111577 -       const BYTE *anchor = istart;
111578 -       const U32 lowestIndex = ctx->lowLimit;
111579 -       const BYTE *const dictStart = dictBase + lowestIndex;
111580 -       const U32 dictLimit = ctx->dictLimit;
111581 -       const BYTE *const lowPrefixPtr = base + dictLimit;
111582 -       const BYTE *const dictEnd = dictBase + dictLimit;
111583 -       const BYTE *const iend = istart + srcSize;
111584 -       const BYTE *const ilimit = iend - 8;
111585 -       U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];
111587 -       /* Search Loop */
111588 -       while (ip < ilimit) { /* < instead of <=, because (ip+1) */
111589 -               const size_t h = ZSTD_hashPtr(ip, hBits, mls);
111590 -               const U32 matchIndex = hashTable[h];
111591 -               const BYTE *matchBase = matchIndex < dictLimit ? dictBase : base;
111592 -               const BYTE *match = matchBase + matchIndex;
111593 -               const U32 curr = (U32)(ip - base);
111594 -               const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */
111595 -               const BYTE *repBase = repIndex < dictLimit ? dictBase : base;
111596 -               const BYTE *repMatch = repBase + repIndex;
111597 -               size_t mLength;
111598 -               hashTable[h] = curr; /* update hash table */
111600 -               if ((((U32)((dictLimit - 1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) &&
111601 -                   (ZSTD_read32(repMatch) == ZSTD_read32(ip + 1))) {
111602 -                       const BYTE *repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
111603 -                       mLength = ZSTD_count_2segments(ip + 1 + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repMatchEnd, lowPrefixPtr) + EQUAL_READ32;
111604 -                       ip++;
111605 -                       ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
111606 -               } else {
111607 -                       if ((matchIndex < lowestIndex) || (ZSTD_read32(match) != ZSTD_read32(ip))) {
111608 -                               ip += ((ip - anchor) >> g_searchStrength) + 1;
111609 -                               continue;
111610 -                       }
111611 -                       {
111612 -                               const BYTE *matchEnd = matchIndex < dictLimit ? dictEnd : iend;
111613 -                               const BYTE *lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
111614 -                               U32 offset;
111615 -                               mLength = ZSTD_count_2segments(ip + EQUAL_READ32, match + EQUAL_READ32, iend, matchEnd, lowPrefixPtr) + EQUAL_READ32;
111616 -                               while (((ip > anchor) & (match > lowMatchPtr)) && (ip[-1] == match[-1])) {
111617 -                                       ip--;
111618 -                                       match--;
111619 -                                       mLength++;
111620 -                               } /* catch up */
111621 -                               offset = curr - matchIndex;
111622 -                               offset_2 = offset_1;
111623 -                               offset_1 = offset;
111624 -                               ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
111625 -                       }
111626 -               }
111628 -               /* found a match : store it */
111629 -               ip += mLength;
111630 -               anchor = ip;
111632 -               if (ip <= ilimit) {
111633 -                       /* Fill Table */
111634 -                       hashTable[ZSTD_hashPtr(base + curr + 2, hBits, mls)] = curr + 2;
111635 -                       hashTable[ZSTD_hashPtr(ip - 2, hBits, mls)] = (U32)(ip - 2 - base);
111636 -                       /* check immediate repcode */
111637 -                       while (ip <= ilimit) {
111638 -                               U32 const curr2 = (U32)(ip - base);
111639 -                               U32 const repIndex2 = curr2 - offset_2;
111640 -                               const BYTE *repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
111641 -                               if ((((U32)((dictLimit - 1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */
111642 -                                   && (ZSTD_read32(repMatch2) == ZSTD_read32(ip))) {
111643 -                                       const BYTE *const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
111644 -                                       size_t repLength2 =
111645 -                                           ZSTD_count_2segments(ip + EQUAL_READ32, repMatch2 + EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32;
111646 -                                       U32 tmpOffset = offset_2;
111647 -                                       offset_2 = offset_1;
111648 -                                       offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
111649 -                                       ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2 - MINMATCH);
111650 -                                       hashTable[ZSTD_hashPtr(ip, hBits, mls)] = curr2;
111651 -                                       ip += repLength2;
111652 -                                       anchor = ip;
111653 -                                       continue;
111654 -                               }
111655 -                               break;
111656 -                       }
111657 -               }
111658 -       }
111660 -       /* save reps for next block */
111661 -       ctx->repToConfirm[0] = offset_1;
111662 -       ctx->repToConfirm[1] = offset_2;
111664 -       /* Last Literals */
111665 -       {
111666 -               size_t const lastLLSize = iend - anchor;
111667 -               memcpy(seqStorePtr->lit, anchor, lastLLSize);
111668 -               seqStorePtr->lit += lastLLSize;
111669 -       }
111672 -static void ZSTD_compressBlock_fast_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
111674 -       U32 const mls = ctx->params.cParams.searchLength;
111675 -       switch (mls) {
111676 -       default: /* includes case 3 */
111677 -       case 4: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 4); return;
111678 -       case 5: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 5); return;
111679 -       case 6: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 6); return;
111680 -       case 7: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 7); return;
111681 -       }
111684 -/*-*************************************
111685 -*  Double Fast
111686 -***************************************/
111687 -static void ZSTD_fillDoubleHashTable(ZSTD_CCtx *cctx, const void *end, const U32 mls)
111689 -       U32 *const hashLarge = cctx->hashTable;
111690 -       U32 const hBitsL = cctx->params.cParams.hashLog;
111691 -       U32 *const hashSmall = cctx->chainTable;
111692 -       U32 const hBitsS = cctx->params.cParams.chainLog;
111693 -       const BYTE *const base = cctx->base;
111694 -       const BYTE *ip = base + cctx->nextToUpdate;
111695 -       const BYTE *const iend = ((const BYTE *)end) - HASH_READ_SIZE;
111696 -       const size_t fastHashFillStep = 3;
111698 -       while (ip <= iend) {
111699 -               hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base);
111700 -               hashLarge[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base);
111701 -               ip += fastHashFillStep;
111702 -       }
111705 -FORCE_INLINE
111706 -void ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx *cctx, const void *src, size_t srcSize, const U32 mls)
111708 -       U32 *const hashLong = cctx->hashTable;
111709 -       const U32 hBitsL = cctx->params.cParams.hashLog;
111710 -       U32 *const hashSmall = cctx->chainTable;
111711 -       const U32 hBitsS = cctx->params.cParams.chainLog;
111712 -       seqStore_t *seqStorePtr = &(cctx->seqStore);
111713 -       const BYTE *const base = cctx->base;
111714 -       const BYTE *const istart = (const BYTE *)src;
111715 -       const BYTE *ip = istart;
111716 -       const BYTE *anchor = istart;
111717 -       const U32 lowestIndex = cctx->dictLimit;
111718 -       const BYTE *const lowest = base + lowestIndex;
111719 -       const BYTE *const iend = istart + srcSize;
111720 -       const BYTE *const ilimit = iend - HASH_READ_SIZE;
111721 -       U32 offset_1 = cctx->rep[0], offset_2 = cctx->rep[1];
111722 -       U32 offsetSaved = 0;
111724 -       /* init */
111725 -       ip += (ip == lowest);
111726 -       {
111727 -               U32 const maxRep = (U32)(ip - lowest);
111728 -               if (offset_2 > maxRep)
111729 -                       offsetSaved = offset_2, offset_2 = 0;
111730 -               if (offset_1 > maxRep)
111731 -                       offsetSaved = offset_1, offset_1 = 0;
111732 -       }
111734 -       /* Main Search Loop */
111735 -       while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
111736 -               size_t mLength;
111737 -               size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
111738 -               size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
111739 -               U32 const curr = (U32)(ip - base);
111740 -               U32 const matchIndexL = hashLong[h2];
111741 -               U32 const matchIndexS = hashSmall[h];
111742 -               const BYTE *matchLong = base + matchIndexL;
111743 -               const BYTE *match = base + matchIndexS;
111744 -               hashLong[h2] = hashSmall[h] = curr; /* update hash tables */
111746 -               if ((offset_1 > 0) & (ZSTD_read32(ip + 1 - offset_1) == ZSTD_read32(ip + 1))) { /* note : by construction, offset_1 <= curr */
111747 -                       mLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4;
111748 -                       ip++;
111749 -                       ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
111750 -               } else {
111751 -                       U32 offset;
111752 -                       if ((matchIndexL > lowestIndex) && (ZSTD_read64(matchLong) == ZSTD_read64(ip))) {
111753 -                               mLength = ZSTD_count(ip + 8, matchLong + 8, iend) + 8;
111754 -                               offset = (U32)(ip - matchLong);
111755 -                               while (((ip > anchor) & (matchLong > lowest)) && (ip[-1] == matchLong[-1])) {
111756 -                                       ip--;
111757 -                                       matchLong--;
111758 -                                       mLength++;
111759 -                               } /* catch up */
111760 -                       } else if ((matchIndexS > lowestIndex) && (ZSTD_read32(match) == ZSTD_read32(ip))) {
111761 -                               size_t const h3 = ZSTD_hashPtr(ip + 1, hBitsL, 8);
111762 -                               U32 const matchIndex3 = hashLong[h3];
111763 -                               const BYTE *match3 = base + matchIndex3;
111764 -                               hashLong[h3] = curr + 1;
111765 -                               if ((matchIndex3 > lowestIndex) && (ZSTD_read64(match3) == ZSTD_read64(ip + 1))) {
111766 -                                       mLength = ZSTD_count(ip + 9, match3 + 8, iend) + 8;
111767 -                                       ip++;
111768 -                                       offset = (U32)(ip - match3);
111769 -                                       while (((ip > anchor) & (match3 > lowest)) && (ip[-1] == match3[-1])) {
111770 -                                               ip--;
111771 -                                               match3--;
111772 -                                               mLength++;
111773 -                                       } /* catch up */
111774 -                               } else {
111775 -                                       mLength = ZSTD_count(ip + 4, match + 4, iend) + 4;
111776 -                                       offset = (U32)(ip - match);
111777 -                                       while (((ip > anchor) & (match > lowest)) && (ip[-1] == match[-1])) {
111778 -                                               ip--;
111779 -                                               match--;
111780 -                                               mLength++;
111781 -                                       } /* catch up */
111782 -                               }
111783 -                       } else {
111784 -                               ip += ((ip - anchor) >> g_searchStrength) + 1;
111785 -                               continue;
111786 -                       }
111788 -                       offset_2 = offset_1;
111789 -                       offset_1 = offset;
111791 -                       ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
111792 -               }
111794 -               /* match found */
111795 -               ip += mLength;
111796 -               anchor = ip;
111798 -               if (ip <= ilimit) {
111799 -                       /* Fill Table */
111800 -                       hashLong[ZSTD_hashPtr(base + curr + 2, hBitsL, 8)] = hashSmall[ZSTD_hashPtr(base + curr + 2, hBitsS, mls)] =
111801 -                           curr + 2; /* here because curr+2 could be > iend-8 */
111802 -                       hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = hashSmall[ZSTD_hashPtr(ip - 2, hBitsS, mls)] = (U32)(ip - 2 - base);
111804 -                       /* check immediate repcode */
111805 -                       while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
111806 -                               /* store sequence */
111807 -                               size_t const rLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4;
111808 -                               {
111809 -                                       U32 const tmpOff = offset_2;
111810 -                                       offset_2 = offset_1;
111811 -                                       offset_1 = tmpOff;
111812 -                               } /* swap offset_2 <=> offset_1 */
111813 -                               hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base);
111814 -                               hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base);
111815 -                               ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength - MINMATCH);
111816 -                               ip += rLength;
111817 -                               anchor = ip;
111818 -                               continue; /* faster when present ... (?) */
111819 -                       }
111820 -               }
111821 -       }
111823 -       /* save reps for next block */
111824 -       cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
111825 -       cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
111827 -       /* Last Literals */
111828 -       {
111829 -               size_t const lastLLSize = iend - anchor;
111830 -               memcpy(seqStorePtr->lit, anchor, lastLLSize);
111831 -               seqStorePtr->lit += lastLLSize;
111832 -       }
111835 -static void ZSTD_compressBlock_doubleFast(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
111837 -       const U32 mls = ctx->params.cParams.searchLength;
111838 -       switch (mls) {
111839 -       default: /* includes case 3 */
111840 -       case 4: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 4); return;
111841 -       case 5: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 5); return;
111842 -       case 6: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 6); return;
111843 -       case 7: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 7); return;
111844 -       }
111847 -static void ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 mls)
111849 -       U32 *const hashLong = ctx->hashTable;
111850 -       U32 const hBitsL = ctx->params.cParams.hashLog;
111851 -       U32 *const hashSmall = ctx->chainTable;
111852 -       U32 const hBitsS = ctx->params.cParams.chainLog;
111853 -       seqStore_t *seqStorePtr = &(ctx->seqStore);
111854 -       const BYTE *const base = ctx->base;
111855 -       const BYTE *const dictBase = ctx->dictBase;
111856 -       const BYTE *const istart = (const BYTE *)src;
111857 -       const BYTE *ip = istart;
111858 -       const BYTE *anchor = istart;
111859 -       const U32 lowestIndex = ctx->lowLimit;
111860 -       const BYTE *const dictStart = dictBase + lowestIndex;
111861 -       const U32 dictLimit = ctx->dictLimit;
111862 -       const BYTE *const lowPrefixPtr = base + dictLimit;
111863 -       const BYTE *const dictEnd = dictBase + dictLimit;
111864 -       const BYTE *const iend = istart + srcSize;
111865 -       const BYTE *const ilimit = iend - 8;
111866 -       U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];
111868 -       /* Search Loop */
111869 -       while (ip < ilimit) { /* < instead of <=, because (ip+1) */
111870 -               const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
111871 -               const U32 matchIndex = hashSmall[hSmall];
111872 -               const BYTE *matchBase = matchIndex < dictLimit ? dictBase : base;
111873 -               const BYTE *match = matchBase + matchIndex;
111875 -               const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
111876 -               const U32 matchLongIndex = hashLong[hLong];
111877 -               const BYTE *matchLongBase = matchLongIndex < dictLimit ? dictBase : base;
111878 -               const BYTE *matchLong = matchLongBase + matchLongIndex;
111880 -               const U32 curr = (U32)(ip - base);
111881 -               const U32 repIndex = curr + 1 - offset_1; /* offset_1 expected <= curr +1 */
111882 -               const BYTE *repBase = repIndex < dictLimit ? dictBase : base;
111883 -               const BYTE *repMatch = repBase + repIndex;
111884 -               size_t mLength;
111885 -               hashSmall[hSmall] = hashLong[hLong] = curr; /* update hash table */
111887 -               if ((((U32)((dictLimit - 1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) &&
111888 -                   (ZSTD_read32(repMatch) == ZSTD_read32(ip + 1))) {
111889 -                       const BYTE *repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
111890 -                       mLength = ZSTD_count_2segments(ip + 1 + 4, repMatch + 4, iend, repMatchEnd, lowPrefixPtr) + 4;
111891 -                       ip++;
111892 -                       ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
111893 -               } else {
111894 -                       if ((matchLongIndex > lowestIndex) && (ZSTD_read64(matchLong) == ZSTD_read64(ip))) {
111895 -                               const BYTE *matchEnd = matchLongIndex < dictLimit ? dictEnd : iend;
111896 -                               const BYTE *lowMatchPtr = matchLongIndex < dictLimit ? dictStart : lowPrefixPtr;
111897 -                               U32 offset;
111898 -                               mLength = ZSTD_count_2segments(ip + 8, matchLong + 8, iend, matchEnd, lowPrefixPtr) + 8;
111899 -                               offset = curr - matchLongIndex;
111900 -                               while (((ip > anchor) & (matchLong > lowMatchPtr)) && (ip[-1] == matchLong[-1])) {
111901 -                                       ip--;
111902 -                                       matchLong--;
111903 -                                       mLength++;
111904 -                               } /* catch up */
111905 -                               offset_2 = offset_1;
111906 -                               offset_1 = offset;
111907 -                               ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
111909 -                       } else if ((matchIndex > lowestIndex) && (ZSTD_read32(match) == ZSTD_read32(ip))) {
111910 -                               size_t const h3 = ZSTD_hashPtr(ip + 1, hBitsL, 8);
111911 -                               U32 const matchIndex3 = hashLong[h3];
111912 -                               const BYTE *const match3Base = matchIndex3 < dictLimit ? dictBase : base;
111913 -                               const BYTE *match3 = match3Base + matchIndex3;
111914 -                               U32 offset;
111915 -                               hashLong[h3] = curr + 1;
111916 -                               if ((matchIndex3 > lowestIndex) && (ZSTD_read64(match3) == ZSTD_read64(ip + 1))) {
111917 -                                       const BYTE *matchEnd = matchIndex3 < dictLimit ? dictEnd : iend;
111918 -                                       const BYTE *lowMatchPtr = matchIndex3 < dictLimit ? dictStart : lowPrefixPtr;
111919 -                                       mLength = ZSTD_count_2segments(ip + 9, match3 + 8, iend, matchEnd, lowPrefixPtr) + 8;
111920 -                                       ip++;
111921 -                                       offset = curr + 1 - matchIndex3;
111922 -                                       while (((ip > anchor) & (match3 > lowMatchPtr)) && (ip[-1] == match3[-1])) {
111923 -                                               ip--;
111924 -                                               match3--;
111925 -                                               mLength++;
111926 -                                       } /* catch up */
111927 -                               } else {
111928 -                                       const BYTE *matchEnd = matchIndex < dictLimit ? dictEnd : iend;
111929 -                                       const BYTE *lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
111930 -                                       mLength = ZSTD_count_2segments(ip + 4, match + 4, iend, matchEnd, lowPrefixPtr) + 4;
111931 -                                       offset = curr - matchIndex;
111932 -                                       while (((ip > anchor) & (match > lowMatchPtr)) && (ip[-1] == match[-1])) {
111933 -                                               ip--;
111934 -                                               match--;
111935 -                                               mLength++;
111936 -                                       } /* catch up */
111937 -                               }
111938 -                               offset_2 = offset_1;
111939 -                               offset_1 = offset;
111940 -                               ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
111942 -                       } else {
111943 -                               ip += ((ip - anchor) >> g_searchStrength) + 1;
111944 -                               continue;
111945 -                       }
111946 -               }
111948 -               /* found a match : store it */
111949 -               ip += mLength;
111950 -               anchor = ip;
111952 -               if (ip <= ilimit) {
111953 -                       /* Fill Table */
111954 -                       hashSmall[ZSTD_hashPtr(base + curr + 2, hBitsS, mls)] = curr + 2;
111955 -                       hashLong[ZSTD_hashPtr(base + curr + 2, hBitsL, 8)] = curr + 2;
111956 -                       hashSmall[ZSTD_hashPtr(ip - 2, hBitsS, mls)] = (U32)(ip - 2 - base);
111957 -                       hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = (U32)(ip - 2 - base);
111958 -                       /* check immediate repcode */
111959 -                       while (ip <= ilimit) {
111960 -                               U32 const curr2 = (U32)(ip - base);
111961 -                               U32 const repIndex2 = curr2 - offset_2;
111962 -                               const BYTE *repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
111963 -                               if ((((U32)((dictLimit - 1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */
111964 -                                   && (ZSTD_read32(repMatch2) == ZSTD_read32(ip))) {
111965 -                                       const BYTE *const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
111966 -                                       size_t const repLength2 =
111967 -                                           ZSTD_count_2segments(ip + EQUAL_READ32, repMatch2 + EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32;
111968 -                                       U32 tmpOffset = offset_2;
111969 -                                       offset_2 = offset_1;
111970 -                                       offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
111971 -                                       ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2 - MINMATCH);
111972 -                                       hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = curr2;
111973 -                                       hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = curr2;
111974 -                                       ip += repLength2;
111975 -                                       anchor = ip;
111976 -                                       continue;
111977 -                               }
111978 -                               break;
111979 -                       }
111980 -               }
111981 -       }
111983 -       /* save reps for next block */
111984 -       ctx->repToConfirm[0] = offset_1;
111985 -       ctx->repToConfirm[1] = offset_2;
111987 -       /* Last Literals */
111988 -       {
111989 -               size_t const lastLLSize = iend - anchor;
111990 -               memcpy(seqStorePtr->lit, anchor, lastLLSize);
111991 -               seqStorePtr->lit += lastLLSize;
111992 -       }
111995 -static void ZSTD_compressBlock_doubleFast_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
111997 -       U32 const mls = ctx->params.cParams.searchLength;
111998 -       switch (mls) {
111999 -       default: /* includes case 3 */
112000 -       case 4: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 4); return;
112001 -       case 5: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 5); return;
112002 -       case 6: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 6); return;
112003 -       case 7: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 7); return;
112004 -       }
112007 -/*-*************************************
112008 -*  Binary Tree search
112009 -***************************************/
112010 -/** ZSTD_insertBt1() : add one or multiple positions to tree.
112011 -*   ip : assumed <= iend-8 .
112012 -*   @return : nb of positions added */
112013 -static U32 ZSTD_insertBt1(ZSTD_CCtx *zc, const BYTE *const ip, const U32 mls, const BYTE *const iend, U32 nbCompares, U32 extDict)
112015 -       U32 *const hashTable = zc->hashTable;
112016 -       U32 const hashLog = zc->params.cParams.hashLog;
112017 -       size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
112018 -       U32 *const bt = zc->chainTable;
112019 -       U32 const btLog = zc->params.cParams.chainLog - 1;
112020 -       U32 const btMask = (1 << btLog) - 1;
112021 -       U32 matchIndex = hashTable[h];
112022 -       size_t commonLengthSmaller = 0, commonLengthLarger = 0;
112023 -       const BYTE *const base = zc->base;
112024 -       const BYTE *const dictBase = zc->dictBase;
112025 -       const U32 dictLimit = zc->dictLimit;
112026 -       const BYTE *const dictEnd = dictBase + dictLimit;
112027 -       const BYTE *const prefixStart = base + dictLimit;
112028 -       const BYTE *match;
112029 -       const U32 curr = (U32)(ip - base);
112030 -       const U32 btLow = btMask >= curr ? 0 : curr - btMask;
112031 -       U32 *smallerPtr = bt + 2 * (curr & btMask);
112032 -       U32 *largerPtr = smallerPtr + 1;
112033 -       U32 dummy32; /* to be nullified at the end */
112034 -       U32 const windowLow = zc->lowLimit;
112035 -       U32 matchEndIdx = curr + 8;
112036 -       size_t bestLength = 8;
112038 -       hashTable[h] = curr; /* Update Hash Table */
112040 -       while (nbCompares-- && (matchIndex > windowLow)) {
112041 -               U32 *const nextPtr = bt + 2 * (matchIndex & btMask);
112042 -               size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
112044 -               if ((!extDict) || (matchIndex + matchLength >= dictLimit)) {
112045 -                       match = base + matchIndex;
112046 -                       if (match[matchLength] == ip[matchLength])
112047 -                               matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iend) + 1;
112048 -               } else {
112049 -                       match = dictBase + matchIndex;
112050 -                       matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iend, dictEnd, prefixStart);
112051 -                       if (matchIndex + matchLength >= dictLimit)
112052 -                               match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
112053 -               }
112055 -               if (matchLength > bestLength) {
112056 -                       bestLength = matchLength;
112057 -                       if (matchLength > matchEndIdx - matchIndex)
112058 -                               matchEndIdx = matchIndex + (U32)matchLength;
112059 -               }
112061 -               if (ip + matchLength == iend) /* equal : no way to know if inf or sup */
112062 -                       break;                /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt the tree */
112064 -               if (match[matchLength] < ip[matchLength]) { /* necessarily within correct buffer */
112065 -                       /* match is smaller than curr */
112066 -                       *smallerPtr = matchIndex;         /* update smaller idx */
112067 -                       commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
112068 -                       if (matchIndex <= btLow) {
112069 -                               smallerPtr = &dummy32;
112070 -                               break;
112071 -                       }                         /* beyond tree size, stop the search */
112072 -                       smallerPtr = nextPtr + 1; /* new "smaller" => larger of match */
112073 -                       matchIndex = nextPtr[1];  /* new matchIndex larger than previous (closer to curr) */
112074 -               } else {
112075 -                       /* match is larger than curr */
112076 -                       *largerPtr = matchIndex;
112077 -                       commonLengthLarger = matchLength;
112078 -                       if (matchIndex <= btLow) {
112079 -                               largerPtr = &dummy32;
112080 -                               break;
112081 -                       } /* beyond tree size, stop the search */
112082 -                       largerPtr = nextPtr;
112083 -                       matchIndex = nextPtr[0];
112084 -               }
112085 -       }
112087 -       *smallerPtr = *largerPtr = 0;
112088 -       if (bestLength > 384)
112089 -               return MIN(192, (U32)(bestLength - 384)); /* speed optimization */
112090 -       if (matchEndIdx > curr + 8)
112091 -               return matchEndIdx - curr - 8;
112092 -       return 1;
112095 -static size_t ZSTD_insertBtAndFindBestMatch(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, size_t *offsetPtr, U32 nbCompares, const U32 mls,
112096 -                                           U32 extDict)
112098 -       U32 *const hashTable = zc->hashTable;
112099 -       U32 const hashLog = zc->params.cParams.hashLog;
112100 -       size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
112101 -       U32 *const bt = zc->chainTable;
112102 -       U32 const btLog = zc->params.cParams.chainLog - 1;
112103 -       U32 const btMask = (1 << btLog) - 1;
112104 -       U32 matchIndex = hashTable[h];
112105 -       size_t commonLengthSmaller = 0, commonLengthLarger = 0;
112106 -       const BYTE *const base = zc->base;
112107 -       const BYTE *const dictBase = zc->dictBase;
112108 -       const U32 dictLimit = zc->dictLimit;
112109 -       const BYTE *const dictEnd = dictBase + dictLimit;
112110 -       const BYTE *const prefixStart = base + dictLimit;
112111 -       const U32 curr = (U32)(ip - base);
112112 -       const U32 btLow = btMask >= curr ? 0 : curr - btMask;
112113 -       const U32 windowLow = zc->lowLimit;
112114 -       U32 *smallerPtr = bt + 2 * (curr & btMask);
112115 -       U32 *largerPtr = bt + 2 * (curr & btMask) + 1;
112116 -       U32 matchEndIdx = curr + 8;
112117 -       U32 dummy32; /* to be nullified at the end */
112118 -       size_t bestLength = 0;
112120 -       hashTable[h] = curr; /* Update Hash Table */
112122 -       while (nbCompares-- && (matchIndex > windowLow)) {
112123 -               U32 *const nextPtr = bt + 2 * (matchIndex & btMask);
112124 -               size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
112125 -               const BYTE *match;
112127 -               if ((!extDict) || (matchIndex + matchLength >= dictLimit)) {
112128 -                       match = base + matchIndex;
112129 -                       if (match[matchLength] == ip[matchLength])
112130 -                               matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iend) + 1;
112131 -               } else {
112132 -                       match = dictBase + matchIndex;
112133 -                       matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iend, dictEnd, prefixStart);
112134 -                       if (matchIndex + matchLength >= dictLimit)
112135 -                               match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
112136 -               }
112138 -               if (matchLength > bestLength) {
112139 -                       if (matchLength > matchEndIdx - matchIndex)
112140 -                               matchEndIdx = matchIndex + (U32)matchLength;
112141 -                       if ((4 * (int)(matchLength - bestLength)) > (int)(ZSTD_highbit32(curr - matchIndex + 1) - ZSTD_highbit32((U32)offsetPtr[0] + 1)))
112142 -                               bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex;
112143 -                       if (ip + matchLength == iend) /* equal : no way to know if inf or sup */
112144 -                               break;                /* drop, to guarantee consistency (miss a little bit of compression) */
112145 -               }
112147 -               if (match[matchLength] < ip[matchLength]) {
112148 -                       /* match is smaller than curr */
112149 -                       *smallerPtr = matchIndex;         /* update smaller idx */
112150 -                       commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
112151 -                       if (matchIndex <= btLow) {
112152 -                               smallerPtr = &dummy32;
112153 -                               break;
112154 -                       }                         /* beyond tree size, stop the search */
112155 -                       smallerPtr = nextPtr + 1; /* new "smaller" => larger of match */
112156 -                       matchIndex = nextPtr[1];  /* new matchIndex larger than previous (closer to curr) */
112157 -               } else {
112158 -                       /* match is larger than curr */
112159 -                       *largerPtr = matchIndex;
112160 -                       commonLengthLarger = matchLength;
112161 -                       if (matchIndex <= btLow) {
112162 -                               largerPtr = &dummy32;
112163 -                               break;
112164 -                       } /* beyond tree size, stop the search */
112165 -                       largerPtr = nextPtr;
112166 -                       matchIndex = nextPtr[0];
112167 -               }
112168 -       }
112170 -       *smallerPtr = *largerPtr = 0;
112172 -       zc->nextToUpdate = (matchEndIdx > curr + 8) ? matchEndIdx - 8 : curr + 1;
112173 -       return bestLength;
112176 -static void ZSTD_updateTree(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, const U32 nbCompares, const U32 mls)
112178 -       const BYTE *const base = zc->base;
112179 -       const U32 target = (U32)(ip - base);
112180 -       U32 idx = zc->nextToUpdate;
112182 -       while (idx < target)
112183 -               idx += ZSTD_insertBt1(zc, base + idx, mls, iend, nbCompares, 0);
112186 -/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
112187 -static size_t ZSTD_BtFindBestMatch(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 mls)
112189 -       if (ip < zc->base + zc->nextToUpdate)
112190 -               return 0; /* skipped area */
112191 -       ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls);
112192 -       return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 0);
112195 -static size_t ZSTD_BtFindBestMatch_selectMLS(ZSTD_CCtx *zc, /* Index table will be updated */
112196 -                                            const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 matchLengthSearch)
112198 -       switch (matchLengthSearch) {
112199 -       default: /* includes case 3 */
112200 -       case 4: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
112201 -       case 5: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
112202 -       case 7:
112203 -       case 6: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
112204 -       }
112207 -static void ZSTD_updateTree_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, const U32 nbCompares, const U32 mls)
112209 -       const BYTE *const base = zc->base;
112210 -       const U32 target = (U32)(ip - base);
112211 -       U32 idx = zc->nextToUpdate;
112213 -       while (idx < target)
112214 -               idx += ZSTD_insertBt1(zc, base + idx, mls, iend, nbCompares, 1);
112217 -/** Tree updater, providing best match */
112218 -static size_t ZSTD_BtFindBestMatch_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
112219 -                                          const U32 mls)
112221 -       if (ip < zc->base + zc->nextToUpdate)
112222 -               return 0; /* skipped area */
112223 -       ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls);
112224 -       return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 1);
112227 -static size_t ZSTD_BtFindBestMatch_selectMLS_extDict(ZSTD_CCtx *zc, /* Index table will be updated */
112228 -                                                    const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
112229 -                                                    const U32 matchLengthSearch)
112231 -       switch (matchLengthSearch) {
112232 -       default: /* includes case 3 */
112233 -       case 4: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
112234 -       case 5: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
112235 -       case 7:
112236 -       case 6: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
112237 -       }
112240 -/* *********************************
112241 -*  Hash Chain
112242 -***********************************/
112243 -#define NEXT_IN_CHAIN(d, mask) chainTable[(d)&mask]
112245 -/* Update chains up to ip (excluded)
112246 -   Assumption : always within prefix (i.e. not within extDict) */
112247 -FORCE_INLINE
112248 -U32 ZSTD_insertAndFindFirstIndex(ZSTD_CCtx *zc, const BYTE *ip, U32 mls)
112250 -       U32 *const hashTable = zc->hashTable;
112251 -       const U32 hashLog = zc->params.cParams.hashLog;
112252 -       U32 *const chainTable = zc->chainTable;
112253 -       const U32 chainMask = (1 << zc->params.cParams.chainLog) - 1;
112254 -       const BYTE *const base = zc->base;
112255 -       const U32 target = (U32)(ip - base);
112256 -       U32 idx = zc->nextToUpdate;
112258 -       while (idx < target) { /* catch up */
112259 -               size_t const h = ZSTD_hashPtr(base + idx, hashLog, mls);
112260 -               NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
112261 -               hashTable[h] = idx;
112262 -               idx++;
112263 -       }
112265 -       zc->nextToUpdate = target;
112266 -       return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
112269 -/* inlining is important to hardwire a hot branch (template emulation) */
112270 -FORCE_INLINE
112271 -size_t ZSTD_HcFindBestMatch_generic(ZSTD_CCtx *zc, /* Index table will be updated */
112272 -                                   const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 mls,
112273 -                                   const U32 extDict)
112275 -       U32 *const chainTable = zc->chainTable;
112276 -       const U32 chainSize = (1 << zc->params.cParams.chainLog);
112277 -       const U32 chainMask = chainSize - 1;
112278 -       const BYTE *const base = zc->base;
112279 -       const BYTE *const dictBase = zc->dictBase;
112280 -       const U32 dictLimit = zc->dictLimit;
112281 -       const BYTE *const prefixStart = base + dictLimit;
112282 -       const BYTE *const dictEnd = dictBase + dictLimit;
112283 -       const U32 lowLimit = zc->lowLimit;
112284 -       const U32 curr = (U32)(ip - base);
112285 -       const U32 minChain = curr > chainSize ? curr - chainSize : 0;
112286 -       int nbAttempts = maxNbAttempts;
112287 -       size_t ml = EQUAL_READ32 - 1;
112289 -       /* HC4 match finder */
112290 -       U32 matchIndex = ZSTD_insertAndFindFirstIndex(zc, ip, mls);
112292 -       for (; (matchIndex > lowLimit) & (nbAttempts > 0); nbAttempts--) {
112293 -               const BYTE *match;
112294 -               size_t currMl = 0;
112295 -               if ((!extDict) || matchIndex >= dictLimit) {
112296 -                       match = base + matchIndex;
112297 -                       if (match[ml] == ip[ml]) /* potentially better */
112298 -                               currMl = ZSTD_count(ip, match, iLimit);
112299 -               } else {
112300 -                       match = dictBase + matchIndex;
112301 -                       if (ZSTD_read32(match) == ZSTD_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
112302 -                               currMl = ZSTD_count_2segments(ip + EQUAL_READ32, match + EQUAL_READ32, iLimit, dictEnd, prefixStart) + EQUAL_READ32;
112303 -               }
112305 -               /* save best solution */
112306 -               if (currMl > ml) {
112307 -                       ml = currMl;
112308 -                       *offsetPtr = curr - matchIndex + ZSTD_REP_MOVE;
112309 -                       if (ip + currMl == iLimit)
112310 -                               break; /* best possible, and avoid read overflow*/
112311 -               }
112313 -               if (matchIndex <= minChain)
112314 -                       break;
112315 -               matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
112316 -       }
112318 -       return ml;
112321 -FORCE_INLINE size_t ZSTD_HcFindBestMatch_selectMLS(ZSTD_CCtx *zc, const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
112322 -                                                  const U32 matchLengthSearch)
112324 -       switch (matchLengthSearch) {
112325 -       default: /* includes case 3 */
112326 -       case 4: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 0);
112327 -       case 5: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 0);
112328 -       case 7:
112329 -       case 6: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 0);
112330 -       }
112333 -FORCE_INLINE size_t ZSTD_HcFindBestMatch_extDict_selectMLS(ZSTD_CCtx *zc, const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
112334 -                                                          const U32 matchLengthSearch)
112336 -       switch (matchLengthSearch) {
112337 -       default: /* includes case 3 */
112338 -       case 4: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 1);
112339 -       case 5: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 1);
112340 -       case 7:
112341 -       case 6: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 1);
112342 -       }
112345 -/* *******************************
112346 -*  Common parser - lazy strategy
112347 -*********************************/
112348 -FORCE_INLINE
112349 -void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 searchMethod, const U32 depth)
112351 -       seqStore_t *seqStorePtr = &(ctx->seqStore);
112352 -       const BYTE *const istart = (const BYTE *)src;
112353 -       const BYTE *ip = istart;
112354 -       const BYTE *anchor = istart;
112355 -       const BYTE *const iend = istart + srcSize;
112356 -       const BYTE *const ilimit = iend - 8;
112357 -       const BYTE *const base = ctx->base + ctx->dictLimit;
112359 -       U32 const maxSearches = 1 << ctx->params.cParams.searchLog;
112360 -       U32 const mls = ctx->params.cParams.searchLength;
112362 -       typedef size_t (*searchMax_f)(ZSTD_CCtx * zc, const BYTE *ip, const BYTE *iLimit, size_t *offsetPtr, U32 maxNbAttempts, U32 matchLengthSearch);
112363 -       searchMax_f const searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS;
112364 -       U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1], savedOffset = 0;
112366 -       /* init */
112367 -       ip += (ip == base);
112368 -       ctx->nextToUpdate3 = ctx->nextToUpdate;
112369 -       {
112370 -               U32 const maxRep = (U32)(ip - base);
112371 -               if (offset_2 > maxRep)
112372 -                       savedOffset = offset_2, offset_2 = 0;
112373 -               if (offset_1 > maxRep)
112374 -                       savedOffset = offset_1, offset_1 = 0;
112375 -       }
112377 -       /* Match Loop */
112378 -       while (ip < ilimit) {
112379 -               size_t matchLength = 0;
112380 -               size_t offset = 0;
112381 -               const BYTE *start = ip + 1;
112383 -               /* check repCode */
112384 -               if ((offset_1 > 0) & (ZSTD_read32(ip + 1) == ZSTD_read32(ip + 1 - offset_1))) {
112385 -                       /* repcode : we take it */
112386 -                       matchLength = ZSTD_count(ip + 1 + EQUAL_READ32, ip + 1 + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32;
112387 -                       if (depth == 0)
112388 -                               goto _storeSequence;
112389 -               }
112391 -               /* first search (depth 0) */
112392 -               {
112393 -                       size_t offsetFound = 99999999;
112394 -                       size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
112395 -                       if (ml2 > matchLength)
112396 -                               matchLength = ml2, start = ip, offset = offsetFound;
112397 -               }
112399 -               if (matchLength < EQUAL_READ32) {
112400 -                       ip += ((ip - anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */
112401 -                       continue;
112402 -               }
112404 -               /* let's try to find a better solution */
112405 -               if (depth >= 1)
112406 -                       while (ip < ilimit) {
112407 -                               ip++;
112408 -                               if ((offset) && ((offset_1 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_1)))) {
112409 -                                       size_t const mlRep = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32;
112410 -                                       int const gain2 = (int)(mlRep * 3);
112411 -                                       int const gain1 = (int)(matchLength * 3 - ZSTD_highbit32((U32)offset + 1) + 1);
112412 -                                       if ((mlRep >= EQUAL_READ32) && (gain2 > gain1))
112413 -                                               matchLength = mlRep, offset = 0, start = ip;
112414 -                               }
112415 -                               {
112416 -                                       size_t offset2 = 99999999;
112417 -                                       size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
112418 -                                       int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
112419 -                                       int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 4);
112420 -                                       if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
112421 -                                               matchLength = ml2, offset = offset2, start = ip;
112422 -                                               continue; /* search a better one */
112423 -                                       }
112424 -                               }
112426 -                               /* let's find an even better one */
112427 -                               if ((depth == 2) && (ip < ilimit)) {
112428 -                                       ip++;
112429 -                                       if ((offset) && ((offset_1 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_1)))) {
112430 -                                               size_t const ml2 = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32;
112431 -                                               int const gain2 = (int)(ml2 * 4);
112432 -                                               int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 1);
112433 -                                               if ((ml2 >= EQUAL_READ32) && (gain2 > gain1))
112434 -                                                       matchLength = ml2, offset = 0, start = ip;
112435 -                                       }
112436 -                                       {
112437 -                                               size_t offset2 = 99999999;
112438 -                                               size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
112439 -                                               int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
112440 -                                               int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 7);
112441 -                                               if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
112442 -                                                       matchLength = ml2, offset = offset2, start = ip;
112443 -                                                       continue;
112444 -                                               }
112445 -                                       }
112446 -                               }
112447 -                               break; /* nothing found : store previous solution */
112448 -                       }
112450 -               /* NOTE:
112451 -                * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior.
112452 -                * (-offset+ZSTD_REP_MOVE-1) is unsigned, and is added to start, which
112453 -                * overflows the pointer, which is undefined behavior.
112454 -                */
112455 -               /* catch up */
112456 -               if (offset) {
112457 -                       while ((start > anchor) && (start > base + offset - ZSTD_REP_MOVE) &&
112458 -                              (start[-1] == (start-offset+ZSTD_REP_MOVE)[-1])) /* only search for offset within prefix */
112459 -                       {
112460 -                               start--;
112461 -                               matchLength++;
112462 -                       }
112463 -                       offset_2 = offset_1;
112464 -                       offset_1 = (U32)(offset - ZSTD_REP_MOVE);
112465 -               }
112467 -       /* store sequence */
112468 -_storeSequence:
112469 -               {
112470 -                       size_t const litLength = start - anchor;
112471 -                       ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength - MINMATCH);
112472 -                       anchor = ip = start + matchLength;
112473 -               }
112475 -               /* check immediate repcode */
112476 -               while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
112477 -                       /* store sequence */
112478 -                       matchLength = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_2, iend) + EQUAL_READ32;
112479 -                       offset = offset_2;
112480 -                       offset_2 = offset_1;
112481 -                       offset_1 = (U32)offset; /* swap repcodes */
112482 -                       ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength - MINMATCH);
112483 -                       ip += matchLength;
112484 -                       anchor = ip;
112485 -                       continue; /* faster when present ... (?) */
112486 -               }
112487 -       }
112489 -       /* Save reps for next block */
112490 -       ctx->repToConfirm[0] = offset_1 ? offset_1 : savedOffset;
112491 -       ctx->repToConfirm[1] = offset_2 ? offset_2 : savedOffset;
112493 -       /* Last Literals */
112494 -       {
112495 -               size_t const lastLLSize = iend - anchor;
112496 -               memcpy(seqStorePtr->lit, anchor, lastLLSize);
112497 -               seqStorePtr->lit += lastLLSize;
112498 -       }
112501 -static void ZSTD_compressBlock_btlazy2(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 1, 2); }
112503 -static void ZSTD_compressBlock_lazy2(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 2); }
112505 -static void ZSTD_compressBlock_lazy(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 1); }
112507 -static void ZSTD_compressBlock_greedy(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 0); }
112509 -FORCE_INLINE
112510 -void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 searchMethod, const U32 depth)
112512 -       seqStore_t *seqStorePtr = &(ctx->seqStore);
112513 -       const BYTE *const istart = (const BYTE *)src;
112514 -       const BYTE *ip = istart;
112515 -       const BYTE *anchor = istart;
112516 -       const BYTE *const iend = istart + srcSize;
112517 -       const BYTE *const ilimit = iend - 8;
112518 -       const BYTE *const base = ctx->base;
112519 -       const U32 dictLimit = ctx->dictLimit;
112520 -       const U32 lowestIndex = ctx->lowLimit;
112521 -       const BYTE *const prefixStart = base + dictLimit;
112522 -       const BYTE *const dictBase = ctx->dictBase;
112523 -       const BYTE *const dictEnd = dictBase + dictLimit;
112524 -       const BYTE *const dictStart = dictBase + ctx->lowLimit;
112526 -       const U32 maxSearches = 1 << ctx->params.cParams.searchLog;
112527 -       const U32 mls = ctx->params.cParams.searchLength;
112529 -       typedef size_t (*searchMax_f)(ZSTD_CCtx * zc, const BYTE *ip, const BYTE *iLimit, size_t *offsetPtr, U32 maxNbAttempts, U32 matchLengthSearch);
112530 -       searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS;
112532 -       U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];
112534 -       /* init */
112535 -       ctx->nextToUpdate3 = ctx->nextToUpdate;
112536 -       ip += (ip == prefixStart);
112538 -       /* Match Loop */
112539 -       while (ip < ilimit) {
112540 -               size_t matchLength = 0;
112541 -               size_t offset = 0;
112542 -               const BYTE *start = ip + 1;
112543 -               U32 curr = (U32)(ip - base);
112545 -               /* check repCode */
112546 -               {
112547 -                       const U32 repIndex = (U32)(curr + 1 - offset_1);
112548 -                       const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
112549 -                       const BYTE *const repMatch = repBase + repIndex;
112550 -                       if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
112551 -                               if (ZSTD_read32(ip + 1) == ZSTD_read32(repMatch)) {
112552 -                                       /* repcode detected we should take it */
112553 -                                       const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
112554 -                                       matchLength =
112555 -                                           ZSTD_count_2segments(ip + 1 + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
112556 -                                       if (depth == 0)
112557 -                                               goto _storeSequence;
112558 -                               }
112559 -               }
112561 -               /* first search (depth 0) */
112562 -               {
112563 -                       size_t offsetFound = 99999999;
112564 -                       size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
112565 -                       if (ml2 > matchLength)
112566 -                               matchLength = ml2, start = ip, offset = offsetFound;
112567 -               }
112569 -               if (matchLength < EQUAL_READ32) {
112570 -                       ip += ((ip - anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */
112571 -                       continue;
112572 -               }
112574 -               /* let's try to find a better solution */
112575 -               if (depth >= 1)
112576 -                       while (ip < ilimit) {
112577 -                               ip++;
112578 -                               curr++;
112579 -                               /* check repCode */
112580 -                               if (offset) {
112581 -                                       const U32 repIndex = (U32)(curr - offset_1);
112582 -                                       const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
112583 -                                       const BYTE *const repMatch = repBase + repIndex;
112584 -                                       if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
112585 -                                               if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) {
112586 -                                                       /* repcode detected */
112587 -                                                       const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
112588 -                                                       size_t const repLength =
112589 -                                                           ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) +
112590 -                                                           EQUAL_READ32;
112591 -                                                       int const gain2 = (int)(repLength * 3);
112592 -                                                       int const gain1 = (int)(matchLength * 3 - ZSTD_highbit32((U32)offset + 1) + 1);
112593 -                                                       if ((repLength >= EQUAL_READ32) && (gain2 > gain1))
112594 -                                                               matchLength = repLength, offset = 0, start = ip;
112595 -                                               }
112596 -                               }
112598 -                               /* search match, depth 1 */
112599 -                               {
112600 -                                       size_t offset2 = 99999999;
112601 -                                       size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
112602 -                                       int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
112603 -                                       int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 4);
112604 -                                       if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
112605 -                                               matchLength = ml2, offset = offset2, start = ip;
112606 -                                               continue; /* search a better one */
112607 -                                       }
112608 -                               }
112610 -                               /* let's find an even better one */
112611 -                               if ((depth == 2) && (ip < ilimit)) {
112612 -                                       ip++;
112613 -                                       curr++;
112614 -                                       /* check repCode */
112615 -                                       if (offset) {
112616 -                                               const U32 repIndex = (U32)(curr - offset_1);
112617 -                                               const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
112618 -                                               const BYTE *const repMatch = repBase + repIndex;
112619 -                                               if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
112620 -                                                       if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) {
112621 -                                                               /* repcode detected */
112622 -                                                               const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
112623 -                                                               size_t repLength = ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend,
112624 -                                                                                                       repEnd, prefixStart) +
112625 -                                                                                  EQUAL_READ32;
112626 -                                                               int gain2 = (int)(repLength * 4);
112627 -                                                               int gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 1);
112628 -                                                               if ((repLength >= EQUAL_READ32) && (gain2 > gain1))
112629 -                                                                       matchLength = repLength, offset = 0, start = ip;
112630 -                                                       }
112631 -                                       }
112633 -                                       /* search match, depth 2 */
112634 -                                       {
112635 -                                               size_t offset2 = 99999999;
112636 -                                               size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
112637 -                                               int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1)); /* raw approx */
112638 -                                               int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 7);
112639 -                                               if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
112640 -                                                       matchLength = ml2, offset = offset2, start = ip;
112641 -                                                       continue;
112642 -                                               }
112643 -                                       }
112644 -                               }
112645 -                               break; /* nothing found : store previous solution */
112646 -                       }
112648 -               /* catch up */
112649 -               if (offset) {
112650 -                       U32 const matchIndex = (U32)((start - base) - (offset - ZSTD_REP_MOVE));
112651 -                       const BYTE *match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
112652 -                       const BYTE *const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
112653 -                       while ((start > anchor) && (match > mStart) && (start[-1] == match[-1])) {
112654 -                               start--;
112655 -                               match--;
112656 -                               matchLength++;
112657 -                       } /* catch up */
112658 -                       offset_2 = offset_1;
112659 -                       offset_1 = (U32)(offset - ZSTD_REP_MOVE);
112660 -               }
112662 -       /* store sequence */
112663 -       _storeSequence : {
112664 -               size_t const litLength = start - anchor;
112665 -               ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength - MINMATCH);
112666 -               anchor = ip = start + matchLength;
112667 -       }
112669 -               /* check immediate repcode */
112670 -               while (ip <= ilimit) {
112671 -                       const U32 repIndex = (U32)((ip - base) - offset_2);
112672 -                       const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
112673 -                       const BYTE *const repMatch = repBase + repIndex;
112674 -                       if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
112675 -                               if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) {
112676 -                                       /* repcode detected we should take it */
112677 -                                       const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
112678 -                                       matchLength =
112679 -                                           ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
112680 -                                       offset = offset_2;
112681 -                                       offset_2 = offset_1;
112682 -                                       offset_1 = (U32)offset; /* swap offset history */
112683 -                                       ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength - MINMATCH);
112684 -                                       ip += matchLength;
112685 -                                       anchor = ip;
112686 -                                       continue; /* faster when present ... (?) */
112687 -                               }
112688 -                       break;
112689 -               }
112690 -       }
112692 -       /* Save reps for next block */
112693 -       ctx->repToConfirm[0] = offset_1;
112694 -       ctx->repToConfirm[1] = offset_2;
112696 -       /* Last Literals */
112697 -       {
112698 -               size_t const lastLLSize = iend - anchor;
112699 -               memcpy(seqStorePtr->lit, anchor, lastLLSize);
112700 -               seqStorePtr->lit += lastLLSize;
112701 -       }
112704 -void ZSTD_compressBlock_greedy_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 0); }
112706 -static void ZSTD_compressBlock_lazy_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
112708 -       ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 1);
112711 -static void ZSTD_compressBlock_lazy2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
112713 -       ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 2);
112716 -static void ZSTD_compressBlock_btlazy2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
112718 -       ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 1, 2);
112721 -/* The optimal parser */
112722 -#include "zstd_opt.h"
112724 -static void ZSTD_compressBlock_btopt(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
112726 -#ifdef ZSTD_OPT_H_91842398743
112727 -       ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 0);
112728 -#else
112729 -       (void)ctx;
112730 -       (void)src;
112731 -       (void)srcSize;
112732 -       return;
112733 -#endif
112736 -static void ZSTD_compressBlock_btopt2(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
112738 -#ifdef ZSTD_OPT_H_91842398743
112739 -       ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 1);
112740 -#else
112741 -       (void)ctx;
112742 -       (void)src;
112743 -       (void)srcSize;
112744 -       return;
112745 -#endif
112748 -static void ZSTD_compressBlock_btopt_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
112750 -#ifdef ZSTD_OPT_H_91842398743
112751 -       ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 0);
112752 -#else
112753 -       (void)ctx;
112754 -       (void)src;
112755 -       (void)srcSize;
112756 -       return;
112757 -#endif
112760 -static void ZSTD_compressBlock_btopt2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
112762 -#ifdef ZSTD_OPT_H_91842398743
112763 -       ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 1);
112764 -#else
112765 -       (void)ctx;
112766 -       (void)src;
112767 -       (void)srcSize;
112768 -       return;
112769 -#endif
112772 -typedef void (*ZSTD_blockCompressor)(ZSTD_CCtx *ctx, const void *src, size_t srcSize);
112774 -static ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict)
112776 -       static const ZSTD_blockCompressor blockCompressor[2][8] = {
112777 -           {ZSTD_compressBlock_fast, ZSTD_compressBlock_doubleFast, ZSTD_compressBlock_greedy, ZSTD_compressBlock_lazy, ZSTD_compressBlock_lazy2,
112778 -            ZSTD_compressBlock_btlazy2, ZSTD_compressBlock_btopt, ZSTD_compressBlock_btopt2},
112779 -           {ZSTD_compressBlock_fast_extDict, ZSTD_compressBlock_doubleFast_extDict, ZSTD_compressBlock_greedy_extDict, ZSTD_compressBlock_lazy_extDict,
112780 -            ZSTD_compressBlock_lazy2_extDict, ZSTD_compressBlock_btlazy2_extDict, ZSTD_compressBlock_btopt_extDict, ZSTD_compressBlock_btopt2_extDict}};
112782 -       return blockCompressor[extDict][(U32)strat];
112785 -static size_t ZSTD_compressBlock_internal(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
112787 -       ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->params.cParams.strategy, zc->lowLimit < zc->dictLimit);
112788 -       const BYTE *const base = zc->base;
112789 -       const BYTE *const istart = (const BYTE *)src;
112790 -       const U32 curr = (U32)(istart - base);
112791 -       if (srcSize < MIN_CBLOCK_SIZE + ZSTD_blockHeaderSize + 1)
112792 -               return 0; /* don't even attempt compression below a certain srcSize */
112793 -       ZSTD_resetSeqStore(&(zc->seqStore));
112794 -       if (curr > zc->nextToUpdate + 384)
112795 -               zc->nextToUpdate = curr - MIN(192, (U32)(curr - zc->nextToUpdate - 384)); /* update tree not updated after finding very long rep matches */
112796 -       blockCompressor(zc, src, srcSize);
112797 -       return ZSTD_compressSequences(zc, dst, dstCapacity, srcSize);
112800 -/*! ZSTD_compress_generic() :
112801 -*   Compress a chunk of data into one or multiple blocks.
112802 -*   All blocks will be terminated, all input will be consumed.
112803 -*   Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
112804 -*   Frame is supposed already started (header already produced)
112805 -*   @return : compressed size, or an error code
112807 -static size_t ZSTD_compress_generic(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, U32 lastFrameChunk)
112809 -       size_t blockSize = cctx->blockSize;
112810 -       size_t remaining = srcSize;
112811 -       const BYTE *ip = (const BYTE *)src;
112812 -       BYTE *const ostart = (BYTE *)dst;
112813 -       BYTE *op = ostart;
112814 -       U32 const maxDist = 1 << cctx->params.cParams.windowLog;
112816 -       if (cctx->params.fParams.checksumFlag && srcSize)
112817 -               xxh64_update(&cctx->xxhState, src, srcSize);
112819 -       while (remaining) {
112820 -               U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
112821 -               size_t cSize;
112823 -               if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE)
112824 -                       return ERROR(dstSize_tooSmall); /* not enough space to store compressed block */
112825 -               if (remaining < blockSize)
112826 -                       blockSize = remaining;
112828 -               /* preemptive overflow correction */
112829 -               if (cctx->lowLimit > (3U << 29)) {
112830 -                       U32 const cycleMask = (1 << ZSTD_cycleLog(cctx->params.cParams.hashLog, cctx->params.cParams.strategy)) - 1;
112831 -                       U32 const curr = (U32)(ip - cctx->base);
112832 -                       U32 const newCurr = (curr & cycleMask) + (1 << cctx->params.cParams.windowLog);
112833 -                       U32 const correction = curr - newCurr;
112834 -                       ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_64 <= 30);
112835 -                       ZSTD_reduceIndex(cctx, correction);
112836 -                       cctx->base += correction;
112837 -                       cctx->dictBase += correction;
112838 -                       cctx->lowLimit -= correction;
112839 -                       cctx->dictLimit -= correction;
112840 -                       if (cctx->nextToUpdate < correction)
112841 -                               cctx->nextToUpdate = 0;
112842 -                       else
112843 -                               cctx->nextToUpdate -= correction;
112844 -               }
112846 -               if ((U32)(ip + blockSize - cctx->base) > cctx->loadedDictEnd + maxDist) {
112847 -                       /* enforce maxDist */
112848 -                       U32 const newLowLimit = (U32)(ip + blockSize - cctx->base) - maxDist;
112849 -                       if (cctx->lowLimit < newLowLimit)
112850 -                               cctx->lowLimit = newLowLimit;
112851 -                       if (cctx->dictLimit < cctx->lowLimit)
112852 -                               cctx->dictLimit = cctx->lowLimit;
112853 -               }
112855 -               cSize = ZSTD_compressBlock_internal(cctx, op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize, ip, blockSize);
112856 -               if (ZSTD_isError(cSize))
112857 -                       return cSize;
112859 -               if (cSize == 0) { /* block is not compressible */
112860 -                       U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw) << 1) + (U32)(blockSize << 3);
112861 -                       if (blockSize + ZSTD_blockHeaderSize > dstCapacity)
112862 -                               return ERROR(dstSize_tooSmall);
112863 -                       ZSTD_writeLE32(op, cBlockHeader24); /* no pb, 4th byte will be overwritten */
112864 -                       memcpy(op + ZSTD_blockHeaderSize, ip, blockSize);
112865 -                       cSize = ZSTD_blockHeaderSize + blockSize;
112866 -               } else {
112867 -                       U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed) << 1) + (U32)(cSize << 3);
112868 -                       ZSTD_writeLE24(op, cBlockHeader24);
112869 -                       cSize += ZSTD_blockHeaderSize;
112870 -               }
112872 -               remaining -= blockSize;
112873 -               dstCapacity -= cSize;
112874 -               ip += blockSize;
112875 -               op += cSize;
112876 -       }
112878 -       if (lastFrameChunk && (op > ostart))
112879 -               cctx->stage = ZSTDcs_ending;
112880 -       return op - ostart;
112883 -static size_t ZSTD_writeFrameHeader(void *dst, size_t dstCapacity, ZSTD_parameters params, U64 pledgedSrcSize, U32 dictID)
112885 -       BYTE *const op = (BYTE *)dst;
112886 -       U32 const dictIDSizeCode = (dictID > 0) + (dictID >= 256) + (dictID >= 65536); /* 0-3 */
112887 -       U32 const checksumFlag = params.fParams.checksumFlag > 0;
112888 -       U32 const windowSize = 1U << params.cParams.windowLog;
112889 -       U32 const singleSegment = params.fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
112890 -       BYTE const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
112891 -       U32 const fcsCode =
112892 -           params.fParams.contentSizeFlag ? (pledgedSrcSize >= 256) + (pledgedSrcSize >= 65536 + 256) + (pledgedSrcSize >= 0xFFFFFFFFU) : 0; /* 0-3 */
112893 -       BYTE const frameHeaderDecriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag << 2) + (singleSegment << 5) + (fcsCode << 6));
112894 -       size_t pos;
112896 -       if (dstCapacity < ZSTD_frameHeaderSize_max)
112897 -               return ERROR(dstSize_tooSmall);
112899 -       ZSTD_writeLE32(dst, ZSTD_MAGICNUMBER);
112900 -       op[4] = frameHeaderDecriptionByte;
112901 -       pos = 5;
112902 -       if (!singleSegment)
112903 -               op[pos++] = windowLogByte;
112904 -       switch (dictIDSizeCode) {
112905 -       default: /* impossible */
112906 -       case 0: break;
112907 -       case 1:
112908 -               op[pos] = (BYTE)(dictID);
112909 -               pos++;
112910 -               break;
112911 -       case 2:
112912 -               ZSTD_writeLE16(op + pos, (U16)dictID);
112913 -               pos += 2;
112914 -               break;
112915 -       case 3:
112916 -               ZSTD_writeLE32(op + pos, dictID);
112917 -               pos += 4;
112918 -               break;
112919 -       }
112920 -       switch (fcsCode) {
112921 -       default: /* impossible */
112922 -       case 0:
112923 -               if (singleSegment)
112924 -                       op[pos++] = (BYTE)(pledgedSrcSize);
112925 -               break;
112926 -       case 1:
112927 -               ZSTD_writeLE16(op + pos, (U16)(pledgedSrcSize - 256));
112928 -               pos += 2;
112929 -               break;
112930 -       case 2:
112931 -               ZSTD_writeLE32(op + pos, (U32)(pledgedSrcSize));
112932 -               pos += 4;
112933 -               break;
112934 -       case 3:
112935 -               ZSTD_writeLE64(op + pos, (U64)(pledgedSrcSize));
112936 -               pos += 8;
112937 -               break;
112938 -       }
112939 -       return pos;
112942 -static size_t ZSTD_compressContinue_internal(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, U32 frame, U32 lastFrameChunk)
112944 -       const BYTE *const ip = (const BYTE *)src;
112945 -       size_t fhSize = 0;
112947 -       if (cctx->stage == ZSTDcs_created)
112948 -               return ERROR(stage_wrong); /* missing init (ZSTD_compressBegin) */
112950 -       if (frame && (cctx->stage == ZSTDcs_init)) {
112951 -               fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, cctx->frameContentSize, cctx->dictID);
112952 -               if (ZSTD_isError(fhSize))
112953 -                       return fhSize;
112954 -               dstCapacity -= fhSize;
112955 -               dst = (char *)dst + fhSize;
112956 -               cctx->stage = ZSTDcs_ongoing;
112957 -       }
112959 -       /* Check if blocks follow each other */
112960 -       if (src != cctx->nextSrc) {
112961 -               /* not contiguous */
112962 -               ptrdiff_t const delta = cctx->nextSrc - ip;
112963 -               cctx->lowLimit = cctx->dictLimit;
112964 -               cctx->dictLimit = (U32)(cctx->nextSrc - cctx->base);
112965 -               cctx->dictBase = cctx->base;
112966 -               cctx->base -= delta;
112967 -               cctx->nextToUpdate = cctx->dictLimit;
112968 -               if (cctx->dictLimit - cctx->lowLimit < HASH_READ_SIZE)
112969 -                       cctx->lowLimit = cctx->dictLimit; /* too small extDict */
112970 -       }
112972 -       /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
112973 -       if ((ip + srcSize > cctx->dictBase + cctx->lowLimit) & (ip < cctx->dictBase + cctx->dictLimit)) {
112974 -               ptrdiff_t const highInputIdx = (ip + srcSize) - cctx->dictBase;
112975 -               U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)cctx->dictLimit) ? cctx->dictLimit : (U32)highInputIdx;
112976 -               cctx->lowLimit = lowLimitMax;
112977 -       }
112979 -       cctx->nextSrc = ip + srcSize;
112981 -       if (srcSize) {
112982 -               size_t const cSize = frame ? ZSTD_compress_generic(cctx, dst, dstCapacity, src, srcSize, lastFrameChunk)
112983 -                                          : ZSTD_compressBlock_internal(cctx, dst, dstCapacity, src, srcSize);
112984 -               if (ZSTD_isError(cSize))
112985 -                       return cSize;
112986 -               return cSize + fhSize;
112987 -       } else
112988 -               return fhSize;
112991 -size_t ZSTD_compressContinue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
112993 -       return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 0);
112996 -size_t ZSTD_getBlockSizeMax(ZSTD_CCtx *cctx) { return MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, 1 << cctx->params.cParams.windowLog); }
112998 -size_t ZSTD_compressBlock(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
113000 -       size_t const blockSizeMax = ZSTD_getBlockSizeMax(cctx);
113001 -       if (srcSize > blockSizeMax)
113002 -               return ERROR(srcSize_wrong);
113003 -       return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0, 0);
113006 -/*! ZSTD_loadDictionaryContent() :
113007 - *  @return : 0, or an error code
113008 - */
113009 -static size_t ZSTD_loadDictionaryContent(ZSTD_CCtx *zc, const void *src, size_t srcSize)
113011 -       const BYTE *const ip = (const BYTE *)src;
113012 -       const BYTE *const iend = ip + srcSize;
113014 -       /* input becomes curr prefix */
113015 -       zc->lowLimit = zc->dictLimit;
113016 -       zc->dictLimit = (U32)(zc->nextSrc - zc->base);
113017 -       zc->dictBase = zc->base;
113018 -       zc->base += ip - zc->nextSrc;
113019 -       zc->nextToUpdate = zc->dictLimit;
113020 -       zc->loadedDictEnd = zc->forceWindow ? 0 : (U32)(iend - zc->base);
113022 -       zc->nextSrc = iend;
113023 -       if (srcSize <= HASH_READ_SIZE)
113024 -               return 0;
113026 -       switch (zc->params.cParams.strategy) {
113027 -       case ZSTD_fast: ZSTD_fillHashTable(zc, iend, zc->params.cParams.searchLength); break;
113029 -       case ZSTD_dfast: ZSTD_fillDoubleHashTable(zc, iend, zc->params.cParams.searchLength); break;
113031 -       case ZSTD_greedy:
113032 -       case ZSTD_lazy:
113033 -       case ZSTD_lazy2:
113034 -               if (srcSize >= HASH_READ_SIZE)
113035 -                       ZSTD_insertAndFindFirstIndex(zc, iend - HASH_READ_SIZE, zc->params.cParams.searchLength);
113036 -               break;
113038 -       case ZSTD_btlazy2:
113039 -       case ZSTD_btopt:
113040 -       case ZSTD_btopt2:
113041 -               if (srcSize >= HASH_READ_SIZE)
113042 -                       ZSTD_updateTree(zc, iend - HASH_READ_SIZE, iend, 1 << zc->params.cParams.searchLog, zc->params.cParams.searchLength);
113043 -               break;
113045 -       default:
113046 -               return ERROR(GENERIC); /* strategy doesn't exist; impossible */
113047 -       }
113049 -       zc->nextToUpdate = (U32)(iend - zc->base);
113050 -       return 0;
113053 -/* Dictionaries that assign zero probability to symbols that show up causes problems
113054 -   when FSE encoding.  Refuse dictionaries that assign zero probability to symbols
113055 -   that we may encounter during compression.
113056 -   NOTE: This behavior is not standard and could be improved in the future. */
113057 -static size_t ZSTD_checkDictNCount(short *normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue)
113059 -       U32 s;
113060 -       if (dictMaxSymbolValue < maxSymbolValue)
113061 -               return ERROR(dictionary_corrupted);
113062 -       for (s = 0; s <= maxSymbolValue; ++s) {
113063 -               if (normalizedCounter[s] == 0)
113064 -                       return ERROR(dictionary_corrupted);
113065 -       }
113066 -       return 0;
113069 -/* Dictionary format :
113070 - * See :
113071 - * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
113072 - */
113073 -/*! ZSTD_loadZstdDictionary() :
113074 - * @return : 0, or an error code
113075 - *  assumptions : magic number supposed already checked
113076 - *                dictSize supposed > 8
113077 - */
113078 -static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx *cctx, const void *dict, size_t dictSize)
113080 -       const BYTE *dictPtr = (const BYTE *)dict;
113081 -       const BYTE *const dictEnd = dictPtr + dictSize;
113082 -       short offcodeNCount[MaxOff + 1];
113083 -       unsigned offcodeMaxValue = MaxOff;
113085 -       dictPtr += 4; /* skip magic number */
113086 -       cctx->dictID = cctx->params.fParams.noDictIDFlag ? 0 : ZSTD_readLE32(dictPtr);
113087 -       dictPtr += 4;
113089 -       {
113090 -               size_t const hufHeaderSize = HUF_readCTable_wksp(cctx->hufTable, 255, dictPtr, dictEnd - dictPtr, cctx->tmpCounters, sizeof(cctx->tmpCounters));
113091 -               if (HUF_isError(hufHeaderSize))
113092 -                       return ERROR(dictionary_corrupted);
113093 -               dictPtr += hufHeaderSize;
113094 -       }
113096 -       {
113097 -               unsigned offcodeLog;
113098 -               size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd - dictPtr);
113099 -               if (FSE_isError(offcodeHeaderSize))
113100 -                       return ERROR(dictionary_corrupted);
113101 -               if (offcodeLog > OffFSELog)
113102 -                       return ERROR(dictionary_corrupted);
113103 -               /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
113104 -               CHECK_E(FSE_buildCTable_wksp(cctx->offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)),
113105 -                       dictionary_corrupted);
113106 -               dictPtr += offcodeHeaderSize;
113107 -       }
113109 -       {
113110 -               short matchlengthNCount[MaxML + 1];
113111 -               unsigned matchlengthMaxValue = MaxML, matchlengthLog;
113112 -               size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd - dictPtr);
113113 -               if (FSE_isError(matchlengthHeaderSize))
113114 -                       return ERROR(dictionary_corrupted);
113115 -               if (matchlengthLog > MLFSELog)
113116 -                       return ERROR(dictionary_corrupted);
113117 -               /* Every match length code must have non-zero probability */
113118 -               CHECK_F(ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
113119 -               CHECK_E(
113120 -                   FSE_buildCTable_wksp(cctx->matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)),
113121 -                   dictionary_corrupted);
113122 -               dictPtr += matchlengthHeaderSize;
113123 -       }
113125 -       {
113126 -               short litlengthNCount[MaxLL + 1];
113127 -               unsigned litlengthMaxValue = MaxLL, litlengthLog;
113128 -               size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd - dictPtr);
113129 -               if (FSE_isError(litlengthHeaderSize))
113130 -                       return ERROR(dictionary_corrupted);
113131 -               if (litlengthLog > LLFSELog)
113132 -                       return ERROR(dictionary_corrupted);
113133 -               /* Every literal length code must have non-zero probability */
113134 -               CHECK_F(ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
113135 -               CHECK_E(FSE_buildCTable_wksp(cctx->litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)),
113136 -                       dictionary_corrupted);
113137 -               dictPtr += litlengthHeaderSize;
113138 -       }
113140 -       if (dictPtr + 12 > dictEnd)
113141 -               return ERROR(dictionary_corrupted);
113142 -       cctx->rep[0] = ZSTD_readLE32(dictPtr + 0);
113143 -       cctx->rep[1] = ZSTD_readLE32(dictPtr + 4);
113144 -       cctx->rep[2] = ZSTD_readLE32(dictPtr + 8);
113145 -       dictPtr += 12;
113147 -       {
113148 -               size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
113149 -               U32 offcodeMax = MaxOff;
113150 -               if (dictContentSize <= ((U32)-1) - 128 KB) {
113151 -                       U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
113152 -                       offcodeMax = ZSTD_highbit32(maxOffset);              /* Calculate minimum offset code required to represent maxOffset */
113153 -               }
113154 -               /* All offset values <= dictContentSize + 128 KB must be representable */
113155 -               CHECK_F(ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)));
113156 -               /* All repCodes must be <= dictContentSize and != 0*/
113157 -               {
113158 -                       U32 u;
113159 -                       for (u = 0; u < 3; u++) {
113160 -                               if (cctx->rep[u] == 0)
113161 -                                       return ERROR(dictionary_corrupted);
113162 -                               if (cctx->rep[u] > dictContentSize)
113163 -                                       return ERROR(dictionary_corrupted);
113164 -                       }
113165 -               }
113167 -               cctx->flagStaticTables = 1;
113168 -               cctx->flagStaticHufTable = HUF_repeat_valid;
113169 -               return ZSTD_loadDictionaryContent(cctx, dictPtr, dictContentSize);
113170 -       }
113173 -/** ZSTD_compress_insertDictionary() :
113174 -*   @return : 0, or an error code */
113175 -static size_t ZSTD_compress_insertDictionary(ZSTD_CCtx *cctx, const void *dict, size_t dictSize)
113177 -       if ((dict == NULL) || (dictSize <= 8))
113178 -               return 0;
113180 -       /* dict as pure content */
113181 -       if ((ZSTD_readLE32(dict) != ZSTD_DICT_MAGIC) || (cctx->forceRawDict))
113182 -               return ZSTD_loadDictionaryContent(cctx, dict, dictSize);
113184 -       /* dict as zstd dictionary */
113185 -       return ZSTD_loadZstdDictionary(cctx, dict, dictSize);
113188 -/*! ZSTD_compressBegin_internal() :
113189 -*   @return : 0, or an error code */
113190 -static size_t ZSTD_compressBegin_internal(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, ZSTD_parameters params, U64 pledgedSrcSize)
113192 -       ZSTD_compResetPolicy_e const crp = dictSize ? ZSTDcrp_fullReset : ZSTDcrp_continue;
113193 -       CHECK_F(ZSTD_resetCCtx_advanced(cctx, params, pledgedSrcSize, crp));
113194 -       return ZSTD_compress_insertDictionary(cctx, dict, dictSize);
113197 -/*! ZSTD_compressBegin_advanced() :
113198 -*   @return : 0, or an error code */
113199 -size_t ZSTD_compressBegin_advanced(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize)
113201 -       /* compression parameters verification and optimization */
113202 -       CHECK_F(ZSTD_checkCParams(params.cParams));
113203 -       return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, pledgedSrcSize);
113206 -size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, int compressionLevel)
113208 -       ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize);
113209 -       return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, 0);
113212 -size_t ZSTD_compressBegin(ZSTD_CCtx *cctx, int compressionLevel) { return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel); }
113214 -/*! ZSTD_writeEpilogue() :
113215 -*   Ends a frame.
113216 -*   @return : nb of bytes written into dst (or an error code) */
113217 -static size_t ZSTD_writeEpilogue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity)
113219 -       BYTE *const ostart = (BYTE *)dst;
113220 -       BYTE *op = ostart;
113221 -       size_t fhSize = 0;
113223 -       if (cctx->stage == ZSTDcs_created)
113224 -               return ERROR(stage_wrong); /* init missing */
113226 -       /* special case : empty frame */
113227 -       if (cctx->stage == ZSTDcs_init) {
113228 -               fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, 0, 0);
113229 -               if (ZSTD_isError(fhSize))
113230 -                       return fhSize;
113231 -               dstCapacity -= fhSize;
113232 -               op += fhSize;
113233 -               cctx->stage = ZSTDcs_ongoing;
113234 -       }
113236 -       if (cctx->stage != ZSTDcs_ending) {
113237 -               /* write one last empty block, make it the "last" block */
113238 -               U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw) << 1) + 0;
113239 -               if (dstCapacity < 4)
113240 -                       return ERROR(dstSize_tooSmall);
113241 -               ZSTD_writeLE32(op, cBlockHeader24);
113242 -               op += ZSTD_blockHeaderSize;
113243 -               dstCapacity -= ZSTD_blockHeaderSize;
113244 -       }
113246 -       if (cctx->params.fParams.checksumFlag) {
113247 -               U32 const checksum = (U32)xxh64_digest(&cctx->xxhState);
113248 -               if (dstCapacity < 4)
113249 -                       return ERROR(dstSize_tooSmall);
113250 -               ZSTD_writeLE32(op, checksum);
113251 -               op += 4;
113252 -       }
113254 -       cctx->stage = ZSTDcs_created; /* return to "created but no init" status */
113255 -       return op - ostart;
113258 -size_t ZSTD_compressEnd(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
113260 -       size_t endResult;
113261 -       size_t const cSize = ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 1);
113262 -       if (ZSTD_isError(cSize))
113263 -               return cSize;
113264 -       endResult = ZSTD_writeEpilogue(cctx, (char *)dst + cSize, dstCapacity - cSize);
113265 -       if (ZSTD_isError(endResult))
113266 -               return endResult;
113267 -       return cSize + endResult;
113270 -static size_t ZSTD_compress_internal(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize,
113271 -                                    ZSTD_parameters params)
113273 -       CHECK_F(ZSTD_compressBegin_internal(cctx, dict, dictSize, params, srcSize));
113274 -       return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
113277 -size_t ZSTD_compress_usingDict(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize,
113278 -                              ZSTD_parameters params)
113280 -       return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params);
113283 -size_t ZSTD_compressCCtx(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, ZSTD_parameters params)
113285 -       return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, NULL, 0, params);
113288 -/* =====  Dictionary API  ===== */
113290 -struct ZSTD_CDict_s {
113291 -       void *dictBuffer;
113292 -       const void *dictContent;
113293 -       size_t dictContentSize;
113294 -       ZSTD_CCtx *refContext;
113295 -}; /* typedef'd tp ZSTD_CDict within "zstd.h" */
113297 -size_t ZSTD_CDictWorkspaceBound(ZSTD_compressionParameters cParams) { return ZSTD_CCtxWorkspaceBound(cParams) + ZSTD_ALIGN(sizeof(ZSTD_CDict)); }
113299 -static ZSTD_CDict *ZSTD_createCDict_advanced(const void *dictBuffer, size_t dictSize, unsigned byReference, ZSTD_parameters params, ZSTD_customMem customMem)
113301 -       if (!customMem.customAlloc || !customMem.customFree)
113302 -               return NULL;
113304 -       {
113305 -               ZSTD_CDict *const cdict = (ZSTD_CDict *)ZSTD_malloc(sizeof(ZSTD_CDict), customMem);
113306 -               ZSTD_CCtx *const cctx = ZSTD_createCCtx_advanced(customMem);
113308 -               if (!cdict || !cctx) {
113309 -                       ZSTD_free(cdict, customMem);
113310 -                       ZSTD_freeCCtx(cctx);
113311 -                       return NULL;
113312 -               }
113314 -               if ((byReference) || (!dictBuffer) || (!dictSize)) {
113315 -                       cdict->dictBuffer = NULL;
113316 -                       cdict->dictContent = dictBuffer;
113317 -               } else {
113318 -                       void *const internalBuffer = ZSTD_malloc(dictSize, customMem);
113319 -                       if (!internalBuffer) {
113320 -                               ZSTD_free(cctx, customMem);
113321 -                               ZSTD_free(cdict, customMem);
113322 -                               return NULL;
113323 -                       }
113324 -                       memcpy(internalBuffer, dictBuffer, dictSize);
113325 -                       cdict->dictBuffer = internalBuffer;
113326 -                       cdict->dictContent = internalBuffer;
113327 -               }
113329 -               {
113330 -                       size_t const errorCode = ZSTD_compressBegin_advanced(cctx, cdict->dictContent, dictSize, params, 0);
113331 -                       if (ZSTD_isError(errorCode)) {
113332 -                               ZSTD_free(cdict->dictBuffer, customMem);
113333 -                               ZSTD_free(cdict, customMem);
113334 -                               ZSTD_freeCCtx(cctx);
113335 -                               return NULL;
113336 -                       }
113337 -               }
113339 -               cdict->refContext = cctx;
113340 -               cdict->dictContentSize = dictSize;
113341 -               return cdict;
113342 -       }
113345 -ZSTD_CDict *ZSTD_initCDict(const void *dict, size_t dictSize, ZSTD_parameters params, void *workspace, size_t workspaceSize)
113347 -       ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
113348 -       return ZSTD_createCDict_advanced(dict, dictSize, 1, params, stackMem);
113351 -size_t ZSTD_freeCDict(ZSTD_CDict *cdict)
113353 -       if (cdict == NULL)
113354 -               return 0; /* support free on NULL */
113355 -       {
113356 -               ZSTD_customMem const cMem = cdict->refContext->customMem;
113357 -               ZSTD_freeCCtx(cdict->refContext);
113358 -               ZSTD_free(cdict->dictBuffer, cMem);
113359 -               ZSTD_free(cdict, cMem);
113360 -               return 0;
113361 -       }
113364 -static ZSTD_parameters ZSTD_getParamsFromCDict(const ZSTD_CDict *cdict) { return ZSTD_getParamsFromCCtx(cdict->refContext); }
113366 -size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx *cctx, const ZSTD_CDict *cdict, unsigned long long pledgedSrcSize)
113368 -       if (cdict->dictContentSize)
113369 -               CHECK_F(ZSTD_copyCCtx(cctx, cdict->refContext, pledgedSrcSize))
113370 -       else {
113371 -               ZSTD_parameters params = cdict->refContext->params;
113372 -               params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
113373 -               CHECK_F(ZSTD_compressBegin_advanced(cctx, NULL, 0, params, pledgedSrcSize));
113374 -       }
113375 -       return 0;
113378 -/*! ZSTD_compress_usingCDict() :
113379 -*   Compression using a digested Dictionary.
113380 -*   Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times.
113381 -*   Note that compression level is decided during dictionary creation */
113382 -size_t ZSTD_compress_usingCDict(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const ZSTD_CDict *cdict)
113384 -       CHECK_F(ZSTD_compressBegin_usingCDict(cctx, cdict, srcSize));
113386 -       if (cdict->refContext->params.fParams.contentSizeFlag == 1) {
113387 -               cctx->params.fParams.contentSizeFlag = 1;
113388 -               cctx->frameContentSize = srcSize;
113389 -       } else {
113390 -               cctx->params.fParams.contentSizeFlag = 0;
113391 -       }
113393 -       return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
113396 -/* ******************************************************************
113397 -*  Streaming
113398 -********************************************************************/
113400 -typedef enum { zcss_init, zcss_load, zcss_flush, zcss_final } ZSTD_cStreamStage;
113402 -struct ZSTD_CStream_s {
113403 -       ZSTD_CCtx *cctx;
113404 -       ZSTD_CDict *cdictLocal;
113405 -       const ZSTD_CDict *cdict;
113406 -       char *inBuff;
113407 -       size_t inBuffSize;
113408 -       size_t inToCompress;
113409 -       size_t inBuffPos;
113410 -       size_t inBuffTarget;
113411 -       size_t blockSize;
113412 -       char *outBuff;
113413 -       size_t outBuffSize;
113414 -       size_t outBuffContentSize;
113415 -       size_t outBuffFlushedSize;
113416 -       ZSTD_cStreamStage stage;
113417 -       U32 checksum;
113418 -       U32 frameEnded;
113419 -       U64 pledgedSrcSize;
113420 -       U64 inputProcessed;
113421 -       ZSTD_parameters params;
113422 -       ZSTD_customMem customMem;
113423 -}; /* typedef'd to ZSTD_CStream within "zstd.h" */
113425 -size_t ZSTD_CStreamWorkspaceBound(ZSTD_compressionParameters cParams)
113427 -       size_t const inBuffSize = (size_t)1 << cParams.windowLog;
113428 -       size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, inBuffSize);
113429 -       size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1;
113431 -       return ZSTD_CCtxWorkspaceBound(cParams) + ZSTD_ALIGN(sizeof(ZSTD_CStream)) + ZSTD_ALIGN(inBuffSize) + ZSTD_ALIGN(outBuffSize);
113434 -ZSTD_CStream *ZSTD_createCStream_advanced(ZSTD_customMem customMem)
113436 -       ZSTD_CStream *zcs;
113438 -       if (!customMem.customAlloc || !customMem.customFree)
113439 -               return NULL;
113441 -       zcs = (ZSTD_CStream *)ZSTD_malloc(sizeof(ZSTD_CStream), customMem);
113442 -       if (zcs == NULL)
113443 -               return NULL;
113444 -       memset(zcs, 0, sizeof(ZSTD_CStream));
113445 -       memcpy(&zcs->customMem, &customMem, sizeof(ZSTD_customMem));
113446 -       zcs->cctx = ZSTD_createCCtx_advanced(customMem);
113447 -       if (zcs->cctx == NULL) {
113448 -               ZSTD_freeCStream(zcs);
113449 -               return NULL;
113450 -       }
113451 -       return zcs;
113454 -size_t ZSTD_freeCStream(ZSTD_CStream *zcs)
113456 -       if (zcs == NULL)
113457 -               return 0; /* support free on NULL */
113458 -       {
113459 -               ZSTD_customMem const cMem = zcs->customMem;
113460 -               ZSTD_freeCCtx(zcs->cctx);
113461 -               zcs->cctx = NULL;
113462 -               ZSTD_freeCDict(zcs->cdictLocal);
113463 -               zcs->cdictLocal = NULL;
113464 -               ZSTD_free(zcs->inBuff, cMem);
113465 -               zcs->inBuff = NULL;
113466 -               ZSTD_free(zcs->outBuff, cMem);
113467 -               zcs->outBuff = NULL;
113468 -               ZSTD_free(zcs, cMem);
113469 -               return 0;
113470 -       }
113473 -/*======   Initialization   ======*/
113475 -size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; }
113476 -size_t ZSTD_CStreamOutSize(void) { return ZSTD_compressBound(ZSTD_BLOCKSIZE_ABSOLUTEMAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */; }
113478 -static size_t ZSTD_resetCStream_internal(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize)
113480 -       if (zcs->inBuffSize == 0)
113481 -               return ERROR(stage_wrong); /* zcs has not been init at least once => can't reset */
113483 -       if (zcs->cdict)
113484 -               CHECK_F(ZSTD_compressBegin_usingCDict(zcs->cctx, zcs->cdict, pledgedSrcSize))
113485 -       else
113486 -               CHECK_F(ZSTD_compressBegin_advanced(zcs->cctx, NULL, 0, zcs->params, pledgedSrcSize));
113488 -       zcs->inToCompress = 0;
113489 -       zcs->inBuffPos = 0;
113490 -       zcs->inBuffTarget = zcs->blockSize;
113491 -       zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
113492 -       zcs->stage = zcss_load;
113493 -       zcs->frameEnded = 0;
113494 -       zcs->pledgedSrcSize = pledgedSrcSize;
113495 -       zcs->inputProcessed = 0;
113496 -       return 0; /* ready to go */
113499 -size_t ZSTD_resetCStream(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize)
113502 -       zcs->params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
113504 -       return ZSTD_resetCStream_internal(zcs, pledgedSrcSize);
113507 -static size_t ZSTD_initCStream_advanced(ZSTD_CStream *zcs, const void *dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize)
113509 -       /* allocate buffers */
113510 -       {
113511 -               size_t const neededInBuffSize = (size_t)1 << params.cParams.windowLog;
113512 -               if (zcs->inBuffSize < neededInBuffSize) {
113513 -                       zcs->inBuffSize = neededInBuffSize;
113514 -                       ZSTD_free(zcs->inBuff, zcs->customMem);
113515 -                       zcs->inBuff = (char *)ZSTD_malloc(neededInBuffSize, zcs->customMem);
113516 -                       if (zcs->inBuff == NULL)
113517 -                               return ERROR(memory_allocation);
113518 -               }
113519 -               zcs->blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, neededInBuffSize);
113520 -       }
113521 -       if (zcs->outBuffSize < ZSTD_compressBound(zcs->blockSize) + 1) {
113522 -               zcs->outBuffSize = ZSTD_compressBound(zcs->blockSize) + 1;
113523 -               ZSTD_free(zcs->outBuff, zcs->customMem);
113524 -               zcs->outBuff = (char *)ZSTD_malloc(zcs->outBuffSize, zcs->customMem);
113525 -               if (zcs->outBuff == NULL)
113526 -                       return ERROR(memory_allocation);
113527 -       }
113529 -       if (dict && dictSize >= 8) {
113530 -               ZSTD_freeCDict(zcs->cdictLocal);
113531 -               zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, 0, params, zcs->customMem);
113532 -               if (zcs->cdictLocal == NULL)
113533 -                       return ERROR(memory_allocation);
113534 -               zcs->cdict = zcs->cdictLocal;
113535 -       } else
113536 -               zcs->cdict = NULL;
113538 -       zcs->checksum = params.fParams.checksumFlag > 0;
113539 -       zcs->params = params;
113541 -       return ZSTD_resetCStream_internal(zcs, pledgedSrcSize);
113544 -ZSTD_CStream *ZSTD_initCStream(ZSTD_parameters params, unsigned long long pledgedSrcSize, void *workspace, size_t workspaceSize)
113546 -       ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
113547 -       ZSTD_CStream *const zcs = ZSTD_createCStream_advanced(stackMem);
113548 -       if (zcs) {
113549 -               size_t const code = ZSTD_initCStream_advanced(zcs, NULL, 0, params, pledgedSrcSize);
113550 -               if (ZSTD_isError(code)) {
113551 -                       return NULL;
113552 -               }
113553 -       }
113554 -       return zcs;
113557 -ZSTD_CStream *ZSTD_initCStream_usingCDict(const ZSTD_CDict *cdict, unsigned long long pledgedSrcSize, void *workspace, size_t workspaceSize)
113559 -       ZSTD_parameters const params = ZSTD_getParamsFromCDict(cdict);
113560 -       ZSTD_CStream *const zcs = ZSTD_initCStream(params, pledgedSrcSize, workspace, workspaceSize);
113561 -       if (zcs) {
113562 -               zcs->cdict = cdict;
113563 -               if (ZSTD_isError(ZSTD_resetCStream_internal(zcs, pledgedSrcSize))) {
113564 -                       return NULL;
113565 -               }
113566 -       }
113567 -       return zcs;
113570 -/*======   Compression   ======*/
113572 -typedef enum { zsf_gather, zsf_flush, zsf_end } ZSTD_flush_e;
113574 -ZSTD_STATIC size_t ZSTD_limitCopy(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
113576 -       size_t const length = MIN(dstCapacity, srcSize);
113577 -       memcpy(dst, src, length);
113578 -       return length;
113581 -static size_t ZSTD_compressStream_generic(ZSTD_CStream *zcs, void *dst, size_t *dstCapacityPtr, const void *src, size_t *srcSizePtr, ZSTD_flush_e const flush)
113583 -       U32 someMoreWork = 1;
113584 -       const char *const istart = (const char *)src;
113585 -       const char *const iend = istart + *srcSizePtr;
113586 -       const char *ip = istart;
113587 -       char *const ostart = (char *)dst;
113588 -       char *const oend = ostart + *dstCapacityPtr;
113589 -       char *op = ostart;
113591 -       while (someMoreWork) {
113592 -               switch (zcs->stage) {
113593 -               case zcss_init:
113594 -                       return ERROR(init_missing); /* call ZBUFF_compressInit() first ! */
113596 -               case zcss_load:
113597 -                       /* complete inBuffer */
113598 -                       {
113599 -                               size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
113600 -                               size_t const loaded = ZSTD_limitCopy(zcs->inBuff + zcs->inBuffPos, toLoad, ip, iend - ip);
113601 -                               zcs->inBuffPos += loaded;
113602 -                               ip += loaded;
113603 -                               if ((zcs->inBuffPos == zcs->inToCompress) || (!flush && (toLoad != loaded))) {
113604 -                                       someMoreWork = 0;
113605 -                                       break; /* not enough input to get a full block : stop there, wait for more */
113606 -                               }
113607 -                       }
113608 -                       /* compress curr block (note : this stage cannot be stopped in the middle) */
113609 -                       {
113610 -                               void *cDst;
113611 -                               size_t cSize;
113612 -                               size_t const iSize = zcs->inBuffPos - zcs->inToCompress;
113613 -                               size_t oSize = oend - op;
113614 -                               if (oSize >= ZSTD_compressBound(iSize))
113615 -                                       cDst = op; /* compress directly into output buffer (avoid flush stage) */
113616 -                               else
113617 -                                       cDst = zcs->outBuff, oSize = zcs->outBuffSize;
113618 -                               cSize = (flush == zsf_end) ? ZSTD_compressEnd(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize)
113619 -                                                          : ZSTD_compressContinue(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize);
113620 -                               if (ZSTD_isError(cSize))
113621 -                                       return cSize;
113622 -                               if (flush == zsf_end)
113623 -                                       zcs->frameEnded = 1;
113624 -                               /* prepare next block */
113625 -                               zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
113626 -                               if (zcs->inBuffTarget > zcs->inBuffSize)
113627 -                                       zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize; /* note : inBuffSize >= blockSize */
113628 -                               zcs->inToCompress = zcs->inBuffPos;
113629 -                               if (cDst == op) {
113630 -                                       op += cSize;
113631 -                                       break;
113632 -                               } /* no need to flush */
113633 -                               zcs->outBuffContentSize = cSize;
113634 -                               zcs->outBuffFlushedSize = 0;
113635 -                               zcs->stage = zcss_flush; /* pass-through to flush stage */
113636 -                       }
113637 -                       fallthrough;
113639 -               case zcss_flush: {
113640 -                       size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
113641 -                       size_t const flushed = ZSTD_limitCopy(op, oend - op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
113642 -                       op += flushed;
113643 -                       zcs->outBuffFlushedSize += flushed;
113644 -                       if (toFlush != flushed) {
113645 -                               someMoreWork = 0;
113646 -                               break;
113647 -                       } /* dst too small to store flushed data : stop there */
113648 -                       zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
113649 -                       zcs->stage = zcss_load;
113650 -                       break;
113651 -               }
113653 -               case zcss_final:
113654 -                       someMoreWork = 0; /* do nothing */
113655 -                       break;
113657 -               default:
113658 -                       return ERROR(GENERIC); /* impossible */
113659 -               }
113660 -       }
113662 -       *srcSizePtr = ip - istart;
113663 -       *dstCapacityPtr = op - ostart;
113664 -       zcs->inputProcessed += *srcSizePtr;
113665 -       if (zcs->frameEnded)
113666 -               return 0;
113667 -       {
113668 -               size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos;
113669 -               if (hintInSize == 0)
113670 -                       hintInSize = zcs->blockSize;
113671 -               return hintInSize;
113672 -       }
113675 -size_t ZSTD_compressStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output, ZSTD_inBuffer *input)
113677 -       size_t sizeRead = input->size - input->pos;
113678 -       size_t sizeWritten = output->size - output->pos;
113679 -       size_t const result =
113680 -           ZSTD_compressStream_generic(zcs, (char *)(output->dst) + output->pos, &sizeWritten, (const char *)(input->src) + input->pos, &sizeRead, zsf_gather);
113681 -       input->pos += sizeRead;
113682 -       output->pos += sizeWritten;
113683 -       return result;
113686 -/*======   Finalize   ======*/
113688 -/*! ZSTD_flushStream() :
113689 -*   @return : amount of data remaining to flush */
113690 -size_t ZSTD_flushStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output)
113692 -       size_t srcSize = 0;
113693 -       size_t sizeWritten = output->size - output->pos;
113694 -       size_t const result = ZSTD_compressStream_generic(zcs, (char *)(output->dst) + output->pos, &sizeWritten, &srcSize,
113695 -                                                         &srcSize, /* use a valid src address instead of NULL */
113696 -                                                         zsf_flush);
113697 -       output->pos += sizeWritten;
113698 -       if (ZSTD_isError(result))
113699 -               return result;
113700 -       return zcs->outBuffContentSize - zcs->outBuffFlushedSize; /* remaining to flush */
113703 -size_t ZSTD_endStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output)
113705 -       BYTE *const ostart = (BYTE *)(output->dst) + output->pos;
113706 -       BYTE *const oend = (BYTE *)(output->dst) + output->size;
113707 -       BYTE *op = ostart;
113709 -       if ((zcs->pledgedSrcSize) && (zcs->inputProcessed != zcs->pledgedSrcSize))
113710 -               return ERROR(srcSize_wrong); /* pledgedSrcSize not respected */
113712 -       if (zcs->stage != zcss_final) {
113713 -               /* flush whatever remains */
113714 -               size_t srcSize = 0;
113715 -               size_t sizeWritten = output->size - output->pos;
113716 -               size_t const notEnded =
113717 -                   ZSTD_compressStream_generic(zcs, ostart, &sizeWritten, &srcSize, &srcSize, zsf_end); /* use a valid src address instead of NULL */
113718 -               size_t const remainingToFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
113719 -               op += sizeWritten;
113720 -               if (remainingToFlush) {
113721 -                       output->pos += sizeWritten;
113722 -                       return remainingToFlush + ZSTD_BLOCKHEADERSIZE /* final empty block */ + (zcs->checksum * 4);
113723 -               }
113724 -               /* create epilogue */
113725 -               zcs->stage = zcss_final;
113726 -               zcs->outBuffContentSize = !notEnded ? 0 : ZSTD_compressEnd(zcs->cctx, zcs->outBuff, zcs->outBuffSize, NULL,
113727 -                                                                          0); /* write epilogue, including final empty block, into outBuff */
113728 -       }
113730 -       /* flush epilogue */
113731 -       {
113732 -               size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
113733 -               size_t const flushed = ZSTD_limitCopy(op, oend - op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
113734 -               op += flushed;
113735 -               zcs->outBuffFlushedSize += flushed;
113736 -               output->pos += op - ostart;
113737 -               if (toFlush == flushed)
113738 -                       zcs->stage = zcss_init; /* end reached */
113739 -               return toFlush - flushed;
113740 -       }
113743 -/*-=====  Pre-defined compression levels  =====-*/
113745 -#define ZSTD_DEFAULT_CLEVEL 1
113746 -#define ZSTD_MAX_CLEVEL 22
113747 -int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
113749 -static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL + 1] = {
113750 -    {
113751 -       /* "default" */
113752 -       /* W,  C,  H,  S,  L, TL, strat */
113753 -       {18, 12, 12, 1, 7, 16, ZSTD_fast},    /* level  0 - never used */
113754 -       {19, 13, 14, 1, 7, 16, ZSTD_fast},    /* level  1 */
113755 -       {19, 15, 16, 1, 6, 16, ZSTD_fast},    /* level  2 */
113756 -       {20, 16, 17, 1, 5, 16, ZSTD_dfast},   /* level  3.*/
113757 -       {20, 18, 18, 1, 5, 16, ZSTD_dfast},   /* level  4.*/
113758 -       {20, 15, 18, 3, 5, 16, ZSTD_greedy},  /* level  5 */
113759 -       {21, 16, 19, 2, 5, 16, ZSTD_lazy},    /* level  6 */
113760 -       {21, 17, 20, 3, 5, 16, ZSTD_lazy},    /* level  7 */
113761 -       {21, 18, 20, 3, 5, 16, ZSTD_lazy2},   /* level  8 */
113762 -       {21, 20, 20, 3, 5, 16, ZSTD_lazy2},   /* level  9 */
113763 -       {21, 19, 21, 4, 5, 16, ZSTD_lazy2},   /* level 10 */
113764 -       {22, 20, 22, 4, 5, 16, ZSTD_lazy2},   /* level 11 */
113765 -       {22, 20, 22, 5, 5, 16, ZSTD_lazy2},   /* level 12 */
113766 -       {22, 21, 22, 5, 5, 16, ZSTD_lazy2},   /* level 13 */
113767 -       {22, 21, 22, 6, 5, 16, ZSTD_lazy2},   /* level 14 */
113768 -       {22, 21, 21, 5, 5, 16, ZSTD_btlazy2}, /* level 15 */
113769 -       {23, 22, 22, 5, 5, 16, ZSTD_btlazy2}, /* level 16 */
113770 -       {23, 21, 22, 4, 5, 24, ZSTD_btopt},   /* level 17 */
113771 -       {23, 23, 22, 6, 5, 32, ZSTD_btopt},   /* level 18 */
113772 -       {23, 23, 22, 6, 3, 48, ZSTD_btopt},   /* level 19 */
113773 -       {25, 25, 23, 7, 3, 64, ZSTD_btopt2},  /* level 20 */
113774 -       {26, 26, 23, 7, 3, 256, ZSTD_btopt2}, /* level 21 */
113775 -       {27, 27, 25, 9, 3, 512, ZSTD_btopt2}, /* level 22 */
113776 -    },
113777 -    {
113778 -       /* for srcSize <= 256 KB */
113779 -       /* W,  C,  H,  S,  L,  T, strat */
113780 -       {0, 0, 0, 0, 0, 0, ZSTD_fast},   /* level  0 - not used */
113781 -       {18, 13, 14, 1, 6, 8, ZSTD_fast},      /* level  1 */
113782 -       {18, 14, 13, 1, 5, 8, ZSTD_dfast},     /* level  2 */
113783 -       {18, 16, 15, 1, 5, 8, ZSTD_dfast},     /* level  3 */
113784 -       {18, 15, 17, 1, 5, 8, ZSTD_greedy},    /* level  4.*/
113785 -       {18, 16, 17, 4, 5, 8, ZSTD_greedy},    /* level  5.*/
113786 -       {18, 16, 17, 3, 5, 8, ZSTD_lazy},      /* level  6.*/
113787 -       {18, 17, 17, 4, 4, 8, ZSTD_lazy},      /* level  7 */
113788 -       {18, 17, 17, 4, 4, 8, ZSTD_lazy2},     /* level  8 */
113789 -       {18, 17, 17, 5, 4, 8, ZSTD_lazy2},     /* level  9 */
113790 -       {18, 17, 17, 6, 4, 8, ZSTD_lazy2},     /* level 10 */
113791 -       {18, 18, 17, 6, 4, 8, ZSTD_lazy2},     /* level 11.*/
113792 -       {18, 18, 17, 7, 4, 8, ZSTD_lazy2},     /* level 12.*/
113793 -       {18, 19, 17, 6, 4, 8, ZSTD_btlazy2},   /* level 13 */
113794 -       {18, 18, 18, 4, 4, 16, ZSTD_btopt},    /* level 14.*/
113795 -       {18, 18, 18, 4, 3, 16, ZSTD_btopt},    /* level 15.*/
113796 -       {18, 19, 18, 6, 3, 32, ZSTD_btopt},    /* level 16.*/
113797 -       {18, 19, 18, 8, 3, 64, ZSTD_btopt},    /* level 17.*/
113798 -       {18, 19, 18, 9, 3, 128, ZSTD_btopt},   /* level 18.*/
113799 -       {18, 19, 18, 10, 3, 256, ZSTD_btopt},  /* level 19.*/
113800 -       {18, 19, 18, 11, 3, 512, ZSTD_btopt2}, /* level 20.*/
113801 -       {18, 19, 18, 12, 3, 512, ZSTD_btopt2}, /* level 21.*/
113802 -       {18, 19, 18, 13, 3, 512, ZSTD_btopt2}, /* level 22.*/
113803 -    },
113804 -    {
113805 -       /* for srcSize <= 128 KB */
113806 -       /* W,  C,  H,  S,  L,  T, strat */
113807 -       {17, 12, 12, 1, 7, 8, ZSTD_fast},      /* level  0 - not used */
113808 -       {17, 12, 13, 1, 6, 8, ZSTD_fast},      /* level  1 */
113809 -       {17, 13, 16, 1, 5, 8, ZSTD_fast},      /* level  2 */
113810 -       {17, 16, 16, 2, 5, 8, ZSTD_dfast},     /* level  3 */
113811 -       {17, 13, 15, 3, 4, 8, ZSTD_greedy},    /* level  4 */
113812 -       {17, 15, 17, 4, 4, 8, ZSTD_greedy},    /* level  5 */
113813 -       {17, 16, 17, 3, 4, 8, ZSTD_lazy},      /* level  6 */
113814 -       {17, 15, 17, 4, 4, 8, ZSTD_lazy2},     /* level  7 */
113815 -       {17, 17, 17, 4, 4, 8, ZSTD_lazy2},     /* level  8 */
113816 -       {17, 17, 17, 5, 4, 8, ZSTD_lazy2},     /* level  9 */
113817 -       {17, 17, 17, 6, 4, 8, ZSTD_lazy2},     /* level 10 */
113818 -       {17, 17, 17, 7, 4, 8, ZSTD_lazy2},     /* level 11 */
113819 -       {17, 17, 17, 8, 4, 8, ZSTD_lazy2},     /* level 12 */
113820 -       {17, 18, 17, 6, 4, 8, ZSTD_btlazy2},   /* level 13.*/
113821 -       {17, 17, 17, 7, 3, 8, ZSTD_btopt},     /* level 14.*/
113822 -       {17, 17, 17, 7, 3, 16, ZSTD_btopt},    /* level 15.*/
113823 -       {17, 18, 17, 7, 3, 32, ZSTD_btopt},    /* level 16.*/
113824 -       {17, 18, 17, 7, 3, 64, ZSTD_btopt},    /* level 17.*/
113825 -       {17, 18, 17, 7, 3, 256, ZSTD_btopt},   /* level 18.*/
113826 -       {17, 18, 17, 8, 3, 256, ZSTD_btopt},   /* level 19.*/
113827 -       {17, 18, 17, 9, 3, 256, ZSTD_btopt2},  /* level 20.*/
113828 -       {17, 18, 17, 10, 3, 256, ZSTD_btopt2}, /* level 21.*/
113829 -       {17, 18, 17, 11, 3, 512, ZSTD_btopt2}, /* level 22.*/
113830 -    },
113831 -    {
113832 -       /* for srcSize <= 16 KB */
113833 -       /* W,  C,  H,  S,  L,  T, strat */
113834 -       {14, 12, 12, 1, 7, 6, ZSTD_fast},      /* level  0 - not used */
113835 -       {14, 14, 14, 1, 6, 6, ZSTD_fast},      /* level  1 */
113836 -       {14, 14, 14, 1, 4, 6, ZSTD_fast},      /* level  2 */
113837 -       {14, 14, 14, 1, 4, 6, ZSTD_dfast},     /* level  3.*/
113838 -       {14, 14, 14, 4, 4, 6, ZSTD_greedy},    /* level  4.*/
113839 -       {14, 14, 14, 3, 4, 6, ZSTD_lazy},      /* level  5.*/
113840 -       {14, 14, 14, 4, 4, 6, ZSTD_lazy2},     /* level  6 */
113841 -       {14, 14, 14, 5, 4, 6, ZSTD_lazy2},     /* level  7 */
113842 -       {14, 14, 14, 6, 4, 6, ZSTD_lazy2},     /* level  8.*/
113843 -       {14, 15, 14, 6, 4, 6, ZSTD_btlazy2},   /* level  9.*/
113844 -       {14, 15, 14, 3, 3, 6, ZSTD_btopt},     /* level 10.*/
113845 -       {14, 15, 14, 6, 3, 8, ZSTD_btopt},     /* level 11.*/
113846 -       {14, 15, 14, 6, 3, 16, ZSTD_btopt},    /* level 12.*/
113847 -       {14, 15, 14, 6, 3, 24, ZSTD_btopt},    /* level 13.*/
113848 -       {14, 15, 15, 6, 3, 48, ZSTD_btopt},    /* level 14.*/
113849 -       {14, 15, 15, 6, 3, 64, ZSTD_btopt},    /* level 15.*/
113850 -       {14, 15, 15, 6, 3, 96, ZSTD_btopt},    /* level 16.*/
113851 -       {14, 15, 15, 6, 3, 128, ZSTD_btopt},   /* level 17.*/
113852 -       {14, 15, 15, 6, 3, 256, ZSTD_btopt},   /* level 18.*/
113853 -       {14, 15, 15, 7, 3, 256, ZSTD_btopt},   /* level 19.*/
113854 -       {14, 15, 15, 8, 3, 256, ZSTD_btopt2},  /* level 20.*/
113855 -       {14, 15, 15, 9, 3, 256, ZSTD_btopt2},  /* level 21.*/
113856 -       {14, 15, 15, 10, 3, 256, ZSTD_btopt2}, /* level 22.*/
113857 -    },
113860 -/*! ZSTD_getCParams() :
113861 -*   @return ZSTD_compressionParameters structure for a selected compression level, `srcSize` and `dictSize`.
113862 -*   Size values are optional, provide 0 if not known or unused */
113863 -ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSize, size_t dictSize)
113865 -       ZSTD_compressionParameters cp;
113866 -       size_t const addedSize = srcSize ? 0 : 500;
113867 -       U64 const rSize = srcSize + dictSize ? srcSize + dictSize + addedSize : (U64)-1;
113868 -       U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB); /* intentional underflow for srcSizeHint == 0 */
113869 -       if (compressionLevel <= 0)
113870 -               compressionLevel = ZSTD_DEFAULT_CLEVEL; /* 0 == default; no negative compressionLevel yet */
113871 -       if (compressionLevel > ZSTD_MAX_CLEVEL)
113872 -               compressionLevel = ZSTD_MAX_CLEVEL;
113873 -       cp = ZSTD_defaultCParameters[tableID][compressionLevel];
113874 -       if (ZSTD_32bits()) { /* auto-correction, for 32-bits mode */
113875 -               if (cp.windowLog > ZSTD_WINDOWLOG_MAX)
113876 -                       cp.windowLog = ZSTD_WINDOWLOG_MAX;
113877 -               if (cp.chainLog > ZSTD_CHAINLOG_MAX)
113878 -                       cp.chainLog = ZSTD_CHAINLOG_MAX;
113879 -               if (cp.hashLog > ZSTD_HASHLOG_MAX)
113880 -                       cp.hashLog = ZSTD_HASHLOG_MAX;
113881 -       }
113882 -       cp = ZSTD_adjustCParams(cp, srcSize, dictSize);
113883 -       return cp;
113886 -/*! ZSTD_getParams() :
113887 -*   same as ZSTD_getCParams(), but @return a `ZSTD_parameters` object (instead of `ZSTD_compressionParameters`).
113888 -*   All fields of `ZSTD_frameParameters` are set to default (0) */
113889 -ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSize, size_t dictSize)
113891 -       ZSTD_parameters params;
113892 -       ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSize, dictSize);
113893 -       memset(&params, 0, sizeof(params));
113894 -       params.cParams = cParams;
113895 -       return params;
113898 -EXPORT_SYMBOL(ZSTD_maxCLevel);
113899 -EXPORT_SYMBOL(ZSTD_compressBound);
113901 -EXPORT_SYMBOL(ZSTD_CCtxWorkspaceBound);
113902 -EXPORT_SYMBOL(ZSTD_initCCtx);
113903 -EXPORT_SYMBOL(ZSTD_compressCCtx);
113904 -EXPORT_SYMBOL(ZSTD_compress_usingDict);
113906 -EXPORT_SYMBOL(ZSTD_CDictWorkspaceBound);
113907 -EXPORT_SYMBOL(ZSTD_initCDict);
113908 -EXPORT_SYMBOL(ZSTD_compress_usingCDict);
113910 -EXPORT_SYMBOL(ZSTD_CStreamWorkspaceBound);
113911 -EXPORT_SYMBOL(ZSTD_initCStream);
113912 -EXPORT_SYMBOL(ZSTD_initCStream_usingCDict);
113913 -EXPORT_SYMBOL(ZSTD_resetCStream);
113914 -EXPORT_SYMBOL(ZSTD_compressStream);
113915 -EXPORT_SYMBOL(ZSTD_flushStream);
113916 -EXPORT_SYMBOL(ZSTD_endStream);
113917 -EXPORT_SYMBOL(ZSTD_CStreamInSize);
113918 -EXPORT_SYMBOL(ZSTD_CStreamOutSize);
113920 -EXPORT_SYMBOL(ZSTD_getCParams);
113921 -EXPORT_SYMBOL(ZSTD_getParams);
113922 -EXPORT_SYMBOL(ZSTD_checkCParams);
113923 -EXPORT_SYMBOL(ZSTD_adjustCParams);
113925 -EXPORT_SYMBOL(ZSTD_compressBegin);
113926 -EXPORT_SYMBOL(ZSTD_compressBegin_usingDict);
113927 -EXPORT_SYMBOL(ZSTD_compressBegin_advanced);
113928 -EXPORT_SYMBOL(ZSTD_copyCCtx);
113929 -EXPORT_SYMBOL(ZSTD_compressBegin_usingCDict);
113930 -EXPORT_SYMBOL(ZSTD_compressContinue);
113931 -EXPORT_SYMBOL(ZSTD_compressEnd);
113933 -EXPORT_SYMBOL(ZSTD_getBlockSizeMax);
113934 -EXPORT_SYMBOL(ZSTD_compressBlock);
113936 -MODULE_LICENSE("Dual BSD/GPL");
113937 -MODULE_DESCRIPTION("Zstd Compressor");
113938 diff --git a/lib/zstd/compress/fse_compress.c b/lib/zstd/compress/fse_compress.c
113939 new file mode 100644
113940 index 000000000000..436985b620e5
113941 --- /dev/null
113942 +++ b/lib/zstd/compress/fse_compress.c
113943 @@ -0,0 +1,625 @@
113944 +/* ******************************************************************
113945 + * FSE : Finite State Entropy encoder
113946 + * Copyright (c) Yann Collet, Facebook, Inc.
113948 + *  You can contact the author at :
113949 + *  - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
113950 + *  - Public forum : https://groups.google.com/forum/#!forum/lz4c
113952 + * This source code is licensed under both the BSD-style license (found in the
113953 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
113954 + * in the COPYING file in the root directory of this source tree).
113955 + * You may select, at your option, one of the above-listed licenses.
113956 +****************************************************************** */
113958 +/* **************************************************************
113959 +*  Includes
113960 +****************************************************************/
113961 +#include "../common/compiler.h"
113962 +#include "../common/mem.h"        /* U32, U16, etc. */
113963 +#include "../common/debug.h"      /* assert, DEBUGLOG */
113964 +#include "hist.h"       /* HIST_count_wksp */
113965 +#include "../common/bitstream.h"
113966 +#define FSE_STATIC_LINKING_ONLY
113967 +#include "../common/fse.h"
113968 +#include "../common/error_private.h"
113969 +#define ZSTD_DEPS_NEED_MALLOC
113970 +#define ZSTD_DEPS_NEED_MATH64
113971 +#include "../common/zstd_deps.h"  /* ZSTD_malloc, ZSTD_free, ZSTD_memcpy, ZSTD_memset */
113974 +/* **************************************************************
113975 +*  Error Management
113976 +****************************************************************/
113977 +#define FSE_isError ERR_isError
113980 +/* **************************************************************
113981 +*  Templates
113982 +****************************************************************/
113984 +  designed to be included
113985 +  for type-specific functions (template emulation in C)
113986 +  Objective is to write these functions only once, for improved maintenance
113989 +/* safety checks */
113990 +#ifndef FSE_FUNCTION_EXTENSION
113991 +#  error "FSE_FUNCTION_EXTENSION must be defined"
113992 +#endif
113993 +#ifndef FSE_FUNCTION_TYPE
113994 +#  error "FSE_FUNCTION_TYPE must be defined"
113995 +#endif
113997 +/* Function names */
113998 +#define FSE_CAT(X,Y) X##Y
113999 +#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
114000 +#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
114003 +/* Function templates */
114005 +/* FSE_buildCTable_wksp() :
114006 + * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
114007 + * wkspSize should be sized to handle worst case situation, which is `1<<max_tableLog * sizeof(FSE_FUNCTION_TYPE)`
114008 + * workSpace must also be properly aligned with FSE_FUNCTION_TYPE requirements
114009 + */
114010 +size_t FSE_buildCTable_wksp(FSE_CTable* ct,
114011 +                      const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
114012 +                            void* workSpace, size_t wkspSize)
114014 +    U32 const tableSize = 1 << tableLog;
114015 +    U32 const tableMask = tableSize - 1;
114016 +    void* const ptr = ct;
114017 +    U16* const tableU16 = ( (U16*) ptr) + 2;
114018 +    void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableLog ? tableSize>>1 : 1) ;
114019 +    FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
114020 +    U32 const step = FSE_TABLESTEP(tableSize);
114022 +    U32* cumul = (U32*)workSpace;
114023 +    FSE_FUNCTION_TYPE* tableSymbol = (FSE_FUNCTION_TYPE*)(cumul + (maxSymbolValue + 2));
114025 +    U32 highThreshold = tableSize-1;
114027 +    if ((size_t)workSpace & 3) return ERROR(GENERIC); /* Must be 4 byte aligned */
114028 +    if (FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) > wkspSize) return ERROR(tableLog_tooLarge);
114029 +    /* CTable header */
114030 +    tableU16[-2] = (U16) tableLog;
114031 +    tableU16[-1] = (U16) maxSymbolValue;
114032 +    assert(tableLog < 16);   /* required for threshold strategy to work */
114034 +    /* For explanations on how to distribute symbol values over the table :
114035 +     * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
114037 +     #ifdef __clang_analyzer__
114038 +     ZSTD_memset(tableSymbol, 0, sizeof(*tableSymbol) * tableSize);   /* useless initialization, just to keep scan-build happy */
114039 +     #endif
114041 +    /* symbol start positions */
114042 +    {   U32 u;
114043 +        cumul[0] = 0;
114044 +        for (u=1; u <= maxSymbolValue+1; u++) {
114045 +            if (normalizedCounter[u-1]==-1) {  /* Low proba symbol */
114046 +                cumul[u] = cumul[u-1] + 1;
114047 +                tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1);
114048 +            } else {
114049 +                cumul[u] = cumul[u-1] + normalizedCounter[u-1];
114050 +        }   }
114051 +        cumul[maxSymbolValue+1] = tableSize+1;
114052 +    }
114054 +    /* Spread symbols */
114055 +    {   U32 position = 0;
114056 +        U32 symbol;
114057 +        for (symbol=0; symbol<=maxSymbolValue; symbol++) {
114058 +            int nbOccurrences;
114059 +            int const freq = normalizedCounter[symbol];
114060 +            for (nbOccurrences=0; nbOccurrences<freq; nbOccurrences++) {
114061 +                tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
114062 +                position = (position + step) & tableMask;
114063 +                while (position > highThreshold)
114064 +                    position = (position + step) & tableMask;   /* Low proba area */
114065 +        }   }
114067 +        assert(position==0);  /* Must have initialized all positions */
114068 +    }
114070 +    /* Build table */
114071 +    {   U32 u; for (u=0; u<tableSize; u++) {
114072 +        FSE_FUNCTION_TYPE s = tableSymbol[u];   /* note : static analyzer may not understand tableSymbol is properly initialized */
114073 +        tableU16[cumul[s]++] = (U16) (tableSize+u);   /* TableU16 : sorted by symbol order; gives next state value */
114074 +    }   }
114076 +    /* Build Symbol Transformation Table */
114077 +    {   unsigned total = 0;
114078 +        unsigned s;
114079 +        for (s=0; s<=maxSymbolValue; s++) {
114080 +            switch (normalizedCounter[s])
114081 +            {
114082 +            case  0:
114083 +                /* filling nonetheless, for compatibility with FSE_getMaxNbBits() */
114084 +                symbolTT[s].deltaNbBits = ((tableLog+1) << 16) - (1<<tableLog);
114085 +                break;
114087 +            case -1:
114088 +            case  1:
114089 +                symbolTT[s].deltaNbBits = (tableLog << 16) - (1<<tableLog);
114090 +                symbolTT[s].deltaFindState = total - 1;
114091 +                total ++;
114092 +                break;
114093 +            default :
114094 +                {
114095 +                    U32 const maxBitsOut = tableLog - BIT_highbit32 (normalizedCounter[s]-1);
114096 +                    U32 const minStatePlus = normalizedCounter[s] << maxBitsOut;
114097 +                    symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
114098 +                    symbolTT[s].deltaFindState = total - normalizedCounter[s];
114099 +                    total +=  normalizedCounter[s];
114100 +    }   }   }   }
114102 +#if 0  /* debug : symbol costs */
114103 +    DEBUGLOG(5, "\n --- table statistics : ");
114104 +    {   U32 symbol;
114105 +        for (symbol=0; symbol<=maxSymbolValue; symbol++) {
114106 +            DEBUGLOG(5, "%3u: w=%3i,   maxBits=%u, fracBits=%.2f",
114107 +                symbol, normalizedCounter[symbol],
114108 +                FSE_getMaxNbBits(symbolTT, symbol),
114109 +                (double)FSE_bitCost(symbolTT, tableLog, symbol, 8) / 256);
114110 +        }
114111 +    }
114112 +#endif
114114 +    return 0;
114120 +#ifndef FSE_COMMONDEFS_ONLY
114123 +/*-**************************************************************
114124 +*  FSE NCount encoding
114125 +****************************************************************/
114126 +size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
114128 +    size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog) >> 3) + 3;
114129 +    return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND;  /* maxSymbolValue==0 ? use default */
114132 +static size_t
114133 +FSE_writeNCount_generic (void* header, size_t headerBufferSize,
114134 +                   const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
114135 +                         unsigned writeIsSafe)
114137 +    BYTE* const ostart = (BYTE*) header;
114138 +    BYTE* out = ostart;
114139 +    BYTE* const oend = ostart + headerBufferSize;
114140 +    int nbBits;
114141 +    const int tableSize = 1 << tableLog;
114142 +    int remaining;
114143 +    int threshold;
114144 +    U32 bitStream = 0;
114145 +    int bitCount = 0;
114146 +    unsigned symbol = 0;
114147 +    unsigned const alphabetSize = maxSymbolValue + 1;
114148 +    int previousIs0 = 0;
114150 +    /* Table Size */
114151 +    bitStream += (tableLog-FSE_MIN_TABLELOG) << bitCount;
114152 +    bitCount  += 4;
114154 +    /* Init */
114155 +    remaining = tableSize+1;   /* +1 for extra accuracy */
114156 +    threshold = tableSize;
114157 +    nbBits = tableLog+1;
114159 +    while ((symbol < alphabetSize) && (remaining>1)) {  /* stops at 1 */
114160 +        if (previousIs0) {
114161 +            unsigned start = symbol;
114162 +            while ((symbol < alphabetSize) && !normalizedCounter[symbol]) symbol++;
114163 +            if (symbol == alphabetSize) break;   /* incorrect distribution */
114164 +            while (symbol >= start+24) {
114165 +                start+=24;
114166 +                bitStream += 0xFFFFU << bitCount;
114167 +                if ((!writeIsSafe) && (out > oend-2))
114168 +                    return ERROR(dstSize_tooSmall);   /* Buffer overflow */
114169 +                out[0] = (BYTE) bitStream;
114170 +                out[1] = (BYTE)(bitStream>>8);
114171 +                out+=2;
114172 +                bitStream>>=16;
114173 +            }
114174 +            while (symbol >= start+3) {
114175 +                start+=3;
114176 +                bitStream += 3 << bitCount;
114177 +                bitCount += 2;
114178 +            }
114179 +            bitStream += (symbol-start) << bitCount;
114180 +            bitCount += 2;
114181 +            if (bitCount>16) {
114182 +                if ((!writeIsSafe) && (out > oend - 2))
114183 +                    return ERROR(dstSize_tooSmall);   /* Buffer overflow */
114184 +                out[0] = (BYTE)bitStream;
114185 +                out[1] = (BYTE)(bitStream>>8);
114186 +                out += 2;
114187 +                bitStream >>= 16;
114188 +                bitCount -= 16;
114189 +        }   }
114190 +        {   int count = normalizedCounter[symbol++];
114191 +            int const max = (2*threshold-1) - remaining;
114192 +            remaining -= count < 0 ? -count : count;
114193 +            count++;   /* +1 for extra accuracy */
114194 +            if (count>=threshold)
114195 +                count += max;   /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
114196 +            bitStream += count << bitCount;
114197 +            bitCount  += nbBits;
114198 +            bitCount  -= (count<max);
114199 +            previousIs0  = (count==1);
114200 +            if (remaining<1) return ERROR(GENERIC);
114201 +            while (remaining<threshold) { nbBits--; threshold>>=1; }
114202 +        }
114203 +        if (bitCount>16) {
114204 +            if ((!writeIsSafe) && (out > oend - 2))
114205 +                return ERROR(dstSize_tooSmall);   /* Buffer overflow */
114206 +            out[0] = (BYTE)bitStream;
114207 +            out[1] = (BYTE)(bitStream>>8);
114208 +            out += 2;
114209 +            bitStream >>= 16;
114210 +            bitCount -= 16;
114211 +    }   }
114213 +    if (remaining != 1)
114214 +        return ERROR(GENERIC);  /* incorrect normalized distribution */
114215 +    assert(symbol <= alphabetSize);
114217 +    /* flush remaining bitStream */
114218 +    if ((!writeIsSafe) && (out > oend - 2))
114219 +        return ERROR(dstSize_tooSmall);   /* Buffer overflow */
114220 +    out[0] = (BYTE)bitStream;
114221 +    out[1] = (BYTE)(bitStream>>8);
114222 +    out+= (bitCount+7) /8;
114224 +    return (out-ostart);
114228 +size_t FSE_writeNCount (void* buffer, size_t bufferSize,
114229 +                  const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
114231 +    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);   /* Unsupported */
114232 +    if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC);   /* Unsupported */
114234 +    if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
114235 +        return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0);
114237 +    return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1 /* write in buffer is safe */);
114241 +/*-**************************************************************
114242 +*  FSE Compression Code
114243 +****************************************************************/
114245 +FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog)
114247 +    size_t size;
114248 +    if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
114249 +    size = FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32);
114250 +    return (FSE_CTable*)ZSTD_malloc(size);
114253 +void FSE_freeCTable (FSE_CTable* ct) { ZSTD_free(ct); }
114255 +/* provides the minimum logSize to safely represent a distribution */
114256 +static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
114258 +    U32 minBitsSrc = BIT_highbit32((U32)(srcSize)) + 1;
114259 +    U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2;
114260 +    U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
114261 +    assert(srcSize > 1); /* Not supported, RLE should be used instead */
114262 +    return minBits;
114265 +unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
114267 +    U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus;
114268 +    U32 tableLog = maxTableLog;
114269 +    U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);
114270 +    assert(srcSize > 1); /* Not supported, RLE should be used instead */
114271 +    if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
114272 +    if (maxBitsSrc < tableLog) tableLog = maxBitsSrc;   /* Accuracy can be reduced */
114273 +    if (minBits > tableLog) tableLog = minBits;   /* Need a minimum to safely represent all symbol values */
114274 +    if (tableLog < FSE_MIN_TABLELOG) tableLog = FSE_MIN_TABLELOG;
114275 +    if (tableLog > FSE_MAX_TABLELOG) tableLog = FSE_MAX_TABLELOG;
114276 +    return tableLog;
114279 +unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
114281 +    return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);
114284 +/* Secondary normalization method.
114285 +   To be used when primary method fails. */
114287 +static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue, short lowProbCount)
114289 +    short const NOT_YET_ASSIGNED = -2;
114290 +    U32 s;
114291 +    U32 distributed = 0;
114292 +    U32 ToDistribute;
114294 +    /* Init */
114295 +    U32 const lowThreshold = (U32)(total >> tableLog);
114296 +    U32 lowOne = (U32)((total * 3) >> (tableLog + 1));
114298 +    for (s=0; s<=maxSymbolValue; s++) {
114299 +        if (count[s] == 0) {
114300 +            norm[s]=0;
114301 +            continue;
114302 +        }
114303 +        if (count[s] <= lowThreshold) {
114304 +            norm[s] = lowProbCount;
114305 +            distributed++;
114306 +            total -= count[s];
114307 +            continue;
114308 +        }
114309 +        if (count[s] <= lowOne) {
114310 +            norm[s] = 1;
114311 +            distributed++;
114312 +            total -= count[s];
114313 +            continue;
114314 +        }
114316 +        norm[s]=NOT_YET_ASSIGNED;
114317 +    }
114318 +    ToDistribute = (1 << tableLog) - distributed;
114320 +    if (ToDistribute == 0)
114321 +        return 0;
114323 +    if ((total / ToDistribute) > lowOne) {
114324 +        /* risk of rounding to zero */
114325 +        lowOne = (U32)((total * 3) / (ToDistribute * 2));
114326 +        for (s=0; s<=maxSymbolValue; s++) {
114327 +            if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) {
114328 +                norm[s] = 1;
114329 +                distributed++;
114330 +                total -= count[s];
114331 +                continue;
114332 +        }   }
114333 +        ToDistribute = (1 << tableLog) - distributed;
114334 +    }
114336 +    if (distributed == maxSymbolValue+1) {
114337 +        /* all values are pretty poor;
114338 +           probably incompressible data (should have already been detected);
114339 +           find max, then give all remaining points to max */
114340 +        U32 maxV = 0, maxC = 0;
114341 +        for (s=0; s<=maxSymbolValue; s++)
114342 +            if (count[s] > maxC) { maxV=s; maxC=count[s]; }
114343 +        norm[maxV] += (short)ToDistribute;
114344 +        return 0;
114345 +    }
114347 +    if (total == 0) {
114348 +        /* all of the symbols were low enough for the lowOne or lowThreshold */
114349 +        for (s=0; ToDistribute > 0; s = (s+1)%(maxSymbolValue+1))
114350 +            if (norm[s] > 0) { ToDistribute--; norm[s]++; }
114351 +        return 0;
114352 +    }
114354 +    {   U64 const vStepLog = 62 - tableLog;
114355 +        U64 const mid = (1ULL << (vStepLog-1)) - 1;
114356 +        U64 const rStep = ZSTD_div64((((U64)1<<vStepLog) * ToDistribute) + mid, (U32)total);   /* scale on remaining */
114357 +        U64 tmpTotal = mid;
114358 +        for (s=0; s<=maxSymbolValue; s++) {
114359 +            if (norm[s]==NOT_YET_ASSIGNED) {
114360 +                U64 const end = tmpTotal + (count[s] * rStep);
114361 +                U32 const sStart = (U32)(tmpTotal >> vStepLog);
114362 +                U32 const sEnd = (U32)(end >> vStepLog);
114363 +                U32 const weight = sEnd - sStart;
114364 +                if (weight < 1)
114365 +                    return ERROR(GENERIC);
114366 +                norm[s] = (short)weight;
114367 +                tmpTotal = end;
114368 +    }   }   }
114370 +    return 0;
114373 +size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
114374 +                           const unsigned* count, size_t total,
114375 +                           unsigned maxSymbolValue, unsigned useLowProbCount)
114377 +    /* Sanity checks */
114378 +    if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
114379 +    if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC);   /* Unsupported size */
114380 +    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);   /* Unsupported size */
114381 +    if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC);   /* Too small tableLog, compression potentially impossible */
114383 +    {   static U32 const rtbTable[] = {     0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 };
114384 +        short const lowProbCount = useLowProbCount ? -1 : 1;
114385 +        U64 const scale = 62 - tableLog;
114386 +        U64 const step = ZSTD_div64((U64)1<<62, (U32)total);   /* <== here, one division ! */
114387 +        U64 const vStep = 1ULL<<(scale-20);
114388 +        int stillToDistribute = 1<<tableLog;
114389 +        unsigned s;
114390 +        unsigned largest=0;
114391 +        short largestP=0;
114392 +        U32 lowThreshold = (U32)(total >> tableLog);
114394 +        for (s=0; s<=maxSymbolValue; s++) {
114395 +            if (count[s] == total) return 0;   /* rle special case */
114396 +            if (count[s] == 0) { normalizedCounter[s]=0; continue; }
114397 +            if (count[s] <= lowThreshold) {
114398 +                normalizedCounter[s] = lowProbCount;
114399 +                stillToDistribute--;
114400 +            } else {
114401 +                short proba = (short)((count[s]*step) >> scale);
114402 +                if (proba<8) {
114403 +                    U64 restToBeat = vStep * rtbTable[proba];
114404 +                    proba += (count[s]*step) - ((U64)proba<<scale) > restToBeat;
114405 +                }
114406 +                if (proba > largestP) { largestP=proba; largest=s; }
114407 +                normalizedCounter[s] = proba;
114408 +                stillToDistribute -= proba;
114409 +        }   }
114410 +        if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) {
114411 +            /* corner case, need another normalization method */
114412 +            size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue, lowProbCount);
114413 +            if (FSE_isError(errorCode)) return errorCode;
114414 +        }
114415 +        else normalizedCounter[largest] += (short)stillToDistribute;
114416 +    }
114418 +#if 0
114419 +    {   /* Print Table (debug) */
114420 +        U32 s;
114421 +        U32 nTotal = 0;
114422 +        for (s=0; s<=maxSymbolValue; s++)
114423 +            RAWLOG(2, "%3i: %4i \n", s, normalizedCounter[s]);
114424 +        for (s=0; s<=maxSymbolValue; s++)
114425 +            nTotal += abs(normalizedCounter[s]);
114426 +        if (nTotal != (1U<<tableLog))
114427 +            RAWLOG(2, "Warning !!! Total == %u != %u !!!", nTotal, 1U<<tableLog);
114428 +        getchar();
114429 +    }
114430 +#endif
114432 +    return tableLog;
114436 +/* fake FSE_CTable, for raw (uncompressed) input */
114437 +size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits)
114439 +    const unsigned tableSize = 1 << nbBits;
114440 +    const unsigned tableMask = tableSize - 1;
114441 +    const unsigned maxSymbolValue = tableMask;
114442 +    void* const ptr = ct;
114443 +    U16* const tableU16 = ( (U16*) ptr) + 2;
114444 +    void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableSize>>1);   /* assumption : tableLog >= 1 */
114445 +    FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
114446 +    unsigned s;
114448 +    /* Sanity checks */
114449 +    if (nbBits < 1) return ERROR(GENERIC);             /* min size */
114451 +    /* header */
114452 +    tableU16[-2] = (U16) nbBits;
114453 +    tableU16[-1] = (U16) maxSymbolValue;
114455 +    /* Build table */
114456 +    for (s=0; s<tableSize; s++)
114457 +        tableU16[s] = (U16)(tableSize + s);
114459 +    /* Build Symbol Transformation Table */
114460 +    {   const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits);
114461 +        for (s=0; s<=maxSymbolValue; s++) {
114462 +            symbolTT[s].deltaNbBits = deltaNbBits;
114463 +            symbolTT[s].deltaFindState = s-1;
114464 +    }   }
114466 +    return 0;
114469 +/* fake FSE_CTable, for rle input (always same symbol) */
114470 +size_t FSE_buildCTable_rle (FSE_CTable* ct, BYTE symbolValue)
114472 +    void* ptr = ct;
114473 +    U16* tableU16 = ( (U16*) ptr) + 2;
114474 +    void* FSCTptr = (U32*)ptr + 2;
114475 +    FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*) FSCTptr;
114477 +    /* header */
114478 +    tableU16[-2] = (U16) 0;
114479 +    tableU16[-1] = (U16) symbolValue;
114481 +    /* Build table */
114482 +    tableU16[0] = 0;
114483 +    tableU16[1] = 0;   /* just in case */
114485 +    /* Build Symbol Transformation Table */
114486 +    symbolTT[symbolValue].deltaNbBits = 0;
114487 +    symbolTT[symbolValue].deltaFindState = 0;
114489 +    return 0;
114493 +static size_t FSE_compress_usingCTable_generic (void* dst, size_t dstSize,
114494 +                           const void* src, size_t srcSize,
114495 +                           const FSE_CTable* ct, const unsigned fast)
114497 +    const BYTE* const istart = (const BYTE*) src;
114498 +    const BYTE* const iend = istart + srcSize;
114499 +    const BYTE* ip=iend;
114501 +    BIT_CStream_t bitC;
114502 +    FSE_CState_t CState1, CState2;
114504 +    /* init */
114505 +    if (srcSize <= 2) return 0;
114506 +    { size_t const initError = BIT_initCStream(&bitC, dst, dstSize);
114507 +      if (FSE_isError(initError)) return 0; /* not enough space available to write a bitstream */ }
114509 +#define FSE_FLUSHBITS(s)  (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))
114511 +    if (srcSize & 1) {
114512 +        FSE_initCState2(&CState1, ct, *--ip);
114513 +        FSE_initCState2(&CState2, ct, *--ip);
114514 +        FSE_encodeSymbol(&bitC, &CState1, *--ip);
114515 +        FSE_FLUSHBITS(&bitC);
114516 +    } else {
114517 +        FSE_initCState2(&CState2, ct, *--ip);
114518 +        FSE_initCState2(&CState1, ct, *--ip);
114519 +    }
114521 +    /* join to mod 4 */
114522 +    srcSize -= 2;
114523 +    if ((sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) && (srcSize & 2)) {  /* test bit 2 */
114524 +        FSE_encodeSymbol(&bitC, &CState2, *--ip);
114525 +        FSE_encodeSymbol(&bitC, &CState1, *--ip);
114526 +        FSE_FLUSHBITS(&bitC);
114527 +    }
114529 +    /* 2 or 4 encoding per loop */
114530 +    while ( ip>istart ) {
114532 +        FSE_encodeSymbol(&bitC, &CState2, *--ip);
114534 +        if (sizeof(bitC.bitContainer)*8 < FSE_MAX_TABLELOG*2+7 )   /* this test must be static */
114535 +            FSE_FLUSHBITS(&bitC);
114537 +        FSE_encodeSymbol(&bitC, &CState1, *--ip);
114539 +        if (sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) {  /* this test must be static */
114540 +            FSE_encodeSymbol(&bitC, &CState2, *--ip);
114541 +            FSE_encodeSymbol(&bitC, &CState1, *--ip);
114542 +        }
114544 +        FSE_FLUSHBITS(&bitC);
114545 +    }
114547 +    FSE_flushCState(&bitC, &CState2);
114548 +    FSE_flushCState(&bitC, &CState1);
114549 +    return BIT_closeCStream(&bitC);
114552 +size_t FSE_compress_usingCTable (void* dst, size_t dstSize,
114553 +                           const void* src, size_t srcSize,
114554 +                           const FSE_CTable* ct)
114556 +    unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize));
114558 +    if (fast)
114559 +        return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1);
114560 +    else
114561 +        return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0);
114565 +size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }
114568 +#endif   /* FSE_COMMONDEFS_ONLY */
114569 diff --git a/lib/zstd/compress/hist.c b/lib/zstd/compress/hist.c
114570 new file mode 100644
114571 index 000000000000..5fc30f766591
114572 --- /dev/null
114573 +++ b/lib/zstd/compress/hist.c
114574 @@ -0,0 +1,164 @@
114575 +/* ******************************************************************
114576 + * hist : Histogram functions
114577 + * part of Finite State Entropy project
114578 + * Copyright (c) Yann Collet, Facebook, Inc.
114580 + *  You can contact the author at :
114581 + *  - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
114582 + *  - Public forum : https://groups.google.com/forum/#!forum/lz4c
114584 + * This source code is licensed under both the BSD-style license (found in the
114585 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
114586 + * in the COPYING file in the root directory of this source tree).
114587 + * You may select, at your option, one of the above-listed licenses.
114588 +****************************************************************** */
114590 +/* --- dependencies --- */
114591 +#include "../common/mem.h"             /* U32, BYTE, etc. */
114592 +#include "../common/debug.h"           /* assert, DEBUGLOG */
114593 +#include "../common/error_private.h"   /* ERROR */
114594 +#include "hist.h"
114597 +/* --- Error management --- */
114598 +unsigned HIST_isError(size_t code) { return ERR_isError(code); }
114600 +/*-**************************************************************
114601 + *  Histogram functions
114602 + ****************************************************************/
114603 +unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
114604 +                           const void* src, size_t srcSize)
114606 +    const BYTE* ip = (const BYTE*)src;
114607 +    const BYTE* const end = ip + srcSize;
114608 +    unsigned maxSymbolValue = *maxSymbolValuePtr;
114609 +    unsigned largestCount=0;
114611 +    ZSTD_memset(count, 0, (maxSymbolValue+1) * sizeof(*count));
114612 +    if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }
114614 +    while (ip<end) {
114615 +        assert(*ip <= maxSymbolValue);
114616 +        count[*ip++]++;
114617 +    }
114619 +    while (!count[maxSymbolValue]) maxSymbolValue--;
114620 +    *maxSymbolValuePtr = maxSymbolValue;
114622 +    {   U32 s;
114623 +        for (s=0; s<=maxSymbolValue; s++)
114624 +            if (count[s] > largestCount) largestCount = count[s];
114625 +    }
114627 +    return largestCount;
114630 +typedef enum { trustInput, checkMaxSymbolValue } HIST_checkInput_e;
114632 +/* HIST_count_parallel_wksp() :
114633 + * store histogram into 4 intermediate tables, recombined at the end.
114634 + * this design makes better use of OoO cpus,
114635 + * and is noticeably faster when some values are heavily repeated.
114636 + * But it needs some additional workspace for intermediate tables.
114637 + * `workSpace` must be a U32 table of size >= HIST_WKSP_SIZE_U32.
114638 + * @return : largest histogram frequency,
114639 + *           or an error code (notably when histogram's alphabet is larger than *maxSymbolValuePtr) */
114640 +static size_t HIST_count_parallel_wksp(
114641 +                                unsigned* count, unsigned* maxSymbolValuePtr,
114642 +                                const void* source, size_t sourceSize,
114643 +                                HIST_checkInput_e check,
114644 +                                U32* const workSpace)
114646 +    const BYTE* ip = (const BYTE*)source;
114647 +    const BYTE* const iend = ip+sourceSize;
114648 +    size_t const countSize = (*maxSymbolValuePtr + 1) * sizeof(*count);
114649 +    unsigned max=0;
114650 +    U32* const Counting1 = workSpace;
114651 +    U32* const Counting2 = Counting1 + 256;
114652 +    U32* const Counting3 = Counting2 + 256;
114653 +    U32* const Counting4 = Counting3 + 256;
114655 +    /* safety checks */
114656 +    assert(*maxSymbolValuePtr <= 255);
114657 +    if (!sourceSize) {
114658 +        ZSTD_memset(count, 0, countSize);
114659 +        *maxSymbolValuePtr = 0;
114660 +        return 0;
114661 +    }
114662 +    ZSTD_memset(workSpace, 0, 4*256*sizeof(unsigned));
114664 +    /* by stripes of 16 bytes */
114665 +    {   U32 cached = MEM_read32(ip); ip += 4;
114666 +        while (ip < iend-15) {
114667 +            U32 c = cached; cached = MEM_read32(ip); ip += 4;
114668 +            Counting1[(BYTE) c     ]++;
114669 +            Counting2[(BYTE)(c>>8) ]++;
114670 +            Counting3[(BYTE)(c>>16)]++;
114671 +            Counting4[       c>>24 ]++;
114672 +            c = cached; cached = MEM_read32(ip); ip += 4;
114673 +            Counting1[(BYTE) c     ]++;
114674 +            Counting2[(BYTE)(c>>8) ]++;
114675 +            Counting3[(BYTE)(c>>16)]++;
114676 +            Counting4[       c>>24 ]++;
114677 +            c = cached; cached = MEM_read32(ip); ip += 4;
114678 +            Counting1[(BYTE) c     ]++;
114679 +            Counting2[(BYTE)(c>>8) ]++;
114680 +            Counting3[(BYTE)(c>>16)]++;
114681 +            Counting4[       c>>24 ]++;
114682 +            c = cached; cached = MEM_read32(ip); ip += 4;
114683 +            Counting1[(BYTE) c     ]++;
114684 +            Counting2[(BYTE)(c>>8) ]++;
114685 +            Counting3[(BYTE)(c>>16)]++;
114686 +            Counting4[       c>>24 ]++;
114687 +        }
114688 +        ip-=4;
114689 +    }
114691 +    /* finish last symbols */
114692 +    while (ip<iend) Counting1[*ip++]++;
114694 +    {   U32 s;
114695 +        for (s=0; s<256; s++) {
114696 +            Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
114697 +            if (Counting1[s] > max) max = Counting1[s];
114698 +    }   }
114700 +    {   unsigned maxSymbolValue = 255;
114701 +        while (!Counting1[maxSymbolValue]) maxSymbolValue--;
114702 +        if (check && maxSymbolValue > *maxSymbolValuePtr) return ERROR(maxSymbolValue_tooSmall);
114703 +        *maxSymbolValuePtr = maxSymbolValue;
114704 +        ZSTD_memmove(count, Counting1, countSize);   /* in case count & Counting1 are overlapping */
114705 +    }
114706 +    return (size_t)max;
114709 +/* HIST_countFast_wksp() :
114710 + * Same as HIST_countFast(), but using an externally provided scratch buffer.
114711 + * `workSpace` is a writable buffer which must be 4-bytes aligned,
114712 + * `workSpaceSize` must be >= HIST_WKSP_SIZE
114713 + */
114714 +size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
114715 +                          const void* source, size_t sourceSize,
114716 +                          void* workSpace, size_t workSpaceSize)
114718 +    if (sourceSize < 1500) /* heuristic threshold */
114719 +        return HIST_count_simple(count, maxSymbolValuePtr, source, sourceSize);
114720 +    if ((size_t)workSpace & 3) return ERROR(GENERIC);  /* must be aligned on 4-bytes boundaries */
114721 +    if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
114722 +    return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, trustInput, (U32*)workSpace);
114725 +/* HIST_count_wksp() :
114726 + * Same as HIST_count(), but using an externally provided scratch buffer.
114727 + * `workSpace` size must be table of >= HIST_WKSP_SIZE_U32 unsigned */
114728 +size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
114729 +                       const void* source, size_t sourceSize,
114730 +                       void* workSpace, size_t workSpaceSize)
114732 +    if ((size_t)workSpace & 3) return ERROR(GENERIC);  /* must be aligned on 4-bytes boundaries */
114733 +    if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
114734 +    if (*maxSymbolValuePtr < 255)
114735 +        return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, checkMaxSymbolValue, (U32*)workSpace);
114736 +    *maxSymbolValuePtr = 255;
114737 +    return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace, workSpaceSize);
114739 diff --git a/lib/zstd/compress/hist.h b/lib/zstd/compress/hist.h
114740 new file mode 100644
114741 index 000000000000..228ed48a71de
114742 --- /dev/null
114743 +++ b/lib/zstd/compress/hist.h
114744 @@ -0,0 +1,75 @@
114745 +/* ******************************************************************
114746 + * hist : Histogram functions
114747 + * part of Finite State Entropy project
114748 + * Copyright (c) Yann Collet, Facebook, Inc.
114750 + *  You can contact the author at :
114751 + *  - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
114752 + *  - Public forum : https://groups.google.com/forum/#!forum/lz4c
114754 + * This source code is licensed under both the BSD-style license (found in the
114755 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
114756 + * in the COPYING file in the root directory of this source tree).
114757 + * You may select, at your option, one of the above-listed licenses.
114758 +****************************************************************** */
114760 +/* --- dependencies --- */
114761 +#include "../common/zstd_deps.h"   /* size_t */
114764 +/* --- simple histogram functions --- */
114766 +/*! HIST_count():
114767 + *  Provides the precise count of each byte within a table 'count'.
114768 + * 'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1).
114769 + *  Updates *maxSymbolValuePtr with actual largest symbol value detected.
114770 + * @return : count of the most frequent symbol (which isn't identified).
114771 + *           or an error code, which can be tested using HIST_isError().
114772 + *           note : if return == srcSize, there is only one symbol.
114773 + */
114774 +size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr,
114775 +                  const void* src, size_t srcSize);
114777 +unsigned HIST_isError(size_t code);  /**< tells if a return value is an error code */
114780 +/* --- advanced histogram functions --- */
114782 +#define HIST_WKSP_SIZE_U32 1024
114783 +#define HIST_WKSP_SIZE    (HIST_WKSP_SIZE_U32 * sizeof(unsigned))
114784 +/** HIST_count_wksp() :
114785 + *  Same as HIST_count(), but using an externally provided scratch buffer.
114786 + *  Benefit is this function will use very little stack space.
114787 + * `workSpace` is a writable buffer which must be 4-bytes aligned,
114788 + * `workSpaceSize` must be >= HIST_WKSP_SIZE
114789 + */
114790 +size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
114791 +                       const void* src, size_t srcSize,
114792 +                       void* workSpace, size_t workSpaceSize);
114794 +/** HIST_countFast() :
114795 + *  same as HIST_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr.
114796 + *  This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr`
114797 + */
114798 +size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
114799 +                      const void* src, size_t srcSize);
114801 +/** HIST_countFast_wksp() :
114802 + *  Same as HIST_countFast(), but using an externally provided scratch buffer.
114803 + * `workSpace` is a writable buffer which must be 4-bytes aligned,
114804 + * `workSpaceSize` must be >= HIST_WKSP_SIZE
114805 + */
114806 +size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
114807 +                           const void* src, size_t srcSize,
114808 +                           void* workSpace, size_t workSpaceSize);
114810 +/*! HIST_count_simple() :
114811 + *  Same as HIST_countFast(), this function is unsafe,
114812 + *  and will segfault if any value within `src` is `> *maxSymbolValuePtr`.
114813 + *  It is also a bit slower for large inputs.
114814 + *  However, it does not need any additional memory (not even on stack).
114815 + * @return : count of the most frequent symbol.
114816 + *  Note this function doesn't produce any error (i.e. it must succeed).
114817 + */
114818 +unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
114819 +                           const void* src, size_t srcSize);
114820 diff --git a/lib/zstd/compress/huf_compress.c b/lib/zstd/compress/huf_compress.c
114821 new file mode 100644
114822 index 000000000000..ff0e76a2e0e3
114823 --- /dev/null
114824 +++ b/lib/zstd/compress/huf_compress.c
114825 @@ -0,0 +1,901 @@
114826 +/* ******************************************************************
114827 + * Huffman encoder, part of New Generation Entropy library
114828 + * Copyright (c) Yann Collet, Facebook, Inc.
114830 + *  You can contact the author at :
114831 + *  - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
114832 + *  - Public forum : https://groups.google.com/forum/#!forum/lz4c
114834 + * This source code is licensed under both the BSD-style license (found in the
114835 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
114836 + * in the COPYING file in the root directory of this source tree).
114837 + * You may select, at your option, one of the above-listed licenses.
114838 +****************************************************************** */
114840 +/* **************************************************************
114841 +*  Compiler specifics
114842 +****************************************************************/
114845 +/* **************************************************************
114846 +*  Includes
114847 +****************************************************************/
114848 +#include "../common/zstd_deps.h"     /* ZSTD_memcpy, ZSTD_memset */
114849 +#include "../common/compiler.h"
114850 +#include "../common/bitstream.h"
114851 +#include "hist.h"
114852 +#define FSE_STATIC_LINKING_ONLY   /* FSE_optimalTableLog_internal */
114853 +#include "../common/fse.h"        /* header compression */
114854 +#define HUF_STATIC_LINKING_ONLY
114855 +#include "../common/huf.h"
114856 +#include "../common/error_private.h"
114859 +/* **************************************************************
114860 +*  Error Management
114861 +****************************************************************/
114862 +#define HUF_isError ERR_isError
114863 +#define HUF_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)   /* use only *after* variable declarations */
114866 +/* **************************************************************
114867 +*  Utils
114868 +****************************************************************/
114869 +unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
114871 +    return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
114875 +/* *******************************************************
114876 +*  HUF : Huffman block compression
114877 +*********************************************************/
114878 +/* HUF_compressWeights() :
114879 + * Same as FSE_compress(), but dedicated to huff0's weights compression.
114880 + * The use case needs much less stack memory.
114881 + * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
114882 + */
114883 +#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6
114885 +typedef struct {
114886 +    FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];
114887 +    U32 scratchBuffer[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(HUF_TABLELOG_MAX, MAX_FSE_TABLELOG_FOR_HUFF_HEADER)];
114888 +    unsigned count[HUF_TABLELOG_MAX+1];
114889 +    S16 norm[HUF_TABLELOG_MAX+1];
114890 +} HUF_CompressWeightsWksp;
114892 +static size_t HUF_compressWeights(void* dst, size_t dstSize, const void* weightTable, size_t wtSize, void* workspace, size_t workspaceSize)
114894 +    BYTE* const ostart = (BYTE*) dst;
114895 +    BYTE* op = ostart;
114896 +    BYTE* const oend = ostart + dstSize;
114898 +    unsigned maxSymbolValue = HUF_TABLELOG_MAX;
114899 +    U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
114900 +    HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)workspace;
114902 +    if (workspaceSize < sizeof(HUF_CompressWeightsWksp)) return ERROR(GENERIC);
114904 +    /* init conditions */
114905 +    if (wtSize <= 1) return 0;  /* Not compressible */
114907 +    /* Scan input and build symbol stats */
114908 +    {   unsigned const maxCount = HIST_count_simple(wksp->count, &maxSymbolValue, weightTable, wtSize);   /* never fails */
114909 +        if (maxCount == wtSize) return 1;   /* only a single symbol in src : rle */
114910 +        if (maxCount == 1) return 0;        /* each symbol present maximum once => not compressible */
114911 +    }
114913 +    tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
114914 +    CHECK_F( FSE_normalizeCount(wksp->norm, tableLog, wksp->count, wtSize, maxSymbolValue, /* useLowProbCount */ 0) );
114916 +    /* Write table description header */
114917 +    {   CHECK_V_F(hSize, FSE_writeNCount(op, (size_t)(oend-op), wksp->norm, maxSymbolValue, tableLog) );
114918 +        op += hSize;
114919 +    }
114921 +    /* Compress */
114922 +    CHECK_F( FSE_buildCTable_wksp(wksp->CTable, wksp->norm, maxSymbolValue, tableLog, wksp->scratchBuffer, sizeof(wksp->scratchBuffer)) );
114923 +    {   CHECK_V_F(cSize, FSE_compress_usingCTable(op, (size_t)(oend - op), weightTable, wtSize, wksp->CTable) );
114924 +        if (cSize == 0) return 0;   /* not enough space for compressed data */
114925 +        op += cSize;
114926 +    }
114928 +    return (size_t)(op-ostart);
114932 +typedef struct {
114933 +    HUF_CompressWeightsWksp wksp;
114934 +    BYTE bitsToWeight[HUF_TABLELOG_MAX + 1];   /* precomputed conversion table */
114935 +    BYTE huffWeight[HUF_SYMBOLVALUE_MAX];
114936 +} HUF_WriteCTableWksp;
114938 +size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize,
114939 +                            const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog,
114940 +                            void* workspace, size_t workspaceSize)
114942 +    BYTE* op = (BYTE*)dst;
114943 +    U32 n;
114944 +    HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)workspace;
114946 +    /* check conditions */
114947 +    if (workspaceSize < sizeof(HUF_WriteCTableWksp)) return ERROR(GENERIC);
114948 +    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
114950 +    /* convert to weight */
114951 +    wksp->bitsToWeight[0] = 0;
114952 +    for (n=1; n<huffLog+1; n++)
114953 +        wksp->bitsToWeight[n] = (BYTE)(huffLog + 1 - n);
114954 +    for (n=0; n<maxSymbolValue; n++)
114955 +        wksp->huffWeight[n] = wksp->bitsToWeight[CTable[n].nbBits];
114957 +    /* attempt weights compression by FSE */
114958 +    {   CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, wksp->huffWeight, maxSymbolValue, &wksp->wksp, sizeof(wksp->wksp)) );
114959 +        if ((hSize>1) & (hSize < maxSymbolValue/2)) {   /* FSE compressed */
114960 +            op[0] = (BYTE)hSize;
114961 +            return hSize+1;
114962 +    }   }
114964 +    /* write raw values as 4-bits (max : 15) */
114965 +    if (maxSymbolValue > (256-128)) return ERROR(GENERIC);   /* should not happen : likely means source cannot be compressed */
114966 +    if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall);   /* not enough space within dst buffer */
114967 +    op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1));
114968 +    wksp->huffWeight[maxSymbolValue] = 0;   /* to be sure it doesn't cause msan issue in final combination */
114969 +    for (n=0; n<maxSymbolValue; n+=2)
114970 +        op[(n/2)+1] = (BYTE)((wksp->huffWeight[n] << 4) + wksp->huffWeight[n+1]);
114971 +    return ((maxSymbolValue+1)/2) + 1;
114974 +/*! HUF_writeCTable() :
114975 +    `CTable` : Huffman tree to save, using huf representation.
114976 +    @return : size of saved CTable */
114977 +size_t HUF_writeCTable (void* dst, size_t maxDstSize,
114978 +                        const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog)
114980 +    HUF_WriteCTableWksp wksp;
114981 +    return HUF_writeCTable_wksp(dst, maxDstSize, CTable, maxSymbolValue, huffLog, &wksp, sizeof(wksp));
114985 +size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* hasZeroWeights)
114987 +    BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];   /* init not required, even though some static analyzer may complain */
114988 +    U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1];   /* large enough for values from 0 to 16 */
114989 +    U32 tableLog = 0;
114990 +    U32 nbSymbols = 0;
114992 +    /* get symbol weights */
114993 +    CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize));
114994 +    *hasZeroWeights = (rankVal[0] > 0);
114996 +    /* check result */
114997 +    if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
114998 +    if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall);
115000 +    /* Prepare base value per rank */
115001 +    {   U32 n, nextRankStart = 0;
115002 +        for (n=1; n<=tableLog; n++) {
115003 +            U32 curr = nextRankStart;
115004 +            nextRankStart += (rankVal[n] << (n-1));
115005 +            rankVal[n] = curr;
115006 +    }   }
115008 +    /* fill nbBits */
115009 +    {   U32 n; for (n=0; n<nbSymbols; n++) {
115010 +            const U32 w = huffWeight[n];
115011 +            CTable[n].nbBits = (BYTE)(tableLog + 1 - w) & -(w != 0);
115012 +    }   }
115014 +    /* fill val */
115015 +    {   U16 nbPerRank[HUF_TABLELOG_MAX+2]  = {0};  /* support w=0=>n=tableLog+1 */
115016 +        U16 valPerRank[HUF_TABLELOG_MAX+2] = {0};
115017 +        { U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[CTable[n].nbBits]++; }
115018 +        /* determine stating value per rank */
115019 +        valPerRank[tableLog+1] = 0;   /* for w==0 */
115020 +        {   U16 min = 0;
115021 +            U32 n; for (n=tableLog; n>0; n--) {  /* start at n=tablelog <-> w=1 */
115022 +                valPerRank[n] = min;     /* get starting value within each rank */
115023 +                min += nbPerRank[n];
115024 +                min >>= 1;
115025 +        }   }
115026 +        /* assign value within rank, symbol order */
115027 +        { U32 n; for (n=0; n<nbSymbols; n++) CTable[n].val = valPerRank[CTable[n].nbBits]++; }
115028 +    }
115030 +    *maxSymbolValuePtr = nbSymbols - 1;
115031 +    return readSize;
115034 +U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue)
115036 +    const HUF_CElt* table = (const HUF_CElt*)symbolTable;
115037 +    assert(symbolValue <= HUF_SYMBOLVALUE_MAX);
115038 +    return table[symbolValue].nbBits;
115042 +typedef struct nodeElt_s {
115043 +    U32 count;
115044 +    U16 parent;
115045 +    BYTE byte;
115046 +    BYTE nbBits;
115047 +} nodeElt;
115050 + * HUF_setMaxHeight():
115051 + * Enforces maxNbBits on the Huffman tree described in huffNode.
115053 + * It sets all nodes with nbBits > maxNbBits to be maxNbBits. Then it adjusts
115054 + * the tree to so that it is a valid canonical Huffman tree.
115056 + * @pre               The sum of the ranks of each symbol == 2^largestBits,
115057 + *                    where largestBits == huffNode[lastNonNull].nbBits.
115058 + * @post              The sum of the ranks of each symbol == 2^largestBits,
115059 + *                    where largestBits is the return value <= maxNbBits.
115061 + * @param huffNode    The Huffman tree modified in place to enforce maxNbBits.
115062 + * @param lastNonNull The symbol with the lowest count in the Huffman tree.
115063 + * @param maxNbBits   The maximum allowed number of bits, which the Huffman tree
115064 + *                    may not respect. After this function the Huffman tree will
115065 + *                    respect maxNbBits.
115066 + * @return            The maximum number of bits of the Huffman tree after adjustment,
115067 + *                    necessarily no more than maxNbBits.
115068 + */
115069 +static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
115071 +    const U32 largestBits = huffNode[lastNonNull].nbBits;
115072 +    /* early exit : no elt > maxNbBits, so the tree is already valid. */
115073 +    if (largestBits <= maxNbBits) return largestBits;
115075 +    /* there are several too large elements (at least >= 2) */
115076 +    {   int totalCost = 0;
115077 +        const U32 baseCost = 1 << (largestBits - maxNbBits);
115078 +        int n = (int)lastNonNull;
115080 +        /* Adjust any ranks > maxNbBits to maxNbBits.
115081 +         * Compute totalCost, which is how far the sum of the ranks is
115082 +         * we are over 2^largestBits after adjust the offending ranks.
115083 +         */
115084 +        while (huffNode[n].nbBits > maxNbBits) {
115085 +            totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
115086 +            huffNode[n].nbBits = (BYTE)maxNbBits;
115087 +            n--;
115088 +        }
115089 +        /* n stops at huffNode[n].nbBits <= maxNbBits */
115090 +        assert(huffNode[n].nbBits <= maxNbBits);
115091 +        /* n end at index of smallest symbol using < maxNbBits */
115092 +        while (huffNode[n].nbBits == maxNbBits) --n;
115094 +        /* renorm totalCost from 2^largestBits to 2^maxNbBits
115095 +         * note : totalCost is necessarily a multiple of baseCost */
115096 +        assert((totalCost & (baseCost - 1)) == 0);
115097 +        totalCost >>= (largestBits - maxNbBits);
115098 +        assert(totalCost > 0);
115100 +        /* repay normalized cost */
115101 +        {   U32 const noSymbol = 0xF0F0F0F0;
115102 +            U32 rankLast[HUF_TABLELOG_MAX+2];
115104 +            /* Get pos of last (smallest = lowest cum. count) symbol per rank */
115105 +            ZSTD_memset(rankLast, 0xF0, sizeof(rankLast));
115106 +            {   U32 currentNbBits = maxNbBits;
115107 +                int pos;
115108 +                for (pos=n ; pos >= 0; pos--) {
115109 +                    if (huffNode[pos].nbBits >= currentNbBits) continue;
115110 +                    currentNbBits = huffNode[pos].nbBits;   /* < maxNbBits */
115111 +                    rankLast[maxNbBits-currentNbBits] = (U32)pos;
115112 +            }   }
115114 +            while (totalCost > 0) {
115115 +                /* Try to reduce the next power of 2 above totalCost because we
115116 +                 * gain back half the rank.
115117 +                 */
115118 +                U32 nBitsToDecrease = BIT_highbit32((U32)totalCost) + 1;
115119 +                for ( ; nBitsToDecrease > 1; nBitsToDecrease--) {
115120 +                    U32 const highPos = rankLast[nBitsToDecrease];
115121 +                    U32 const lowPos = rankLast[nBitsToDecrease-1];
115122 +                    if (highPos == noSymbol) continue;
115123 +                    /* Decrease highPos if no symbols of lowPos or if it is
115124 +                     * not cheaper to remove 2 lowPos than highPos.
115125 +                     */
115126 +                    if (lowPos == noSymbol) break;
115127 +                    {   U32 const highTotal = huffNode[highPos].count;
115128 +                        U32 const lowTotal = 2 * huffNode[lowPos].count;
115129 +                        if (highTotal <= lowTotal) break;
115130 +                }   }
115131 +                /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) */
115132 +                assert(rankLast[nBitsToDecrease] != noSymbol || nBitsToDecrease == 1);
115133 +                /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */
115134 +                while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))
115135 +                    nBitsToDecrease++;
115136 +                assert(rankLast[nBitsToDecrease] != noSymbol);
115137 +                /* Increase the number of bits to gain back half the rank cost. */
115138 +                totalCost -= 1 << (nBitsToDecrease-1);
115139 +                huffNode[rankLast[nBitsToDecrease]].nbBits++;
115141 +                /* Fix up the new rank.
115142 +                 * If the new rank was empty, this symbol is now its smallest.
115143 +                 * Otherwise, this symbol will be the largest in the new rank so no adjustment.
115144 +                 */
115145 +                if (rankLast[nBitsToDecrease-1] == noSymbol)
115146 +                    rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease];
115147 +                /* Fix up the old rank.
115148 +                 * If the symbol was at position 0, meaning it was the highest weight symbol in the tree,
115149 +                 * it must be the only symbol in its rank, so the old rank now has no symbols.
115150 +                 * Otherwise, since the Huffman nodes are sorted by count, the previous position is now
115151 +                 * the smallest node in the rank. If the previous position belongs to a different rank,
115152 +                 * then the rank is now empty.
115153 +                 */
115154 +                if (rankLast[nBitsToDecrease] == 0)    /* special case, reached largest symbol */
115155 +                    rankLast[nBitsToDecrease] = noSymbol;
115156 +                else {
115157 +                    rankLast[nBitsToDecrease]--;
115158 +                    if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease)
115159 +                        rankLast[nBitsToDecrease] = noSymbol;   /* this rank is now empty */
115160 +                }
115161 +            }   /* while (totalCost > 0) */
115163 +            /* If we've removed too much weight, then we have to add it back.
115164 +             * To avoid overshooting again, we only adjust the smallest rank.
115165 +             * We take the largest nodes from the lowest rank 0 and move them
115166 +             * to rank 1. There's guaranteed to be enough rank 0 symbols because
115167 +             * TODO.
115168 +             */
115169 +            while (totalCost < 0) {  /* Sometimes, cost correction overshoot */
115170 +                /* special case : no rank 1 symbol (using maxNbBits-1);
115171 +                 * let's create one from largest rank 0 (using maxNbBits).
115172 +                 */
115173 +                if (rankLast[1] == noSymbol) {
115174 +                    while (huffNode[n].nbBits == maxNbBits) n--;
115175 +                    huffNode[n+1].nbBits--;
115176 +                    assert(n >= 0);
115177 +                    rankLast[1] = (U32)(n+1);
115178 +                    totalCost++;
115179 +                    continue;
115180 +                }
115181 +                huffNode[ rankLast[1] + 1 ].nbBits--;
115182 +                rankLast[1]++;
115183 +                totalCost ++;
115184 +            }
115185 +        }   /* repay normalized cost */
115186 +    }   /* there are several too large elements (at least >= 2) */
115188 +    return maxNbBits;
115191 +typedef struct {
115192 +    U32 base;
115193 +    U32 curr;
115194 +} rankPos;
115196 +typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32];
115198 +#define RANK_POSITION_TABLE_SIZE 32
115200 +typedef struct {
115201 +  huffNodeTable huffNodeTbl;
115202 +  rankPos rankPosition[RANK_POSITION_TABLE_SIZE];
115203 +} HUF_buildCTable_wksp_tables;
115206 + * HUF_sort():
115207 + * Sorts the symbols [0, maxSymbolValue] by count[symbol] in decreasing order.
115209 + * @param[out] huffNode       Sorted symbols by decreasing count. Only members `.count` and `.byte` are filled.
115210 + *                            Must have (maxSymbolValue + 1) entries.
115211 + * @param[in]  count          Histogram of the symbols.
115212 + * @param[in]  maxSymbolValue Maximum symbol value.
115213 + * @param      rankPosition   This is a scratch workspace. Must have RANK_POSITION_TABLE_SIZE entries.
115214 + */
115215 +static void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValue, rankPos* rankPosition)
115217 +    int n;
115218 +    int const maxSymbolValue1 = (int)maxSymbolValue + 1;
115220 +    /* Compute base and set curr to base.
115221 +     * For symbol s let lowerRank = BIT_highbit32(count[n]+1) and rank = lowerRank + 1.
115222 +     * Then 2^lowerRank <= count[n]+1 <= 2^rank.
115223 +     * We attribute each symbol to lowerRank's base value, because we want to know where
115224 +     * each rank begins in the output, so for rank R we want to count ranks R+1 and above.
115225 +     */
115226 +    ZSTD_memset(rankPosition, 0, sizeof(*rankPosition) * RANK_POSITION_TABLE_SIZE);
115227 +    for (n = 0; n < maxSymbolValue1; ++n) {
115228 +        U32 lowerRank = BIT_highbit32(count[n] + 1);
115229 +        rankPosition[lowerRank].base++;
115230 +    }
115231 +    assert(rankPosition[RANK_POSITION_TABLE_SIZE - 1].base == 0);
115232 +    for (n = RANK_POSITION_TABLE_SIZE - 1; n > 0; --n) {
115233 +        rankPosition[n-1].base += rankPosition[n].base;
115234 +        rankPosition[n-1].curr = rankPosition[n-1].base;
115235 +    }
115236 +    /* Sort */
115237 +    for (n = 0; n < maxSymbolValue1; ++n) {
115238 +        U32 const c = count[n];
115239 +        U32 const r = BIT_highbit32(c+1) + 1;
115240 +        U32 pos = rankPosition[r].curr++;
115241 +        /* Insert into the correct position in the rank.
115242 +         * We have at most 256 symbols, so this insertion should be fine.
115243 +         */
115244 +        while ((pos > rankPosition[r].base) && (c > huffNode[pos-1].count)) {
115245 +            huffNode[pos] = huffNode[pos-1];
115246 +            pos--;
115247 +        }
115248 +        huffNode[pos].count = c;
115249 +        huffNode[pos].byte  = (BYTE)n;
115250 +    }
115254 +/** HUF_buildCTable_wksp() :
115255 + *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
115256 + *  `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as sizeof(HUF_buildCTable_wksp_tables).
115257 + */
115258 +#define STARTNODE (HUF_SYMBOLVALUE_MAX+1)
115260 +/* HUF_buildTree():
115261 + * Takes the huffNode array sorted by HUF_sort() and builds an unlimited-depth Huffman tree.
115263 + * @param huffNode        The array sorted by HUF_sort(). Builds the Huffman tree in this array.
115264 + * @param maxSymbolValue  The maximum symbol value.
115265 + * @return                The smallest node in the Huffman tree (by count).
115266 + */
115267 +static int HUF_buildTree(nodeElt* huffNode, U32 maxSymbolValue)
115269 +    nodeElt* const huffNode0 = huffNode - 1;
115270 +    int nonNullRank;
115271 +    int lowS, lowN;
115272 +    int nodeNb = STARTNODE;
115273 +    int n, nodeRoot;
115274 +    /* init for parents */
115275 +    nonNullRank = (int)maxSymbolValue;
115276 +    while(huffNode[nonNullRank].count == 0) nonNullRank--;
115277 +    lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb;
115278 +    huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count;
115279 +    huffNode[lowS].parent = huffNode[lowS-1].parent = (U16)nodeNb;
115280 +    nodeNb++; lowS-=2;
115281 +    for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30);
115282 +    huffNode0[0].count = (U32)(1U<<31);  /* fake entry, strong barrier */
115284 +    /* create parents */
115285 +    while (nodeNb <= nodeRoot) {
115286 +        int const n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
115287 +        int const n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
115288 +        huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;
115289 +        huffNode[n1].parent = huffNode[n2].parent = (U16)nodeNb;
115290 +        nodeNb++;
115291 +    }
115293 +    /* distribute weights (unlimited tree height) */
115294 +    huffNode[nodeRoot].nbBits = 0;
115295 +    for (n=nodeRoot-1; n>=STARTNODE; n--)
115296 +        huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
115297 +    for (n=0; n<=nonNullRank; n++)
115298 +        huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
115300 +    return nonNullRank;
115304 + * HUF_buildCTableFromTree():
115305 + * Build the CTable given the Huffman tree in huffNode.
115307 + * @param[out] CTable         The output Huffman CTable.
115308 + * @param      huffNode       The Huffman tree.
115309 + * @param      nonNullRank    The last and smallest node in the Huffman tree.
115310 + * @param      maxSymbolValue The maximum symbol value.
115311 + * @param      maxNbBits      The exact maximum number of bits used in the Huffman tree.
115312 + */
115313 +static void HUF_buildCTableFromTree(HUF_CElt* CTable, nodeElt const* huffNode, int nonNullRank, U32 maxSymbolValue, U32 maxNbBits)
115315 +    /* fill result into ctable (val, nbBits) */
115316 +    int n;
115317 +    U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0};
115318 +    U16 valPerRank[HUF_TABLELOG_MAX+1] = {0};
115319 +    int const alphabetSize = (int)(maxSymbolValue + 1);
115320 +    for (n=0; n<=nonNullRank; n++)
115321 +        nbPerRank[huffNode[n].nbBits]++;
115322 +    /* determine starting value per rank */
115323 +    {   U16 min = 0;
115324 +        for (n=(int)maxNbBits; n>0; n--) {
115325 +            valPerRank[n] = min;      /* get starting value within each rank */
115326 +            min += nbPerRank[n];
115327 +            min >>= 1;
115328 +    }   }
115329 +    for (n=0; n<alphabetSize; n++)
115330 +        CTable[huffNode[n].byte].nbBits = huffNode[n].nbBits;   /* push nbBits per symbol, symbol order */
115331 +    for (n=0; n<alphabetSize; n++)
115332 +        CTable[n].val = valPerRank[CTable[n].nbBits]++;   /* assign value within rank, symbol order */
115335 +size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
115337 +    HUF_buildCTable_wksp_tables* const wksp_tables = (HUF_buildCTable_wksp_tables*)workSpace;
115338 +    nodeElt* const huffNode0 = wksp_tables->huffNodeTbl;
115339 +    nodeElt* const huffNode = huffNode0+1;
115340 +    int nonNullRank;
115342 +    /* safety checks */
115343 +    if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC);  /* must be aligned on 4-bytes boundaries */
115344 +    if (wkspSize < sizeof(HUF_buildCTable_wksp_tables))
115345 +      return ERROR(workSpace_tooSmall);
115346 +    if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT;
115347 +    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
115348 +      return ERROR(maxSymbolValue_tooLarge);
115349 +    ZSTD_memset(huffNode0, 0, sizeof(huffNodeTable));
115351 +    /* sort, decreasing order */
115352 +    HUF_sort(huffNode, count, maxSymbolValue, wksp_tables->rankPosition);
115354 +    /* build tree */
115355 +    nonNullRank = HUF_buildTree(huffNode, maxSymbolValue);
115357 +    /* enforce maxTableLog */
115358 +    maxNbBits = HUF_setMaxHeight(huffNode, (U32)nonNullRank, maxNbBits);
115359 +    if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC);   /* check fit into table */
115361 +    HUF_buildCTableFromTree(tree, huffNode, nonNullRank, maxSymbolValue, maxNbBits);
115363 +    return maxNbBits;
115366 +size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue)
115368 +    size_t nbBits = 0;
115369 +    int s;
115370 +    for (s = 0; s <= (int)maxSymbolValue; ++s) {
115371 +        nbBits += CTable[s].nbBits * count[s];
115372 +    }
115373 +    return nbBits >> 3;
115376 +int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) {
115377 +  int bad = 0;
115378 +  int s;
115379 +  for (s = 0; s <= (int)maxSymbolValue; ++s) {
115380 +    bad |= (count[s] != 0) & (CTable[s].nbBits == 0);
115381 +  }
115382 +  return !bad;
115385 +size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
115387 +FORCE_INLINE_TEMPLATE void
115388 +HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable)
115390 +    BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits);
115393 +#define HUF_FLUSHBITS(s)  BIT_flushBits(s)
115395 +#define HUF_FLUSHBITS_1(stream) \
115396 +    if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*2+7) HUF_FLUSHBITS(stream)
115398 +#define HUF_FLUSHBITS_2(stream) \
115399 +    if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*4+7) HUF_FLUSHBITS(stream)
115401 +FORCE_INLINE_TEMPLATE size_t
115402 +HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize,
115403 +                                   const void* src, size_t srcSize,
115404 +                                   const HUF_CElt* CTable)
115406 +    const BYTE* ip = (const BYTE*) src;
115407 +    BYTE* const ostart = (BYTE*)dst;
115408 +    BYTE* const oend = ostart + dstSize;
115409 +    BYTE* op = ostart;
115410 +    size_t n;
115411 +    BIT_CStream_t bitC;
115413 +    /* init */
115414 +    if (dstSize < 8) return 0;   /* not enough space to compress */
115415 +    { size_t const initErr = BIT_initCStream(&bitC, op, (size_t)(oend-op));
115416 +      if (HUF_isError(initErr)) return 0; }
115418 +    n = srcSize & ~3;  /* join to mod 4 */
115419 +    switch (srcSize & 3)
115420 +    {
115421 +        case 3 : HUF_encodeSymbol(&bitC, ip[n+ 2], CTable);
115422 +                 HUF_FLUSHBITS_2(&bitC);
115423 +                /* fall-through */
115424 +        case 2 : HUF_encodeSymbol(&bitC, ip[n+ 1], CTable);
115425 +                 HUF_FLUSHBITS_1(&bitC);
115426 +                /* fall-through */
115427 +        case 1 : HUF_encodeSymbol(&bitC, ip[n+ 0], CTable);
115428 +                 HUF_FLUSHBITS(&bitC);
115429 +                /* fall-through */
115430 +        case 0 : /* fall-through */
115431 +        default: break;
115432 +    }
115434 +    for (; n>0; n-=4) {  /* note : n&3==0 at this stage */
115435 +        HUF_encodeSymbol(&bitC, ip[n- 1], CTable);
115436 +        HUF_FLUSHBITS_1(&bitC);
115437 +        HUF_encodeSymbol(&bitC, ip[n- 2], CTable);
115438 +        HUF_FLUSHBITS_2(&bitC);
115439 +        HUF_encodeSymbol(&bitC, ip[n- 3], CTable);
115440 +        HUF_FLUSHBITS_1(&bitC);
115441 +        HUF_encodeSymbol(&bitC, ip[n- 4], CTable);
115442 +        HUF_FLUSHBITS(&bitC);
115443 +    }
115445 +    return BIT_closeCStream(&bitC);
115448 +#if DYNAMIC_BMI2
115450 +static TARGET_ATTRIBUTE("bmi2") size_t
115451 +HUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize,
115452 +                                   const void* src, size_t srcSize,
115453 +                                   const HUF_CElt* CTable)
115455 +    return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
115458 +static size_t
115459 +HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize,
115460 +                                      const void* src, size_t srcSize,
115461 +                                      const HUF_CElt* CTable)
115463 +    return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
115466 +static size_t
115467 +HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
115468 +                              const void* src, size_t srcSize,
115469 +                              const HUF_CElt* CTable, const int bmi2)
115471 +    if (bmi2) {
115472 +        return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable);
115473 +    }
115474 +    return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable);
115477 +#else
115479 +static size_t
115480 +HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
115481 +                              const void* src, size_t srcSize,
115482 +                              const HUF_CElt* CTable, const int bmi2)
115484 +    (void)bmi2;
115485 +    return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
115488 +#endif
115490 +size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
115492 +    return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
115496 +static size_t
115497 +HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
115498 +                              const void* src, size_t srcSize,
115499 +                              const HUF_CElt* CTable, int bmi2)
115501 +    size_t const segmentSize = (srcSize+3)/4;   /* first 3 segments */
115502 +    const BYTE* ip = (const BYTE*) src;
115503 +    const BYTE* const iend = ip + srcSize;
115504 +    BYTE* const ostart = (BYTE*) dst;
115505 +    BYTE* const oend = ostart + dstSize;
115506 +    BYTE* op = ostart;
115508 +    if (dstSize < 6 + 1 + 1 + 1 + 8) return 0;   /* minimum space to compress successfully */
115509 +    if (srcSize < 12) return 0;   /* no saving possible : too small input */
115510 +    op += 6;   /* jumpTable */
115512 +    assert(op <= oend);
115513 +    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
115514 +        if (cSize==0) return 0;
115515 +        assert(cSize <= 65535);
115516 +        MEM_writeLE16(ostart, (U16)cSize);
115517 +        op += cSize;
115518 +    }
115520 +    ip += segmentSize;
115521 +    assert(op <= oend);
115522 +    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
115523 +        if (cSize==0) return 0;
115524 +        assert(cSize <= 65535);
115525 +        MEM_writeLE16(ostart+2, (U16)cSize);
115526 +        op += cSize;
115527 +    }
115529 +    ip += segmentSize;
115530 +    assert(op <= oend);
115531 +    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
115532 +        if (cSize==0) return 0;
115533 +        assert(cSize <= 65535);
115534 +        MEM_writeLE16(ostart+4, (U16)cSize);
115535 +        op += cSize;
115536 +    }
115538 +    ip += segmentSize;
115539 +    assert(op <= oend);
115540 +    assert(ip <= iend);
115541 +    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, bmi2) );
115542 +        if (cSize==0) return 0;
115543 +        op += cSize;
115544 +    }
115546 +    return (size_t)(op-ostart);
115549 +size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
115551 +    return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
115554 +typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e;
115556 +static size_t HUF_compressCTable_internal(
115557 +                BYTE* const ostart, BYTE* op, BYTE* const oend,
115558 +                const void* src, size_t srcSize,
115559 +                HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int bmi2)
115561 +    size_t const cSize = (nbStreams==HUF_singleStream) ?
115562 +                         HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2) :
115563 +                         HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2);
115564 +    if (HUF_isError(cSize)) { return cSize; }
115565 +    if (cSize==0) { return 0; }   /* uncompressible */
115566 +    op += cSize;
115567 +    /* check compressibility */
115568 +    assert(op >= ostart);
115569 +    if ((size_t)(op-ostart) >= srcSize-1) { return 0; }
115570 +    return (size_t)(op-ostart);
115573 +typedef struct {
115574 +    unsigned count[HUF_SYMBOLVALUE_MAX + 1];
115575 +    HUF_CElt CTable[HUF_SYMBOLVALUE_MAX + 1];
115576 +    union {
115577 +        HUF_buildCTable_wksp_tables buildCTable_wksp;
115578 +        HUF_WriteCTableWksp writeCTable_wksp;
115579 +    } wksps;
115580 +} HUF_compress_tables_t;
115582 +/* HUF_compress_internal() :
115583 + * `workSpace_align4` must be aligned on 4-bytes boundaries,
115584 + * and occupies the same space as a table of HUF_WORKSPACE_SIZE_U32 unsigned */
115585 +static size_t
115586 +HUF_compress_internal (void* dst, size_t dstSize,
115587 +                 const void* src, size_t srcSize,
115588 +                       unsigned maxSymbolValue, unsigned huffLog,
115589 +                       HUF_nbStreams_e nbStreams,
115590 +                       void* workSpace_align4, size_t wkspSize,
115591 +                       HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat,
115592 +                 const int bmi2)
115594 +    HUF_compress_tables_t* const table = (HUF_compress_tables_t*)workSpace_align4;
115595 +    BYTE* const ostart = (BYTE*)dst;
115596 +    BYTE* const oend = ostart + dstSize;
115597 +    BYTE* op = ostart;
115599 +    HUF_STATIC_ASSERT(sizeof(*table) <= HUF_WORKSPACE_SIZE);
115600 +    assert(((size_t)workSpace_align4 & 3) == 0);   /* must be aligned on 4-bytes boundaries */
115602 +    /* checks & inits */
115603 +    if (wkspSize < HUF_WORKSPACE_SIZE) return ERROR(workSpace_tooSmall);
115604 +    if (!srcSize) return 0;  /* Uncompressed */
115605 +    if (!dstSize) return 0;  /* cannot fit anything within dst budget */
115606 +    if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong);   /* current block size limit */
115607 +    if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
115608 +    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
115609 +    if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX;
115610 +    if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;
115612 +    /* Heuristic : If old table is valid, use it for small inputs */
115613 +    if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
115614 +        return HUF_compressCTable_internal(ostart, op, oend,
115615 +                                           src, srcSize,
115616 +                                           nbStreams, oldHufTable, bmi2);
115617 +    }
115619 +    /* Scan input and build symbol stats */
115620 +    {   CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, workSpace_align4, wkspSize) );
115621 +        if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; }   /* single symbol, rle */
115622 +        if (largest <= (srcSize >> 7)+4) return 0;   /* heuristic : probably not compressible enough */
115623 +    }
115625 +    /* Check validity of previous table */
115626 +    if ( repeat
115627 +      && *repeat == HUF_repeat_check
115628 +      && !HUF_validateCTable(oldHufTable, table->count, maxSymbolValue)) {
115629 +        *repeat = HUF_repeat_none;
115630 +    }
115631 +    /* Heuristic : use existing table for small inputs */
115632 +    if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
115633 +        return HUF_compressCTable_internal(ostart, op, oend,
115634 +                                           src, srcSize,
115635 +                                           nbStreams, oldHufTable, bmi2);
115636 +    }
115638 +    /* Build Huffman Tree */
115639 +    huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
115640 +    {   size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count,
115641 +                                            maxSymbolValue, huffLog,
115642 +                                            &table->wksps.buildCTable_wksp, sizeof(table->wksps.buildCTable_wksp));
115643 +        CHECK_F(maxBits);
115644 +        huffLog = (U32)maxBits;
115645 +        /* Zero unused symbols in CTable, so we can check it for validity */
115646 +        ZSTD_memset(table->CTable + (maxSymbolValue + 1), 0,
115647 +               sizeof(table->CTable) - ((maxSymbolValue + 1) * sizeof(HUF_CElt)));
115648 +    }
115650 +    /* Write table description header */
115651 +    {   CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, table->CTable, maxSymbolValue, huffLog,
115652 +                                              &table->wksps.writeCTable_wksp, sizeof(table->wksps.writeCTable_wksp)) );
115653 +        /* Check if using previous huffman table is beneficial */
115654 +        if (repeat && *repeat != HUF_repeat_none) {
115655 +            size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue);
115656 +            size_t const newSize = HUF_estimateCompressedSize(table->CTable, table->count, maxSymbolValue);
115657 +            if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
115658 +                return HUF_compressCTable_internal(ostart, op, oend,
115659 +                                                   src, srcSize,
115660 +                                                   nbStreams, oldHufTable, bmi2);
115661 +        }   }
115663 +        /* Use the new huffman table */
115664 +        if (hSize + 12ul >= srcSize) { return 0; }
115665 +        op += hSize;
115666 +        if (repeat) { *repeat = HUF_repeat_none; }
115667 +        if (oldHufTable)
115668 +            ZSTD_memcpy(oldHufTable, table->CTable, sizeof(table->CTable));  /* Save new table */
115669 +    }
115670 +    return HUF_compressCTable_internal(ostart, op, oend,
115671 +                                       src, srcSize,
115672 +                                       nbStreams, table->CTable, bmi2);
115676 +size_t HUF_compress1X_wksp (void* dst, size_t dstSize,
115677 +                      const void* src, size_t srcSize,
115678 +                      unsigned maxSymbolValue, unsigned huffLog,
115679 +                      void* workSpace, size_t wkspSize)
115681 +    return HUF_compress_internal(dst, dstSize, src, srcSize,
115682 +                                 maxSymbolValue, huffLog, HUF_singleStream,
115683 +                                 workSpace, wkspSize,
115684 +                                 NULL, NULL, 0, 0 /*bmi2*/);
115687 +size_t HUF_compress1X_repeat (void* dst, size_t dstSize,
115688 +                      const void* src, size_t srcSize,
115689 +                      unsigned maxSymbolValue, unsigned huffLog,
115690 +                      void* workSpace, size_t wkspSize,
115691 +                      HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2)
115693 +    return HUF_compress_internal(dst, dstSize, src, srcSize,
115694 +                                 maxSymbolValue, huffLog, HUF_singleStream,
115695 +                                 workSpace, wkspSize, hufTable,
115696 +                                 repeat, preferRepeat, bmi2);
115699 +/* HUF_compress4X_repeat():
115700 + * compress input using 4 streams.
115701 + * provide workspace to generate compression tables */
115702 +size_t HUF_compress4X_wksp (void* dst, size_t dstSize,
115703 +                      const void* src, size_t srcSize,
115704 +                      unsigned maxSymbolValue, unsigned huffLog,
115705 +                      void* workSpace, size_t wkspSize)
115707 +    return HUF_compress_internal(dst, dstSize, src, srcSize,
115708 +                                 maxSymbolValue, huffLog, HUF_fourStreams,
115709 +                                 workSpace, wkspSize,
115710 +                                 NULL, NULL, 0, 0 /*bmi2*/);
115713 +/* HUF_compress4X_repeat():
115714 + * compress input using 4 streams.
115715 + * re-use an existing huffman compression table */
115716 +size_t HUF_compress4X_repeat (void* dst, size_t dstSize,
115717 +                      const void* src, size_t srcSize,
115718 +                      unsigned maxSymbolValue, unsigned huffLog,
115719 +                      void* workSpace, size_t wkspSize,
115720 +                      HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2)
115722 +    return HUF_compress_internal(dst, dstSize, src, srcSize,
115723 +                                 maxSymbolValue, huffLog, HUF_fourStreams,
115724 +                                 workSpace, wkspSize,
115725 +                                 hufTable, repeat, preferRepeat, bmi2);
115727 diff --git a/lib/zstd/compress/zstd_compress.c b/lib/zstd/compress/zstd_compress.c
115728 new file mode 100644
115729 index 000000000000..78aa14c50dd2
115730 --- /dev/null
115731 +++ b/lib/zstd/compress/zstd_compress.c
115732 @@ -0,0 +1,5105 @@
115734 + * Copyright (c) Yann Collet, Facebook, Inc.
115735 + * All rights reserved.
115737 + * This source code is licensed under both the BSD-style license (found in the
115738 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
115739 + * in the COPYING file in the root directory of this source tree).
115740 + * You may select, at your option, one of the above-listed licenses.
115741 + */
115743 +/*-*************************************
115744 +*  Dependencies
115745 +***************************************/
115746 +#include "../common/zstd_deps.h"  /* INT_MAX, ZSTD_memset, ZSTD_memcpy */
115747 +#include "../common/cpu.h"
115748 +#include "../common/mem.h"
115749 +#include "hist.h"           /* HIST_countFast_wksp */
115750 +#define FSE_STATIC_LINKING_ONLY   /* FSE_encodeSymbol */
115751 +#include "../common/fse.h"
115752 +#define HUF_STATIC_LINKING_ONLY
115753 +#include "../common/huf.h"
115754 +#include "zstd_compress_internal.h"
115755 +#include "zstd_compress_sequences.h"
115756 +#include "zstd_compress_literals.h"
115757 +#include "zstd_fast.h"
115758 +#include "zstd_double_fast.h"
115759 +#include "zstd_lazy.h"
115760 +#include "zstd_opt.h"
115761 +#include "zstd_ldm.h"
115762 +#include "zstd_compress_superblock.h"
115764 +/* ***************************************************************
115765 +*  Tuning parameters
115766 +*****************************************************************/
115768 + * COMPRESS_HEAPMODE :
115769 + * Select how default decompression function ZSTD_compress() allocates its context,
115770 + * on stack (0, default), or into heap (1).
115771 + * Note that functions with explicit context such as ZSTD_compressCCtx() are unaffected.
115772 + */
115775 +/*-*************************************
115776 +*  Helper functions
115777 +***************************************/
115778 +/* ZSTD_compressBound()
115779 + * Note that the result from this function is only compatible with the "normal"
115780 + * full-block strategy.
115781 + * When there are a lot of small blocks due to frequent flush in streaming mode
115782 + * the overhead of headers can make the compressed data to be larger than the
115783 + * return value of ZSTD_compressBound().
115784 + */
115785 +size_t ZSTD_compressBound(size_t srcSize) {
115786 +    return ZSTD_COMPRESSBOUND(srcSize);
115790 +/*-*************************************
115791 +*  Context memory management
115792 +***************************************/
115793 +struct ZSTD_CDict_s {
115794 +    const void* dictContent;
115795 +    size_t dictContentSize;
115796 +    ZSTD_dictContentType_e dictContentType; /* The dictContentType the CDict was created with */
115797 +    U32* entropyWorkspace; /* entropy workspace of HUF_WORKSPACE_SIZE bytes */
115798 +    ZSTD_cwksp workspace;
115799 +    ZSTD_matchState_t matchState;
115800 +    ZSTD_compressedBlockState_t cBlockState;
115801 +    ZSTD_customMem customMem;
115802 +    U32 dictID;
115803 +    int compressionLevel; /* 0 indicates that advanced API was used to select CDict params */
115804 +};  /* typedef'd to ZSTD_CDict within "zstd.h" */
115806 +ZSTD_CCtx* ZSTD_createCCtx(void)
115808 +    return ZSTD_createCCtx_advanced(ZSTD_defaultCMem);
115811 +static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager)
115813 +    assert(cctx != NULL);
115814 +    ZSTD_memset(cctx, 0, sizeof(*cctx));
115815 +    cctx->customMem = memManager;
115816 +    cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
115817 +    {   size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters);
115818 +        assert(!ZSTD_isError(err));
115819 +        (void)err;
115820 +    }
115823 +ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
115825 +    ZSTD_STATIC_ASSERT(zcss_init==0);
115826 +    ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));
115827 +    if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
115828 +    {   ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_customMalloc(sizeof(ZSTD_CCtx), customMem);
115829 +        if (!cctx) return NULL;
115830 +        ZSTD_initCCtx(cctx, customMem);
115831 +        return cctx;
115832 +    }
115835 +ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize)
115837 +    ZSTD_cwksp ws;
115838 +    ZSTD_CCtx* cctx;
115839 +    if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL;  /* minimum size */
115840 +    if ((size_t)workspace & 7) return NULL;  /* must be 8-aligned */
115841 +    ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);
115843 +    cctx = (ZSTD_CCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CCtx));
115844 +    if (cctx == NULL) return NULL;
115846 +    ZSTD_memset(cctx, 0, sizeof(ZSTD_CCtx));
115847 +    ZSTD_cwksp_move(&cctx->workspace, &ws);
115848 +    cctx->staticSize = workspaceSize;
115850 +    /* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */
115851 +    if (!ZSTD_cwksp_check_available(&cctx->workspace, ENTROPY_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL;
115852 +    cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
115853 +    cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
115854 +    cctx->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cctx->workspace, ENTROPY_WORKSPACE_SIZE);
115855 +    cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
115856 +    return cctx;
115860 + * Clears and frees all of the dictionaries in the CCtx.
115861 + */
115862 +static void ZSTD_clearAllDicts(ZSTD_CCtx* cctx)
115864 +    ZSTD_customFree(cctx->localDict.dictBuffer, cctx->customMem);
115865 +    ZSTD_freeCDict(cctx->localDict.cdict);
115866 +    ZSTD_memset(&cctx->localDict, 0, sizeof(cctx->localDict));
115867 +    ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));
115868 +    cctx->cdict = NULL;
115871 +static size_t ZSTD_sizeof_localDict(ZSTD_localDict dict)
115873 +    size_t const bufferSize = dict.dictBuffer != NULL ? dict.dictSize : 0;
115874 +    size_t const cdictSize = ZSTD_sizeof_CDict(dict.cdict);
115875 +    return bufferSize + cdictSize;
115878 +static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
115880 +    assert(cctx != NULL);
115881 +    assert(cctx->staticSize == 0);
115882 +    ZSTD_clearAllDicts(cctx);
115883 +    ZSTD_cwksp_free(&cctx->workspace, cctx->customMem);
115886 +size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
115888 +    if (cctx==NULL) return 0;   /* support free on NULL */
115889 +    RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
115890 +                    "not compatible with static CCtx");
115891 +    {
115892 +        int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx);
115893 +        ZSTD_freeCCtxContent(cctx);
115894 +        if (!cctxInWorkspace) {
115895 +            ZSTD_customFree(cctx, cctx->customMem);
115896 +        }
115897 +    }
115898 +    return 0;
115902 +static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx)
115904 +    (void)cctx;
115905 +    return 0;
115909 +size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
115911 +    if (cctx==NULL) return 0;   /* support sizeof on NULL */
115912 +    /* cctx may be in the workspace */
115913 +    return (cctx->workspace.workspace == cctx ? 0 : sizeof(*cctx))
115914 +           + ZSTD_cwksp_sizeof(&cctx->workspace)
115915 +           + ZSTD_sizeof_localDict(cctx->localDict)
115916 +           + ZSTD_sizeof_mtctx(cctx);
115919 +size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
115921 +    return ZSTD_sizeof_CCtx(zcs);  /* same object */
115924 +/* private API call, for dictBuilder only */
115925 +const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }
115927 +/* Returns 1 if compression parameters are such that we should
115928 + * enable long distance matching (wlog >= 27, strategy >= btopt).
115929 + * Returns 0 otherwise.
115930 + */
115931 +static U32 ZSTD_CParams_shouldEnableLdm(const ZSTD_compressionParameters* const cParams) {
115932 +    return cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27;
115935 +static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
115936 +        ZSTD_compressionParameters cParams)
115938 +    ZSTD_CCtx_params cctxParams;
115939 +    /* should not matter, as all cParams are presumed properly defined */
115940 +    ZSTD_CCtxParams_init(&cctxParams, ZSTD_CLEVEL_DEFAULT);
115941 +    cctxParams.cParams = cParams;
115943 +    if (ZSTD_CParams_shouldEnableLdm(&cParams)) {
115944 +        DEBUGLOG(4, "ZSTD_makeCCtxParamsFromCParams(): Including LDM into cctx params");
115945 +        cctxParams.ldmParams.enableLdm = 1;
115946 +        /* LDM is enabled by default for optimal parser and window size >= 128MB */
115947 +        ZSTD_ldm_adjustParameters(&cctxParams.ldmParams, &cParams);
115948 +        assert(cctxParams.ldmParams.hashLog >= cctxParams.ldmParams.bucketSizeLog);
115949 +        assert(cctxParams.ldmParams.hashRateLog < 32);
115950 +    }
115952 +    assert(!ZSTD_checkCParams(cParams));
115953 +    return cctxParams;
115956 +static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced(
115957 +        ZSTD_customMem customMem)
115959 +    ZSTD_CCtx_params* params;
115960 +    if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
115961 +    params = (ZSTD_CCtx_params*)ZSTD_customCalloc(
115962 +            sizeof(ZSTD_CCtx_params), customMem);
115963 +    if (!params) { return NULL; }
115964 +    ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
115965 +    params->customMem = customMem;
115966 +    return params;
115969 +ZSTD_CCtx_params* ZSTD_createCCtxParams(void)
115971 +    return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem);
115974 +size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params)
115976 +    if (params == NULL) { return 0; }
115977 +    ZSTD_customFree(params, params->customMem);
115978 +    return 0;
115981 +size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params)
115983 +    return ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
115986 +size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) {
115987 +    RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
115988 +    ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
115989 +    cctxParams->compressionLevel = compressionLevel;
115990 +    cctxParams->fParams.contentSizeFlag = 1;
115991 +    return 0;
115994 +#define ZSTD_NO_CLEVEL 0
115997 + * Initializes the cctxParams from params and compressionLevel.
115998 + * @param compressionLevel If params are derived from a compression level then that compression level, otherwise ZSTD_NO_CLEVEL.
115999 + */
116000 +static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, ZSTD_parameters const* params, int compressionLevel)
116002 +    assert(!ZSTD_checkCParams(params->cParams));
116003 +    ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
116004 +    cctxParams->cParams = params->cParams;
116005 +    cctxParams->fParams = params->fParams;
116006 +    /* Should not matter, as all cParams are presumed properly defined.
116007 +     * But, set it for tracing anyway.
116008 +     */
116009 +    cctxParams->compressionLevel = compressionLevel;
116012 +size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
116014 +    RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
116015 +    FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , "");
116016 +    ZSTD_CCtxParams_init_internal(cctxParams, &params, ZSTD_NO_CLEVEL);
116017 +    return 0;
116021 + * Sets cctxParams' cParams and fParams from params, but otherwise leaves them alone.
116022 + * @param param Validated zstd parameters.
116023 + */
116024 +static void ZSTD_CCtxParams_setZstdParams(
116025 +        ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params)
116027 +    assert(!ZSTD_checkCParams(params->cParams));
116028 +    cctxParams->cParams = params->cParams;
116029 +    cctxParams->fParams = params->fParams;
116030 +    /* Should not matter, as all cParams are presumed properly defined.
116031 +     * But, set it for tracing anyway.
116032 +     */
116033 +    cctxParams->compressionLevel = ZSTD_NO_CLEVEL;
116036 +ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
116038 +    ZSTD_bounds bounds = { 0, 0, 0 };
116040 +    switch(param)
116041 +    {
116042 +    case ZSTD_c_compressionLevel:
116043 +        bounds.lowerBound = ZSTD_minCLevel();
116044 +        bounds.upperBound = ZSTD_maxCLevel();
116045 +        return bounds;
116047 +    case ZSTD_c_windowLog:
116048 +        bounds.lowerBound = ZSTD_WINDOWLOG_MIN;
116049 +        bounds.upperBound = ZSTD_WINDOWLOG_MAX;
116050 +        return bounds;
116052 +    case ZSTD_c_hashLog:
116053 +        bounds.lowerBound = ZSTD_HASHLOG_MIN;
116054 +        bounds.upperBound = ZSTD_HASHLOG_MAX;
116055 +        return bounds;
116057 +    case ZSTD_c_chainLog:
116058 +        bounds.lowerBound = ZSTD_CHAINLOG_MIN;
116059 +        bounds.upperBound = ZSTD_CHAINLOG_MAX;
116060 +        return bounds;
116062 +    case ZSTD_c_searchLog:
116063 +        bounds.lowerBound = ZSTD_SEARCHLOG_MIN;
116064 +        bounds.upperBound = ZSTD_SEARCHLOG_MAX;
116065 +        return bounds;
116067 +    case ZSTD_c_minMatch:
116068 +        bounds.lowerBound = ZSTD_MINMATCH_MIN;
116069 +        bounds.upperBound = ZSTD_MINMATCH_MAX;
116070 +        return bounds;
116072 +    case ZSTD_c_targetLength:
116073 +        bounds.lowerBound = ZSTD_TARGETLENGTH_MIN;
116074 +        bounds.upperBound = ZSTD_TARGETLENGTH_MAX;
116075 +        return bounds;
116077 +    case ZSTD_c_strategy:
116078 +        bounds.lowerBound = ZSTD_STRATEGY_MIN;
116079 +        bounds.upperBound = ZSTD_STRATEGY_MAX;
116080 +        return bounds;
116082 +    case ZSTD_c_contentSizeFlag:
116083 +        bounds.lowerBound = 0;
116084 +        bounds.upperBound = 1;
116085 +        return bounds;
116087 +    case ZSTD_c_checksumFlag:
116088 +        bounds.lowerBound = 0;
116089 +        bounds.upperBound = 1;
116090 +        return bounds;
116092 +    case ZSTD_c_dictIDFlag:
116093 +        bounds.lowerBound = 0;
116094 +        bounds.upperBound = 1;
116095 +        return bounds;
116097 +    case ZSTD_c_nbWorkers:
116098 +        bounds.lowerBound = 0;
116099 +        bounds.upperBound = 0;
116100 +        return bounds;
116102 +    case ZSTD_c_jobSize:
116103 +        bounds.lowerBound = 0;
116104 +        bounds.upperBound = 0;
116105 +        return bounds;
116107 +    case ZSTD_c_overlapLog:
116108 +        bounds.lowerBound = 0;
116109 +        bounds.upperBound = 0;
116110 +        return bounds;
116112 +    case ZSTD_c_enableDedicatedDictSearch:
116113 +        bounds.lowerBound = 0;
116114 +        bounds.upperBound = 1;
116115 +        return bounds;
116117 +    case ZSTD_c_enableLongDistanceMatching:
116118 +        bounds.lowerBound = 0;
116119 +        bounds.upperBound = 1;
116120 +        return bounds;
116122 +    case ZSTD_c_ldmHashLog:
116123 +        bounds.lowerBound = ZSTD_LDM_HASHLOG_MIN;
116124 +        bounds.upperBound = ZSTD_LDM_HASHLOG_MAX;
116125 +        return bounds;
116127 +    case ZSTD_c_ldmMinMatch:
116128 +        bounds.lowerBound = ZSTD_LDM_MINMATCH_MIN;
116129 +        bounds.upperBound = ZSTD_LDM_MINMATCH_MAX;
116130 +        return bounds;
116132 +    case ZSTD_c_ldmBucketSizeLog:
116133 +        bounds.lowerBound = ZSTD_LDM_BUCKETSIZELOG_MIN;
116134 +        bounds.upperBound = ZSTD_LDM_BUCKETSIZELOG_MAX;
116135 +        return bounds;
116137 +    case ZSTD_c_ldmHashRateLog:
116138 +        bounds.lowerBound = ZSTD_LDM_HASHRATELOG_MIN;
116139 +        bounds.upperBound = ZSTD_LDM_HASHRATELOG_MAX;
116140 +        return bounds;
116142 +    /* experimental parameters */
116143 +    case ZSTD_c_rsyncable:
116144 +        bounds.lowerBound = 0;
116145 +        bounds.upperBound = 1;
116146 +        return bounds;
116148 +    case ZSTD_c_forceMaxWindow :
116149 +        bounds.lowerBound = 0;
116150 +        bounds.upperBound = 1;
116151 +        return bounds;
116153 +    case ZSTD_c_format:
116154 +        ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
116155 +        bounds.lowerBound = ZSTD_f_zstd1;
116156 +        bounds.upperBound = ZSTD_f_zstd1_magicless;   /* note : how to ensure at compile time that this is the highest value enum ? */
116157 +        return bounds;
116159 +    case ZSTD_c_forceAttachDict:
116160 +        ZSTD_STATIC_ASSERT(ZSTD_dictDefaultAttach < ZSTD_dictForceLoad);
116161 +        bounds.lowerBound = ZSTD_dictDefaultAttach;
116162 +        bounds.upperBound = ZSTD_dictForceLoad;       /* note : how to ensure at compile time that this is the highest value enum ? */
116163 +        return bounds;
116165 +    case ZSTD_c_literalCompressionMode:
116166 +        ZSTD_STATIC_ASSERT(ZSTD_lcm_auto < ZSTD_lcm_huffman && ZSTD_lcm_huffman < ZSTD_lcm_uncompressed);
116167 +        bounds.lowerBound = ZSTD_lcm_auto;
116168 +        bounds.upperBound = ZSTD_lcm_uncompressed;
116169 +        return bounds;
116171 +    case ZSTD_c_targetCBlockSize:
116172 +        bounds.lowerBound = ZSTD_TARGETCBLOCKSIZE_MIN;
116173 +        bounds.upperBound = ZSTD_TARGETCBLOCKSIZE_MAX;
116174 +        return bounds;
116176 +    case ZSTD_c_srcSizeHint:
116177 +        bounds.lowerBound = ZSTD_SRCSIZEHINT_MIN;
116178 +        bounds.upperBound = ZSTD_SRCSIZEHINT_MAX;
116179 +        return bounds;
116181 +    case ZSTD_c_stableInBuffer:
116182 +    case ZSTD_c_stableOutBuffer:
116183 +        bounds.lowerBound = (int)ZSTD_bm_buffered;
116184 +        bounds.upperBound = (int)ZSTD_bm_stable;
116185 +        return bounds;
116187 +    case ZSTD_c_blockDelimiters:
116188 +        bounds.lowerBound = (int)ZSTD_sf_noBlockDelimiters;
116189 +        bounds.upperBound = (int)ZSTD_sf_explicitBlockDelimiters;
116190 +        return bounds;
116192 +    case ZSTD_c_validateSequences:
116193 +        bounds.lowerBound = 0;
116194 +        bounds.upperBound = 1;
116195 +        return bounds;
116197 +    default:
116198 +        bounds.error = ERROR(parameter_unsupported);
116199 +        return bounds;
116200 +    }
116203 +/* ZSTD_cParam_clampBounds:
116204 + * Clamps the value into the bounded range.
116205 + */
116206 +static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value)
116208 +    ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
116209 +    if (ZSTD_isError(bounds.error)) return bounds.error;
116210 +    if (*value < bounds.lowerBound) *value = bounds.lowerBound;
116211 +    if (*value > bounds.upperBound) *value = bounds.upperBound;
116212 +    return 0;
116215 +#define BOUNDCHECK(cParam, val) { \
116216 +    RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \
116217 +                    parameter_outOfBound, "Param out of bounds"); \
116221 +static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
116223 +    switch(param)
116224 +    {
116225 +    case ZSTD_c_compressionLevel:
116226 +    case ZSTD_c_hashLog:
116227 +    case ZSTD_c_chainLog:
116228 +    case ZSTD_c_searchLog:
116229 +    case ZSTD_c_minMatch:
116230 +    case ZSTD_c_targetLength:
116231 +    case ZSTD_c_strategy:
116232 +        return 1;
116234 +    case ZSTD_c_format:
116235 +    case ZSTD_c_windowLog:
116236 +    case ZSTD_c_contentSizeFlag:
116237 +    case ZSTD_c_checksumFlag:
116238 +    case ZSTD_c_dictIDFlag:
116239 +    case ZSTD_c_forceMaxWindow :
116240 +    case ZSTD_c_nbWorkers:
116241 +    case ZSTD_c_jobSize:
116242 +    case ZSTD_c_overlapLog:
116243 +    case ZSTD_c_rsyncable:
116244 +    case ZSTD_c_enableDedicatedDictSearch:
116245 +    case ZSTD_c_enableLongDistanceMatching:
116246 +    case ZSTD_c_ldmHashLog:
116247 +    case ZSTD_c_ldmMinMatch:
116248 +    case ZSTD_c_ldmBucketSizeLog:
116249 +    case ZSTD_c_ldmHashRateLog:
116250 +    case ZSTD_c_forceAttachDict:
116251 +    case ZSTD_c_literalCompressionMode:
116252 +    case ZSTD_c_targetCBlockSize:
116253 +    case ZSTD_c_srcSizeHint:
116254 +    case ZSTD_c_stableInBuffer:
116255 +    case ZSTD_c_stableOutBuffer:
116256 +    case ZSTD_c_blockDelimiters:
116257 +    case ZSTD_c_validateSequences:
116258 +    default:
116259 +        return 0;
116260 +    }
116263 +size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
116265 +    DEBUGLOG(4, "ZSTD_CCtx_setParameter (%i, %i)", (int)param, value);
116266 +    if (cctx->streamStage != zcss_init) {
116267 +        if (ZSTD_isUpdateAuthorized(param)) {
116268 +            cctx->cParamsChanged = 1;
116269 +        } else {
116270 +            RETURN_ERROR(stage_wrong, "can only set params in ctx init stage");
116271 +    }   }
116273 +    switch(param)
116274 +    {
116275 +    case ZSTD_c_nbWorkers:
116276 +        RETURN_ERROR_IF((value!=0) && cctx->staticSize, parameter_unsupported,
116277 +                        "MT not compatible with static alloc");
116278 +        break;
116280 +    case ZSTD_c_compressionLevel:
116281 +    case ZSTD_c_windowLog:
116282 +    case ZSTD_c_hashLog:
116283 +    case ZSTD_c_chainLog:
116284 +    case ZSTD_c_searchLog:
116285 +    case ZSTD_c_minMatch:
116286 +    case ZSTD_c_targetLength:
116287 +    case ZSTD_c_strategy:
116288 +    case ZSTD_c_ldmHashRateLog:
116289 +    case ZSTD_c_format:
116290 +    case ZSTD_c_contentSizeFlag:
116291 +    case ZSTD_c_checksumFlag:
116292 +    case ZSTD_c_dictIDFlag:
116293 +    case ZSTD_c_forceMaxWindow:
116294 +    case ZSTD_c_forceAttachDict:
116295 +    case ZSTD_c_literalCompressionMode:
116296 +    case ZSTD_c_jobSize:
116297 +    case ZSTD_c_overlapLog:
116298 +    case ZSTD_c_rsyncable:
116299 +    case ZSTD_c_enableDedicatedDictSearch:
116300 +    case ZSTD_c_enableLongDistanceMatching:
116301 +    case ZSTD_c_ldmHashLog:
116302 +    case ZSTD_c_ldmMinMatch:
116303 +    case ZSTD_c_ldmBucketSizeLog:
116304 +    case ZSTD_c_targetCBlockSize:
116305 +    case ZSTD_c_srcSizeHint:
116306 +    case ZSTD_c_stableInBuffer:
116307 +    case ZSTD_c_stableOutBuffer:
116308 +    case ZSTD_c_blockDelimiters:
116309 +    case ZSTD_c_validateSequences:
116310 +        break;
116312 +    default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
116313 +    }
116314 +    return ZSTD_CCtxParams_setParameter(&cctx->requestedParams, param, value);
116317 +size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
116318 +                                    ZSTD_cParameter param, int value)
116320 +    DEBUGLOG(4, "ZSTD_CCtxParams_setParameter (%i, %i)", (int)param, value);
116321 +    switch(param)
116322 +    {
116323 +    case ZSTD_c_format :
116324 +        BOUNDCHECK(ZSTD_c_format, value);
116325 +        CCtxParams->format = (ZSTD_format_e)value;
116326 +        return (size_t)CCtxParams->format;
116328 +    case ZSTD_c_compressionLevel : {
116329 +        FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), "");
116330 +        if (value == 0)
116331 +            CCtxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT; /* 0 == default */
116332 +        else
116333 +            CCtxParams->compressionLevel = value;
116334 +        if (CCtxParams->compressionLevel >= 0) return (size_t)CCtxParams->compressionLevel;
116335 +        return 0;  /* return type (size_t) cannot represent negative values */
116336 +    }
116338 +    case ZSTD_c_windowLog :
116339 +        if (value!=0)   /* 0 => use default */
116340 +            BOUNDCHECK(ZSTD_c_windowLog, value);
116341 +        CCtxParams->cParams.windowLog = (U32)value;
116342 +        return CCtxParams->cParams.windowLog;
116344 +    case ZSTD_c_hashLog :
116345 +        if (value!=0)   /* 0 => use default */
116346 +            BOUNDCHECK(ZSTD_c_hashLog, value);
116347 +        CCtxParams->cParams.hashLog = (U32)value;
116348 +        return CCtxParams->cParams.hashLog;
116350 +    case ZSTD_c_chainLog :
116351 +        if (value!=0)   /* 0 => use default */
116352 +            BOUNDCHECK(ZSTD_c_chainLog, value);
116353 +        CCtxParams->cParams.chainLog = (U32)value;
116354 +        return CCtxParams->cParams.chainLog;
116356 +    case ZSTD_c_searchLog :
116357 +        if (value!=0)   /* 0 => use default */
116358 +            BOUNDCHECK(ZSTD_c_searchLog, value);
116359 +        CCtxParams->cParams.searchLog = (U32)value;
116360 +        return (size_t)value;
116362 +    case ZSTD_c_minMatch :
116363 +        if (value!=0)   /* 0 => use default */
116364 +            BOUNDCHECK(ZSTD_c_minMatch, value);
116365 +        CCtxParams->cParams.minMatch = value;
116366 +        return CCtxParams->cParams.minMatch;
116368 +    case ZSTD_c_targetLength :
116369 +        BOUNDCHECK(ZSTD_c_targetLength, value);
116370 +        CCtxParams->cParams.targetLength = value;
116371 +        return CCtxParams->cParams.targetLength;
116373 +    case ZSTD_c_strategy :
116374 +        if (value!=0)   /* 0 => use default */
116375 +            BOUNDCHECK(ZSTD_c_strategy, value);
116376 +        CCtxParams->cParams.strategy = (ZSTD_strategy)value;
116377 +        return (size_t)CCtxParams->cParams.strategy;
116379 +    case ZSTD_c_contentSizeFlag :
116380 +        /* Content size written in frame header _when known_ (default:1) */
116381 +        DEBUGLOG(4, "set content size flag = %u", (value!=0));
116382 +        CCtxParams->fParams.contentSizeFlag = value != 0;
116383 +        return CCtxParams->fParams.contentSizeFlag;
116385 +    case ZSTD_c_checksumFlag :
116386 +        /* A 32-bits content checksum will be calculated and written at end of frame (default:0) */
116387 +        CCtxParams->fParams.checksumFlag = value != 0;
116388 +        return CCtxParams->fParams.checksumFlag;
116390 +    case ZSTD_c_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */
116391 +        DEBUGLOG(4, "set dictIDFlag = %u", (value!=0));
116392 +        CCtxParams->fParams.noDictIDFlag = !value;
116393 +        return !CCtxParams->fParams.noDictIDFlag;
116395 +    case ZSTD_c_forceMaxWindow :
116396 +        CCtxParams->forceWindow = (value != 0);
116397 +        return CCtxParams->forceWindow;
116399 +    case ZSTD_c_forceAttachDict : {
116400 +        const ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value;
116401 +        BOUNDCHECK(ZSTD_c_forceAttachDict, pref);
116402 +        CCtxParams->attachDictPref = pref;
116403 +        return CCtxParams->attachDictPref;
116404 +    }
116406 +    case ZSTD_c_literalCompressionMode : {
116407 +        const ZSTD_literalCompressionMode_e lcm = (ZSTD_literalCompressionMode_e)value;
116408 +        BOUNDCHECK(ZSTD_c_literalCompressionMode, lcm);
116409 +        CCtxParams->literalCompressionMode = lcm;
116410 +        return CCtxParams->literalCompressionMode;
116411 +    }
116413 +    case ZSTD_c_nbWorkers :
116414 +        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
116415 +        return 0;
116417 +    case ZSTD_c_jobSize :
116418 +        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
116419 +        return 0;
116421 +    case ZSTD_c_overlapLog :
116422 +        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
116423 +        return 0;
116425 +    case ZSTD_c_rsyncable :
116426 +        RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
116427 +        return 0;
116429 +    case ZSTD_c_enableDedicatedDictSearch :
116430 +        CCtxParams->enableDedicatedDictSearch = (value!=0);
116431 +        return CCtxParams->enableDedicatedDictSearch;
116433 +    case ZSTD_c_enableLongDistanceMatching :
116434 +        CCtxParams->ldmParams.enableLdm = (value!=0);
116435 +        return CCtxParams->ldmParams.enableLdm;
116437 +    case ZSTD_c_ldmHashLog :
116438 +        if (value!=0)   /* 0 ==> auto */
116439 +            BOUNDCHECK(ZSTD_c_ldmHashLog, value);
116440 +        CCtxParams->ldmParams.hashLog = value;
116441 +        return CCtxParams->ldmParams.hashLog;
116443 +    case ZSTD_c_ldmMinMatch :
116444 +        if (value!=0)   /* 0 ==> default */
116445 +            BOUNDCHECK(ZSTD_c_ldmMinMatch, value);
116446 +        CCtxParams->ldmParams.minMatchLength = value;
116447 +        return CCtxParams->ldmParams.minMatchLength;
116449 +    case ZSTD_c_ldmBucketSizeLog :
116450 +        if (value!=0)   /* 0 ==> default */
116451 +            BOUNDCHECK(ZSTD_c_ldmBucketSizeLog, value);
116452 +        CCtxParams->ldmParams.bucketSizeLog = value;
116453 +        return CCtxParams->ldmParams.bucketSizeLog;
116455 +    case ZSTD_c_ldmHashRateLog :
116456 +        RETURN_ERROR_IF(value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN,
116457 +                        parameter_outOfBound, "Param out of bounds!");
116458 +        CCtxParams->ldmParams.hashRateLog = value;
116459 +        return CCtxParams->ldmParams.hashRateLog;
116461 +    case ZSTD_c_targetCBlockSize :
116462 +        if (value!=0)   /* 0 ==> default */
116463 +            BOUNDCHECK(ZSTD_c_targetCBlockSize, value);
116464 +        CCtxParams->targetCBlockSize = value;
116465 +        return CCtxParams->targetCBlockSize;
116467 +    case ZSTD_c_srcSizeHint :
116468 +        if (value!=0)    /* 0 ==> default */
116469 +            BOUNDCHECK(ZSTD_c_srcSizeHint, value);
116470 +        CCtxParams->srcSizeHint = value;
116471 +        return CCtxParams->srcSizeHint;
116473 +    case ZSTD_c_stableInBuffer:
116474 +        BOUNDCHECK(ZSTD_c_stableInBuffer, value);
116475 +        CCtxParams->inBufferMode = (ZSTD_bufferMode_e)value;
116476 +        return CCtxParams->inBufferMode;
116478 +    case ZSTD_c_stableOutBuffer:
116479 +        BOUNDCHECK(ZSTD_c_stableOutBuffer, value);
116480 +        CCtxParams->outBufferMode = (ZSTD_bufferMode_e)value;
116481 +        return CCtxParams->outBufferMode;
116483 +    case ZSTD_c_blockDelimiters:
116484 +        BOUNDCHECK(ZSTD_c_blockDelimiters, value);
116485 +        CCtxParams->blockDelimiters = (ZSTD_sequenceFormat_e)value;
116486 +        return CCtxParams->blockDelimiters;
116488 +    case ZSTD_c_validateSequences:
116489 +        BOUNDCHECK(ZSTD_c_validateSequences, value);
116490 +        CCtxParams->validateSequences = value;
116491 +        return CCtxParams->validateSequences;
116493 +    default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
116494 +    }
116497 +size_t ZSTD_CCtx_getParameter(ZSTD_CCtx const* cctx, ZSTD_cParameter param, int* value)
116499 +    return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value);
116502 +size_t ZSTD_CCtxParams_getParameter(
116503 +        ZSTD_CCtx_params const* CCtxParams, ZSTD_cParameter param, int* value)
116505 +    switch(param)
116506 +    {
116507 +    case ZSTD_c_format :
116508 +        *value = CCtxParams->format;
116509 +        break;
116510 +    case ZSTD_c_compressionLevel :
116511 +        *value = CCtxParams->compressionLevel;
116512 +        break;
116513 +    case ZSTD_c_windowLog :
116514 +        *value = (int)CCtxParams->cParams.windowLog;
116515 +        break;
116516 +    case ZSTD_c_hashLog :
116517 +        *value = (int)CCtxParams->cParams.hashLog;
116518 +        break;
116519 +    case ZSTD_c_chainLog :
116520 +        *value = (int)CCtxParams->cParams.chainLog;
116521 +        break;
116522 +    case ZSTD_c_searchLog :
116523 +        *value = CCtxParams->cParams.searchLog;
116524 +        break;
116525 +    case ZSTD_c_minMatch :
116526 +        *value = CCtxParams->cParams.minMatch;
116527 +        break;
116528 +    case ZSTD_c_targetLength :
116529 +        *value = CCtxParams->cParams.targetLength;
116530 +        break;
116531 +    case ZSTD_c_strategy :
116532 +        *value = (unsigned)CCtxParams->cParams.strategy;
116533 +        break;
116534 +    case ZSTD_c_contentSizeFlag :
116535 +        *value = CCtxParams->fParams.contentSizeFlag;
116536 +        break;
116537 +    case ZSTD_c_checksumFlag :
116538 +        *value = CCtxParams->fParams.checksumFlag;
116539 +        break;
116540 +    case ZSTD_c_dictIDFlag :
116541 +        *value = !CCtxParams->fParams.noDictIDFlag;
116542 +        break;
116543 +    case ZSTD_c_forceMaxWindow :
116544 +        *value = CCtxParams->forceWindow;
116545 +        break;
116546 +    case ZSTD_c_forceAttachDict :
116547 +        *value = CCtxParams->attachDictPref;
116548 +        break;
116549 +    case ZSTD_c_literalCompressionMode :
116550 +        *value = CCtxParams->literalCompressionMode;
116551 +        break;
116552 +    case ZSTD_c_nbWorkers :
116553 +        assert(CCtxParams->nbWorkers == 0);
116554 +        *value = CCtxParams->nbWorkers;
116555 +        break;
116556 +    case ZSTD_c_jobSize :
116557 +        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
116558 +    case ZSTD_c_overlapLog :
116559 +        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
116560 +    case ZSTD_c_rsyncable :
116561 +        RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
116562 +    case ZSTD_c_enableDedicatedDictSearch :
116563 +        *value = CCtxParams->enableDedicatedDictSearch;
116564 +        break;
116565 +    case ZSTD_c_enableLongDistanceMatching :
116566 +        *value = CCtxParams->ldmParams.enableLdm;
116567 +        break;
116568 +    case ZSTD_c_ldmHashLog :
116569 +        *value = CCtxParams->ldmParams.hashLog;
116570 +        break;
116571 +    case ZSTD_c_ldmMinMatch :
116572 +        *value = CCtxParams->ldmParams.minMatchLength;
116573 +        break;
116574 +    case ZSTD_c_ldmBucketSizeLog :
116575 +        *value = CCtxParams->ldmParams.bucketSizeLog;
116576 +        break;
116577 +    case ZSTD_c_ldmHashRateLog :
116578 +        *value = CCtxParams->ldmParams.hashRateLog;
116579 +        break;
116580 +    case ZSTD_c_targetCBlockSize :
116581 +        *value = (int)CCtxParams->targetCBlockSize;
116582 +        break;
116583 +    case ZSTD_c_srcSizeHint :
116584 +        *value = (int)CCtxParams->srcSizeHint;
116585 +        break;
116586 +    case ZSTD_c_stableInBuffer :
116587 +        *value = (int)CCtxParams->inBufferMode;
116588 +        break;
116589 +    case ZSTD_c_stableOutBuffer :
116590 +        *value = (int)CCtxParams->outBufferMode;
116591 +        break;
116592 +    case ZSTD_c_blockDelimiters :
116593 +        *value = (int)CCtxParams->blockDelimiters;
116594 +        break;
116595 +    case ZSTD_c_validateSequences :
116596 +        *value = (int)CCtxParams->validateSequences;
116597 +        break;
116598 +    default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
116599 +    }
116600 +    return 0;
116603 +/** ZSTD_CCtx_setParametersUsingCCtxParams() :
116604 + *  just applies `params` into `cctx`
116605 + *  no action is performed, parameters are merely stored.
116606 + *  If ZSTDMT is enabled, parameters are pushed to cctx->mtctx.
116607 + *    This is possible even if a compression is ongoing.
116608 + *    In which case, new parameters will be applied on the fly, starting with next compression job.
116609 + */
116610 +size_t ZSTD_CCtx_setParametersUsingCCtxParams(
116611 +        ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)
116613 +    DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams");
116614 +    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
116615 +                    "The context is in the wrong stage!");
116616 +    RETURN_ERROR_IF(cctx->cdict, stage_wrong,
116617 +                    "Can't override parameters with cdict attached (some must "
116618 +                    "be inherited from the cdict).");
116620 +    cctx->requestedParams = *params;
116621 +    return 0;
116624 +ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
116626 +    DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize);
116627 +    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
116628 +                    "Can't set pledgedSrcSize when not in init stage.");
116629 +    cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
116630 +    return 0;
116633 +static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(
116634 +        int const compressionLevel,
116635 +        size_t const dictSize);
116636 +static int ZSTD_dedicatedDictSearch_isSupported(
116637 +        const ZSTD_compressionParameters* cParams);
116638 +static void ZSTD_dedicatedDictSearch_revertCParams(
116639 +        ZSTD_compressionParameters* cParams);
116642 + * Initializes the local dict using the requested parameters.
116643 + * NOTE: This does not use the pledged src size, because it may be used for more
116644 + * than one compression.
116645 + */
116646 +static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)
116648 +    ZSTD_localDict* const dl = &cctx->localDict;
116649 +    if (dl->dict == NULL) {
116650 +        /* No local dictionary. */
116651 +        assert(dl->dictBuffer == NULL);
116652 +        assert(dl->cdict == NULL);
116653 +        assert(dl->dictSize == 0);
116654 +        return 0;
116655 +    }
116656 +    if (dl->cdict != NULL) {
116657 +        assert(cctx->cdict == dl->cdict);
116658 +        /* Local dictionary already initialized. */
116659 +        return 0;
116660 +    }
116661 +    assert(dl->dictSize > 0);
116662 +    assert(cctx->cdict == NULL);
116663 +    assert(cctx->prefixDict.dict == NULL);
116665 +    dl->cdict = ZSTD_createCDict_advanced2(
116666 +            dl->dict,
116667 +            dl->dictSize,
116668 +            ZSTD_dlm_byRef,
116669 +            dl->dictContentType,
116670 +            &cctx->requestedParams,
116671 +            cctx->customMem);
116672 +    RETURN_ERROR_IF(!dl->cdict, memory_allocation, "ZSTD_createCDict_advanced failed");
116673 +    cctx->cdict = dl->cdict;
116674 +    return 0;
116677 +size_t ZSTD_CCtx_loadDictionary_advanced(
116678 +        ZSTD_CCtx* cctx, const void* dict, size_t dictSize,
116679 +        ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
116681 +    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
116682 +                    "Can't load a dictionary when ctx is not in init stage.");
116683 +    DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize);
116684 +    ZSTD_clearAllDicts(cctx);  /* in case one already exists */
116685 +    if (dict == NULL || dictSize == 0)  /* no dictionary mode */
116686 +        return 0;
116687 +    if (dictLoadMethod == ZSTD_dlm_byRef) {
116688 +        cctx->localDict.dict = dict;
116689 +    } else {
116690 +        void* dictBuffer;
116691 +        RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
116692 +                        "no malloc for static CCtx");
116693 +        dictBuffer = ZSTD_customMalloc(dictSize, cctx->customMem);
116694 +        RETURN_ERROR_IF(!dictBuffer, memory_allocation, "NULL pointer!");
116695 +        ZSTD_memcpy(dictBuffer, dict, dictSize);
116696 +        cctx->localDict.dictBuffer = dictBuffer;
116697 +        cctx->localDict.dict = dictBuffer;
116698 +    }
116699 +    cctx->localDict.dictSize = dictSize;
116700 +    cctx->localDict.dictContentType = dictContentType;
116701 +    return 0;
116704 +ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(
116705 +      ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
116707 +    return ZSTD_CCtx_loadDictionary_advanced(
116708 +            cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
116711 +ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
116713 +    return ZSTD_CCtx_loadDictionary_advanced(
116714 +            cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
116718 +size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
116720 +    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
116721 +                    "Can't ref a dict when ctx not in init stage.");
116722 +    /* Free the existing local cdict (if any) to save memory. */
116723 +    ZSTD_clearAllDicts(cctx);
116724 +    cctx->cdict = cdict;
116725 +    return 0;
116728 +size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool)
116730 +    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
116731 +                    "Can't ref a pool when ctx not in init stage.");
116732 +    cctx->pool = pool;
116733 +    return 0;
116736 +size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize)
116738 +    return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dct_rawContent);
116741 +size_t ZSTD_CCtx_refPrefix_advanced(
116742 +        ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
116744 +    RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
116745 +                    "Can't ref a prefix when ctx not in init stage.");
116746 +    ZSTD_clearAllDicts(cctx);
116747 +    if (prefix != NULL && prefixSize > 0) {
116748 +        cctx->prefixDict.dict = prefix;
116749 +        cctx->prefixDict.dictSize = prefixSize;
116750 +        cctx->prefixDict.dictContentType = dictContentType;
116751 +    }
116752 +    return 0;
116755 +/*! ZSTD_CCtx_reset() :
116756 + *  Also dumps dictionary */
116757 +size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset)
116759 +    if ( (reset == ZSTD_reset_session_only)
116760 +      || (reset == ZSTD_reset_session_and_parameters) ) {
116761 +        cctx->streamStage = zcss_init;
116762 +        cctx->pledgedSrcSizePlusOne = 0;
116763 +    }
116764 +    if ( (reset == ZSTD_reset_parameters)
116765 +      || (reset == ZSTD_reset_session_and_parameters) ) {
116766 +        RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
116767 +                        "Can't reset parameters only when not in init stage.");
116768 +        ZSTD_clearAllDicts(cctx);
116769 +        return ZSTD_CCtxParams_reset(&cctx->requestedParams);
116770 +    }
116771 +    return 0;
116775 +/** ZSTD_checkCParams() :
116776 +    control CParam values remain within authorized range.
116777 +    @return : 0, or an error code if one value is beyond authorized range */
116778 +size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
116780 +    BOUNDCHECK(ZSTD_c_windowLog, (int)cParams.windowLog);
116781 +    BOUNDCHECK(ZSTD_c_chainLog,  (int)cParams.chainLog);
116782 +    BOUNDCHECK(ZSTD_c_hashLog,   (int)cParams.hashLog);
116783 +    BOUNDCHECK(ZSTD_c_searchLog, (int)cParams.searchLog);
116784 +    BOUNDCHECK(ZSTD_c_minMatch,  (int)cParams.minMatch);
116785 +    BOUNDCHECK(ZSTD_c_targetLength,(int)cParams.targetLength);
116786 +    BOUNDCHECK(ZSTD_c_strategy,  cParams.strategy);
116787 +    return 0;
116790 +/** ZSTD_clampCParams() :
116791 + *  make CParam values within valid range.
116792 + *  @return : valid CParams */
116793 +static ZSTD_compressionParameters
116794 +ZSTD_clampCParams(ZSTD_compressionParameters cParams)
116796 +#   define CLAMP_TYPE(cParam, val, type) {                                \
116797 +        ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);         \
116798 +        if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound;      \
116799 +        else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \
116800 +    }
116801 +#   define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned)
116802 +    CLAMP(ZSTD_c_windowLog, cParams.windowLog);
116803 +    CLAMP(ZSTD_c_chainLog,  cParams.chainLog);
116804 +    CLAMP(ZSTD_c_hashLog,   cParams.hashLog);
116805 +    CLAMP(ZSTD_c_searchLog, cParams.searchLog);
116806 +    CLAMP(ZSTD_c_minMatch,  cParams.minMatch);
116807 +    CLAMP(ZSTD_c_targetLength,cParams.targetLength);
116808 +    CLAMP_TYPE(ZSTD_c_strategy,cParams.strategy, ZSTD_strategy);
116809 +    return cParams;
116812 +/** ZSTD_cycleLog() :
116813 + *  condition for correct operation : hashLog > 1 */
116814 +U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
116816 +    U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
116817 +    return hashLog - btScale;
116820 +/** ZSTD_dictAndWindowLog() :
116821 + * Returns an adjusted window log that is large enough to fit the source and the dictionary.
116822 + * The zstd format says that the entire dictionary is valid if one byte of the dictionary
116823 + * is within the window. So the hashLog and chainLog should be large enough to reference both
116824 + * the dictionary and the window. So we must use this adjusted dictAndWindowLog when downsizing
116825 + * the hashLog and windowLog.
116826 + * NOTE: srcSize must not be ZSTD_CONTENTSIZE_UNKNOWN.
116827 + */
116828 +static U32 ZSTD_dictAndWindowLog(U32 windowLog, U64 srcSize, U64 dictSize)
116830 +    const U64 maxWindowSize = 1ULL << ZSTD_WINDOWLOG_MAX;
116831 +    /* No dictionary ==> No change */
116832 +    if (dictSize == 0) {
116833 +        return windowLog;
116834 +    }
116835 +    assert(windowLog <= ZSTD_WINDOWLOG_MAX);
116836 +    assert(srcSize != ZSTD_CONTENTSIZE_UNKNOWN); /* Handled in ZSTD_adjustCParams_internal() */
116837 +    {
116838 +        U64 const windowSize = 1ULL << windowLog;
116839 +        U64 const dictAndWindowSize = dictSize + windowSize;
116840 +        /* If the window size is already large enough to fit both the source and the dictionary
116841 +         * then just use the window size. Otherwise adjust so that it fits the dictionary and
116842 +         * the window.
116843 +         */
116844 +        if (windowSize >= dictSize + srcSize) {
116845 +            return windowLog; /* Window size large enough already */
116846 +        } else if (dictAndWindowSize >= maxWindowSize) {
116847 +            return ZSTD_WINDOWLOG_MAX; /* Larger than max window log */
116848 +        } else  {
116849 +            return ZSTD_highbit32((U32)dictAndWindowSize - 1) + 1;
116850 +        }
116851 +    }
116854 +/** ZSTD_adjustCParams_internal() :
116855 + *  optimize `cPar` for a specified input (`srcSize` and `dictSize`).
116856 + *  mostly downsize to reduce memory consumption and initialization latency.
116857 + * `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known.
116858 + * `mode` is the mode for parameter adjustment. See docs for `ZSTD_cParamMode_e`.
116859 + *  note : `srcSize==0` means 0!
116860 + *  condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */
116861 +static ZSTD_compressionParameters
116862 +ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
116863 +                            unsigned long long srcSize,
116864 +                            size_t dictSize,
116865 +                            ZSTD_cParamMode_e mode)
116867 +    const U64 minSrcSize = 513; /* (1<<9) + 1 */
116868 +    const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
116869 +    assert(ZSTD_checkCParams(cPar)==0);
116871 +    switch (mode) {
116872 +    case ZSTD_cpm_unknown:
116873 +    case ZSTD_cpm_noAttachDict:
116874 +        /* If we don't know the source size, don't make any
116875 +         * assumptions about it. We will already have selected
116876 +         * smaller parameters if a dictionary is in use.
116877 +         */
116878 +        break;
116879 +    case ZSTD_cpm_createCDict:
116880 +        /* Assume a small source size when creating a dictionary
116881 +         * with an unkown source size.
116882 +         */
116883 +        if (dictSize && srcSize == ZSTD_CONTENTSIZE_UNKNOWN)
116884 +            srcSize = minSrcSize;
116885 +        break;
116886 +    case ZSTD_cpm_attachDict:
116887 +        /* Dictionary has its own dedicated parameters which have
116888 +         * already been selected. We are selecting parameters
116889 +         * for only the source.
116890 +         */
116891 +        dictSize = 0;
116892 +        break;
116893 +    default:
116894 +        assert(0);
116895 +        break;
116896 +    }
116898 +    /* resize windowLog if input is small enough, to use less memory */
116899 +    if ( (srcSize < maxWindowResize)
116900 +      && (dictSize < maxWindowResize) )  {
116901 +        U32 const tSize = (U32)(srcSize + dictSize);
116902 +        static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN;
116903 +        U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN :
116904 +                            ZSTD_highbit32(tSize-1) + 1;
116905 +        if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
116906 +    }
116907 +    if (srcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
116908 +        U32 const dictAndWindowLog = ZSTD_dictAndWindowLog(cPar.windowLog, (U64)srcSize, (U64)dictSize);
116909 +        U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
116910 +        if (cPar.hashLog > dictAndWindowLog+1) cPar.hashLog = dictAndWindowLog+1;
116911 +        if (cycleLog > dictAndWindowLog)
116912 +            cPar.chainLog -= (cycleLog - dictAndWindowLog);
116913 +    }
116915 +    if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
116916 +        cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN;  /* minimum wlog required for valid frame header */
116918 +    return cPar;
116921 +ZSTD_compressionParameters
116922 +ZSTD_adjustCParams(ZSTD_compressionParameters cPar,
116923 +                   unsigned long long srcSize,
116924 +                   size_t dictSize)
116926 +    cPar = ZSTD_clampCParams(cPar);   /* resulting cPar is necessarily valid (all parameters within range) */
116927 +    if (srcSize == 0) srcSize = ZSTD_CONTENTSIZE_UNKNOWN;
116928 +    return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_cpm_unknown);
116931 +static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
116932 +static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
116934 +static void ZSTD_overrideCParams(
116935 +              ZSTD_compressionParameters* cParams,
116936 +        const ZSTD_compressionParameters* overrides)
116938 +    if (overrides->windowLog)    cParams->windowLog    = overrides->windowLog;
116939 +    if (overrides->hashLog)      cParams->hashLog      = overrides->hashLog;
116940 +    if (overrides->chainLog)     cParams->chainLog     = overrides->chainLog;
116941 +    if (overrides->searchLog)    cParams->searchLog    = overrides->searchLog;
116942 +    if (overrides->minMatch)     cParams->minMatch     = overrides->minMatch;
116943 +    if (overrides->targetLength) cParams->targetLength = overrides->targetLength;
116944 +    if (overrides->strategy)     cParams->strategy     = overrides->strategy;
116947 +ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
116948 +        const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
116950 +    ZSTD_compressionParameters cParams;
116951 +    if (srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN && CCtxParams->srcSizeHint > 0) {
116952 +      srcSizeHint = CCtxParams->srcSizeHint;
116953 +    }
116954 +    cParams = ZSTD_getCParams_internal(CCtxParams->compressionLevel, srcSizeHint, dictSize, mode);
116955 +    if (CCtxParams->ldmParams.enableLdm) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
116956 +    ZSTD_overrideCParams(&cParams, &CCtxParams->cParams);
116957 +    assert(!ZSTD_checkCParams(cParams));
116958 +    /* srcSizeHint == 0 means 0 */
116959 +    return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize, mode);
116962 +static size_t
116963 +ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
116964 +                       const U32 forCCtx)
116966 +    size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
116967 +    size_t const hSize = ((size_t)1) << cParams->hashLog;
116968 +    U32    const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
116969 +    size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
116970 +    /* We don't use ZSTD_cwksp_alloc_size() here because the tables aren't
116971 +     * surrounded by redzones in ASAN. */
116972 +    size_t const tableSpace = chainSize * sizeof(U32)
116973 +                            + hSize * sizeof(U32)
116974 +                            + h3Size * sizeof(U32);
116975 +    size_t const optPotentialSpace =
116976 +        ZSTD_cwksp_alloc_size((MaxML+1) * sizeof(U32))
116977 +      + ZSTD_cwksp_alloc_size((MaxLL+1) * sizeof(U32))
116978 +      + ZSTD_cwksp_alloc_size((MaxOff+1) * sizeof(U32))
116979 +      + ZSTD_cwksp_alloc_size((1<<Litbits) * sizeof(U32))
116980 +      + ZSTD_cwksp_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t))
116981 +      + ZSTD_cwksp_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
116982 +    size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt))
116983 +                                ? optPotentialSpace
116984 +                                : 0;
116985 +    DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u",
116986 +                (U32)chainSize, (U32)hSize, (U32)h3Size);
116987 +    return tableSpace + optSpace;
116990 +static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
116991 +        const ZSTD_compressionParameters* cParams,
116992 +        const ldmParams_t* ldmParams,
116993 +        const int isStatic,
116994 +        const size_t buffInSize,
116995 +        const size_t buffOutSize,
116996 +        const U64 pledgedSrcSize)
116998 +    size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << cParams->windowLog), pledgedSrcSize));
116999 +    size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
117000 +    U32    const divider = (cParams->minMatch==3) ? 3 : 4;
117001 +    size_t const maxNbSeq = blockSize / divider;
117002 +    size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize)
117003 +                            + ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(seqDef))
117004 +                            + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE));
117005 +    size_t const entropySpace = ZSTD_cwksp_alloc_size(ENTROPY_WORKSPACE_SIZE);
117006 +    size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t));
117007 +    size_t const matchStateSize = ZSTD_sizeof_matchState(cParams, /* forCCtx */ 1);
117009 +    size_t const ldmSpace = ZSTD_ldm_getTableSize(*ldmParams);
117010 +    size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(*ldmParams, blockSize);
117011 +    size_t const ldmSeqSpace = ldmParams->enableLdm ?
117012 +        ZSTD_cwksp_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0;
117015 +    size_t const bufferSpace = ZSTD_cwksp_alloc_size(buffInSize)
117016 +                             + ZSTD_cwksp_alloc_size(buffOutSize);
117018 +    size_t const cctxSpace = isStatic ? ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)) : 0;
117020 +    size_t const neededSpace =
117021 +        cctxSpace +
117022 +        entropySpace +
117023 +        blockStateSpace +
117024 +        ldmSpace +
117025 +        ldmSeqSpace +
117026 +        matchStateSize +
117027 +        tokenSpace +
117028 +        bufferSpace;
117030 +    DEBUGLOG(5, "estimate workspace : %u", (U32)neededSpace);
117031 +    return neededSpace;
117034 +size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
117036 +    ZSTD_compressionParameters const cParams =
117037 +                ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
117039 +    RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
117040 +    /* estimateCCtxSize is for one-shot compression. So no buffers should
117041 +     * be needed. However, we still allocate two 0-sized buffers, which can
117042 +     * take space under ASAN. */
117043 +    return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
117044 +        &cParams, &params->ldmParams, 1, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN);
117047 +size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)
117049 +    ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
117050 +    return ZSTD_estimateCCtxSize_usingCCtxParams(&params);
117053 +static size_t ZSTD_estimateCCtxSize_internal(int compressionLevel)
117055 +    int tier = 0;
117056 +    size_t largestSize = 0;
117057 +    static const unsigned long long srcSizeTiers[4] = {16 KB, 128 KB, 256 KB, ZSTD_CONTENTSIZE_UNKNOWN};
117058 +    for (; tier < 4; ++tier) {
117059 +        /* Choose the set of cParams for a given level across all srcSizes that give the largest cctxSize */
117060 +        ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeTiers[tier], 0, ZSTD_cpm_noAttachDict);
117061 +        largestSize = MAX(ZSTD_estimateCCtxSize_usingCParams(cParams), largestSize);
117062 +    }
117063 +    return largestSize;
117066 +size_t ZSTD_estimateCCtxSize(int compressionLevel)
117068 +    int level;
117069 +    size_t memBudget = 0;
117070 +    for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
117071 +        /* Ensure monotonically increasing memory usage as compression level increases */
117072 +        size_t const newMB = ZSTD_estimateCCtxSize_internal(level);
117073 +        if (newMB > memBudget) memBudget = newMB;
117074 +    }
117075 +    return memBudget;
117078 +size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
117080 +    RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
117081 +    {   ZSTD_compressionParameters const cParams =
117082 +                ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
117083 +        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
117084 +        size_t const inBuffSize = (params->inBufferMode == ZSTD_bm_buffered)
117085 +                ? ((size_t)1 << cParams.windowLog) + blockSize
117086 +                : 0;
117087 +        size_t const outBuffSize = (params->outBufferMode == ZSTD_bm_buffered)
117088 +                ? ZSTD_compressBound(blockSize) + 1
117089 +                : 0;
117091 +        return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
117092 +            &cParams, &params->ldmParams, 1, inBuffSize, outBuffSize,
117093 +            ZSTD_CONTENTSIZE_UNKNOWN);
117094 +    }
117097 +size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams)
117099 +    ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
117100 +    return ZSTD_estimateCStreamSize_usingCCtxParams(&params);
117103 +static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel)
117105 +    ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
117106 +    return ZSTD_estimateCStreamSize_usingCParams(cParams);
117109 +size_t ZSTD_estimateCStreamSize(int compressionLevel)
117111 +    int level;
117112 +    size_t memBudget = 0;
117113 +    for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
117114 +        size_t const newMB = ZSTD_estimateCStreamSize_internal(level);
117115 +        if (newMB > memBudget) memBudget = newMB;
117116 +    }
117117 +    return memBudget;
117120 +/* ZSTD_getFrameProgression():
117121 + * tells how much data has been consumed (input) and produced (output) for current frame.
117122 + * able to count progression inside worker threads (non-blocking mode).
117123 + */
117124 +ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx)
117126 +    {   ZSTD_frameProgression fp;
117127 +        size_t const buffered = (cctx->inBuff == NULL) ? 0 :
117128 +                                cctx->inBuffPos - cctx->inToCompress;
117129 +        if (buffered) assert(cctx->inBuffPos >= cctx->inToCompress);
117130 +        assert(buffered <= ZSTD_BLOCKSIZE_MAX);
117131 +        fp.ingested = cctx->consumedSrcSize + buffered;
117132 +        fp.consumed = cctx->consumedSrcSize;
117133 +        fp.produced = cctx->producedCSize;
117134 +        fp.flushed  = cctx->producedCSize;   /* simplified; some data might still be left within streaming output buffer */
117135 +        fp.currentJobID = 0;
117136 +        fp.nbActiveWorkers = 0;
117137 +        return fp;
117138 +}   }
117140 +/*! ZSTD_toFlushNow()
117141 + *  Only useful for multithreading scenarios currently (nbWorkers >= 1).
117142 + */
117143 +size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx)
117145 +    (void)cctx;
117146 +    return 0;   /* over-simplification; could also check if context is currently running in streaming mode, and in which case, report how many bytes are left to be flushed within output buffer */
117149 +static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1,
117150 +                                    ZSTD_compressionParameters cParams2)
117152 +    (void)cParams1;
117153 +    (void)cParams2;
117154 +    assert(cParams1.windowLog    == cParams2.windowLog);
117155 +    assert(cParams1.chainLog     == cParams2.chainLog);
117156 +    assert(cParams1.hashLog      == cParams2.hashLog);
117157 +    assert(cParams1.searchLog    == cParams2.searchLog);
117158 +    assert(cParams1.minMatch     == cParams2.minMatch);
117159 +    assert(cParams1.targetLength == cParams2.targetLength);
117160 +    assert(cParams1.strategy     == cParams2.strategy);
117163 +void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs)
117165 +    int i;
117166 +    for (i = 0; i < ZSTD_REP_NUM; ++i)
117167 +        bs->rep[i] = repStartValue[i];
117168 +    bs->entropy.huf.repeatMode = HUF_repeat_none;
117169 +    bs->entropy.fse.offcode_repeatMode = FSE_repeat_none;
117170 +    bs->entropy.fse.matchlength_repeatMode = FSE_repeat_none;
117171 +    bs->entropy.fse.litlength_repeatMode = FSE_repeat_none;
117174 +/*! ZSTD_invalidateMatchState()
117175 + *  Invalidate all the matches in the match finder tables.
117176 + *  Requires nextSrc and base to be set (can be NULL).
117177 + */
117178 +static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms)
117180 +    ZSTD_window_clear(&ms->window);
117182 +    ms->nextToUpdate = ms->window.dictLimit;
117183 +    ms->loadedDictEnd = 0;
117184 +    ms->opt.litLengthSum = 0;  /* force reset of btopt stats */
117185 +    ms->dictMatchState = NULL;
117189 + * Controls, for this matchState reset, whether the tables need to be cleared /
117190 + * prepared for the coming compression (ZSTDcrp_makeClean), or whether the
117191 + * tables can be left unclean (ZSTDcrp_leaveDirty), because we know that a
117192 + * subsequent operation will overwrite the table space anyways (e.g., copying
117193 + * the matchState contents in from a CDict).
117194 + */
117195 +typedef enum {
117196 +    ZSTDcrp_makeClean,
117197 +    ZSTDcrp_leaveDirty
117198 +} ZSTD_compResetPolicy_e;
117201 + * Controls, for this matchState reset, whether indexing can continue where it
117202 + * left off (ZSTDirp_continue), or whether it needs to be restarted from zero
117203 + * (ZSTDirp_reset).
117204 + */
117205 +typedef enum {
117206 +    ZSTDirp_continue,
117207 +    ZSTDirp_reset
117208 +} ZSTD_indexResetPolicy_e;
117210 +typedef enum {
117211 +    ZSTD_resetTarget_CDict,
117212 +    ZSTD_resetTarget_CCtx
117213 +} ZSTD_resetTarget_e;
117215 +static size_t
117216 +ZSTD_reset_matchState(ZSTD_matchState_t* ms,
117217 +                      ZSTD_cwksp* ws,
117218 +                const ZSTD_compressionParameters* cParams,
117219 +                const ZSTD_compResetPolicy_e crp,
117220 +                const ZSTD_indexResetPolicy_e forceResetIndex,
117221 +                const ZSTD_resetTarget_e forWho)
117223 +    size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
117224 +    size_t const hSize = ((size_t)1) << cParams->hashLog;
117225 +    U32    const hashLog3 = ((forWho == ZSTD_resetTarget_CCtx) && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
117226 +    size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
117228 +    DEBUGLOG(4, "reset indices : %u", forceResetIndex == ZSTDirp_reset);
117229 +    if (forceResetIndex == ZSTDirp_reset) {
117230 +        ZSTD_window_init(&ms->window);
117231 +        ZSTD_cwksp_mark_tables_dirty(ws);
117232 +    }
117234 +    ms->hashLog3 = hashLog3;
117236 +    ZSTD_invalidateMatchState(ms);
117238 +    assert(!ZSTD_cwksp_reserve_failed(ws)); /* check that allocation hasn't already failed */
117240 +    ZSTD_cwksp_clear_tables(ws);
117242 +    DEBUGLOG(5, "reserving table space");
117243 +    /* table Space */
117244 +    ms->hashTable = (U32*)ZSTD_cwksp_reserve_table(ws, hSize * sizeof(U32));
117245 +    ms->chainTable = (U32*)ZSTD_cwksp_reserve_table(ws, chainSize * sizeof(U32));
117246 +    ms->hashTable3 = (U32*)ZSTD_cwksp_reserve_table(ws, h3Size * sizeof(U32));
117247 +    RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
117248 +                    "failed a workspace allocation in ZSTD_reset_matchState");
117250 +    DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_leaveDirty);
117251 +    if (crp!=ZSTDcrp_leaveDirty) {
117252 +        /* reset tables only */
117253 +        ZSTD_cwksp_clean_tables(ws);
117254 +    }
117256 +    /* opt parser space */
117257 +    if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) {
117258 +        DEBUGLOG(4, "reserving optimal parser space");
117259 +        ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (1<<Litbits) * sizeof(unsigned));
117260 +        ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxLL+1) * sizeof(unsigned));
117261 +        ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxML+1) * sizeof(unsigned));
117262 +        ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxOff+1) * sizeof(unsigned));
117263 +        ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t));
117264 +        ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
117265 +    }
117267 +    ms->cParams = *cParams;
117269 +    RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
117270 +                    "failed a workspace allocation in ZSTD_reset_matchState");
117272 +    return 0;
117275 +/* ZSTD_indexTooCloseToMax() :
117276 + * minor optimization : prefer memset() rather than reduceIndex()
117277 + * which is measurably slow in some circumstances (reported for Visual Studio).
117278 + * Works when re-using a context for a lot of smallish inputs :
117279 + * if all inputs are smaller than ZSTD_INDEXOVERFLOW_MARGIN,
117280 + * memset() will be triggered before reduceIndex().
117281 + */
117282 +#define ZSTD_INDEXOVERFLOW_MARGIN (16 MB)
117283 +static int ZSTD_indexTooCloseToMax(ZSTD_window_t w)
117285 +    return (size_t)(w.nextSrc - w.base) > (ZSTD_CURRENT_MAX - ZSTD_INDEXOVERFLOW_MARGIN);
117288 +/*! ZSTD_resetCCtx_internal() :
117289 +    note : `params` are assumed fully validated at this stage */
117290 +static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
117291 +                                      ZSTD_CCtx_params params,
117292 +                                      U64 const pledgedSrcSize,
117293 +                                      ZSTD_compResetPolicy_e const crp,
117294 +                                      ZSTD_buffered_policy_e const zbuff)
117296 +    ZSTD_cwksp* const ws = &zc->workspace;
117297 +    DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u",
117298 +                (U32)pledgedSrcSize, params.cParams.windowLog);
117299 +    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
117301 +    zc->isFirstBlock = 1;
117303 +    if (params.ldmParams.enableLdm) {
117304 +        /* Adjust long distance matching parameters */
117305 +        ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
117306 +        assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
117307 +        assert(params.ldmParams.hashRateLog < 32);
117308 +    }
117310 +    {   size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize));
117311 +        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
117312 +        U32    const divider = (params.cParams.minMatch==3) ? 3 : 4;
117313 +        size_t const maxNbSeq = blockSize / divider;
117314 +        size_t const buffOutSize = (zbuff == ZSTDb_buffered && params.outBufferMode == ZSTD_bm_buffered)
117315 +                ? ZSTD_compressBound(blockSize) + 1
117316 +                : 0;
117317 +        size_t const buffInSize = (zbuff == ZSTDb_buffered && params.inBufferMode == ZSTD_bm_buffered)
117318 +                ? windowSize + blockSize
117319 +                : 0;
117320 +        size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params.ldmParams, blockSize);
117322 +        int const indexTooClose = ZSTD_indexTooCloseToMax(zc->blockState.matchState.window);
117323 +        ZSTD_indexResetPolicy_e needsIndexReset =
117324 +            (!indexTooClose && zc->initialized) ? ZSTDirp_continue : ZSTDirp_reset;
117326 +        size_t const neededSpace =
117327 +            ZSTD_estimateCCtxSize_usingCCtxParams_internal(
117328 +                &params.cParams, &params.ldmParams, zc->staticSize != 0,
117329 +                buffInSize, buffOutSize, pledgedSrcSize);
117330 +        FORWARD_IF_ERROR(neededSpace, "cctx size estimate failed!");
117332 +        if (!zc->staticSize) ZSTD_cwksp_bump_oversized_duration(ws, 0);
117334 +        /* Check if workspace is large enough, alloc a new one if needed */
117335 +        {
117336 +            int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace;
117337 +            int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace);
117339 +            DEBUGLOG(4, "Need %zu B workspace", neededSpace);
117340 +            DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize);
117342 +            if (workspaceTooSmall || workspaceWasteful) {
117343 +                DEBUGLOG(4, "Resize workspaceSize from %zuKB to %zuKB",
117344 +                            ZSTD_cwksp_sizeof(ws) >> 10,
117345 +                            neededSpace >> 10);
117347 +                RETURN_ERROR_IF(zc->staticSize, memory_allocation, "static cctx : no resize");
117349 +                needsIndexReset = ZSTDirp_reset;
117351 +                ZSTD_cwksp_free(ws, zc->customMem);
117352 +                FORWARD_IF_ERROR(ZSTD_cwksp_create(ws, neededSpace, zc->customMem), "");
117354 +                DEBUGLOG(5, "reserving object space");
117355 +                /* Statically sized space.
117356 +                 * entropyWorkspace never moves,
117357 +                 * though prev/next block swap places */
117358 +                assert(ZSTD_cwksp_check_available(ws, 2 * sizeof(ZSTD_compressedBlockState_t)));
117359 +                zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
117360 +                RETURN_ERROR_IF(zc->blockState.prevCBlock == NULL, memory_allocation, "couldn't allocate prevCBlock");
117361 +                zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
117362 +                RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate nextCBlock");
117363 +                zc->entropyWorkspace = (U32*) ZSTD_cwksp_reserve_object(ws, ENTROPY_WORKSPACE_SIZE);
117364 +                RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate entropyWorkspace");
117365 +        }   }
117367 +        ZSTD_cwksp_clear(ws);
117369 +        /* init params */
117370 +        zc->appliedParams = params;
117371 +        zc->blockState.matchState.cParams = params.cParams;
117372 +        zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;
117373 +        zc->consumedSrcSize = 0;
117374 +        zc->producedCSize = 0;
117375 +        if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
117376 +            zc->appliedParams.fParams.contentSizeFlag = 0;
117377 +        DEBUGLOG(4, "pledged content size : %u ; flag : %u",
117378 +            (unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
117379 +        zc->blockSize = blockSize;
117381 +        xxh64_reset(&zc->xxhState, 0);
117382 +        zc->stage = ZSTDcs_init;
117383 +        zc->dictID = 0;
117384 +        zc->dictContentSize = 0;
117386 +        ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);
117388 +        /* ZSTD_wildcopy() is used to copy into the literals buffer,
117389 +         * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.
117390 +         */
117391 +        zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + WILDCOPY_OVERLENGTH);
117392 +        zc->seqStore.maxNbLit = blockSize;
117394 +        /* buffers */
117395 +        zc->bufferedPolicy = zbuff;
117396 +        zc->inBuffSize = buffInSize;
117397 +        zc->inBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffInSize);
117398 +        zc->outBuffSize = buffOutSize;
117399 +        zc->outBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffOutSize);
117401 +        /* ldm bucketOffsets table */
117402 +        if (params.ldmParams.enableLdm) {
117403 +            /* TODO: avoid memset? */
117404 +            size_t const numBuckets =
117405 +                  ((size_t)1) << (params.ldmParams.hashLog -
117406 +                                  params.ldmParams.bucketSizeLog);
117407 +            zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, numBuckets);
117408 +            ZSTD_memset(zc->ldmState.bucketOffsets, 0, numBuckets);
117409 +        }
117411 +        /* sequences storage */
117412 +        ZSTD_referenceExternalSequences(zc, NULL, 0);
117413 +        zc->seqStore.maxNbSeq = maxNbSeq;
117414 +        zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
117415 +        zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
117416 +        zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
117417 +        zc->seqStore.sequencesStart = (seqDef*)ZSTD_cwksp_reserve_aligned(ws, maxNbSeq * sizeof(seqDef));
117419 +        FORWARD_IF_ERROR(ZSTD_reset_matchState(
117420 +            &zc->blockState.matchState,
117421 +            ws,
117422 +            &params.cParams,
117423 +            crp,
117424 +            needsIndexReset,
117425 +            ZSTD_resetTarget_CCtx), "");
117427 +        /* ldm hash table */
117428 +        if (params.ldmParams.enableLdm) {
117429 +            /* TODO: avoid memset? */
117430 +            size_t const ldmHSize = ((size_t)1) << params.ldmParams.hashLog;
117431 +            zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t));
117432 +            ZSTD_memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t));
117433 +            zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned(ws, maxNbLdmSeq * sizeof(rawSeq));
117434 +            zc->maxNbLdmSequences = maxNbLdmSeq;
117436 +            ZSTD_window_init(&zc->ldmState.window);
117437 +            ZSTD_window_clear(&zc->ldmState.window);
117438 +            zc->ldmState.loadedDictEnd = 0;
117439 +        }
117441 +        /* Due to alignment, when reusing a workspace, we can actually consume
117442 +         * up to 3 extra bytes for alignment. See the comments in zstd_cwksp.h
117443 +         */
117444 +        assert(ZSTD_cwksp_used(ws) >= neededSpace &&
117445 +               ZSTD_cwksp_used(ws) <= neededSpace + 3);
117447 +        DEBUGLOG(3, "wksp: finished allocating, %zd bytes remain available", ZSTD_cwksp_available_space(ws));
117448 +        zc->initialized = 1;
117450 +        return 0;
117451 +    }
117454 +/* ZSTD_invalidateRepCodes() :
117455 + * ensures next compression will not use repcodes from previous block.
117456 + * Note : only works with regular variant;
117457 + *        do not use with extDict variant ! */
117458 +void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) {
117459 +    int i;
117460 +    for (i=0; i<ZSTD_REP_NUM; i++) cctx->blockState.prevCBlock->rep[i] = 0;
117461 +    assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
117464 +/* These are the approximate sizes for each strategy past which copying the
117465 + * dictionary tables into the working context is faster than using them
117466 + * in-place.
117467 + */
117468 +static const size_t attachDictSizeCutoffs[ZSTD_STRATEGY_MAX+1] = {
117469 +    8 KB,  /* unused */
117470 +    8 KB,  /* ZSTD_fast */
117471 +    16 KB, /* ZSTD_dfast */
117472 +    32 KB, /* ZSTD_greedy */
117473 +    32 KB, /* ZSTD_lazy */
117474 +    32 KB, /* ZSTD_lazy2 */
117475 +    32 KB, /* ZSTD_btlazy2 */
117476 +    32 KB, /* ZSTD_btopt */
117477 +    8 KB,  /* ZSTD_btultra */
117478 +    8 KB   /* ZSTD_btultra2 */
117481 +static int ZSTD_shouldAttachDict(const ZSTD_CDict* cdict,
117482 +                                 const ZSTD_CCtx_params* params,
117483 +                                 U64 pledgedSrcSize)
117485 +    size_t cutoff = attachDictSizeCutoffs[cdict->matchState.cParams.strategy];
117486 +    int const dedicatedDictSearch = cdict->matchState.dedicatedDictSearch;
117487 +    return dedicatedDictSearch
117488 +        || ( ( pledgedSrcSize <= cutoff
117489 +            || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
117490 +            || params->attachDictPref == ZSTD_dictForceAttach )
117491 +          && params->attachDictPref != ZSTD_dictForceCopy
117492 +          && !params->forceWindow ); /* dictMatchState isn't correctly
117493 +                                      * handled in _enforceMaxDist */
117496 +static size_t
117497 +ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx,
117498 +                        const ZSTD_CDict* cdict,
117499 +                        ZSTD_CCtx_params params,
117500 +                        U64 pledgedSrcSize,
117501 +                        ZSTD_buffered_policy_e zbuff)
117503 +    {
117504 +        ZSTD_compressionParameters adjusted_cdict_cParams = cdict->matchState.cParams;
117505 +        unsigned const windowLog = params.cParams.windowLog;
117506 +        assert(windowLog != 0);
117507 +        /* Resize working context table params for input only, since the dict
117508 +         * has its own tables. */
117509 +        /* pledgedSrcSize == 0 means 0! */
117511 +        if (cdict->matchState.dedicatedDictSearch) {
117512 +            ZSTD_dedicatedDictSearch_revertCParams(&adjusted_cdict_cParams);
117513 +        }
117515 +        params.cParams = ZSTD_adjustCParams_internal(adjusted_cdict_cParams, pledgedSrcSize,
117516 +                                                     cdict->dictContentSize, ZSTD_cpm_attachDict);
117517 +        params.cParams.windowLog = windowLog;
117518 +        FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
117519 +                                                 ZSTDcrp_makeClean, zbuff), "");
117520 +        assert(cctx->appliedParams.cParams.strategy == adjusted_cdict_cParams.strategy);
117521 +    }
117523 +    {   const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc
117524 +                                  - cdict->matchState.window.base);
117525 +        const U32 cdictLen = cdictEnd - cdict->matchState.window.dictLimit;
117526 +        if (cdictLen == 0) {
117527 +            /* don't even attach dictionaries with no contents */
117528 +            DEBUGLOG(4, "skipping attaching empty dictionary");
117529 +        } else {
117530 +            DEBUGLOG(4, "attaching dictionary into context");
117531 +            cctx->blockState.matchState.dictMatchState = &cdict->matchState;
117533 +            /* prep working match state so dict matches never have negative indices
117534 +             * when they are translated to the working context's index space. */
117535 +            if (cctx->blockState.matchState.window.dictLimit < cdictEnd) {
117536 +                cctx->blockState.matchState.window.nextSrc =
117537 +                    cctx->blockState.matchState.window.base + cdictEnd;
117538 +                ZSTD_window_clear(&cctx->blockState.matchState.window);
117539 +            }
117540 +            /* loadedDictEnd is expressed within the referential of the active context */
117541 +            cctx->blockState.matchState.loadedDictEnd = cctx->blockState.matchState.window.dictLimit;
117542 +    }   }
117544 +    cctx->dictID = cdict->dictID;
117545 +    cctx->dictContentSize = cdict->dictContentSize;
117547 +    /* copy block state */
117548 +    ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
117550 +    return 0;
117553 +static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
117554 +                            const ZSTD_CDict* cdict,
117555 +                            ZSTD_CCtx_params params,
117556 +                            U64 pledgedSrcSize,
117557 +                            ZSTD_buffered_policy_e zbuff)
117559 +    const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;
117561 +    assert(!cdict->matchState.dedicatedDictSearch);
117563 +    DEBUGLOG(4, "copying dictionary into context");
117565 +    {   unsigned const windowLog = params.cParams.windowLog;
117566 +        assert(windowLog != 0);
117567 +        /* Copy only compression parameters related to tables. */
117568 +        params.cParams = *cdict_cParams;
117569 +        params.cParams.windowLog = windowLog;
117570 +        FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
117571 +                                                 ZSTDcrp_leaveDirty, zbuff), "");
117572 +        assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
117573 +        assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog);
117574 +        assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog);
117575 +    }
117577 +    ZSTD_cwksp_mark_tables_dirty(&cctx->workspace);
117579 +    /* copy tables */
117580 +    {   size_t const chainSize = (cdict_cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cdict_cParams->chainLog);
117581 +        size_t const hSize =  (size_t)1 << cdict_cParams->hashLog;
117583 +        ZSTD_memcpy(cctx->blockState.matchState.hashTable,
117584 +               cdict->matchState.hashTable,
117585 +               hSize * sizeof(U32));
117586 +        ZSTD_memcpy(cctx->blockState.matchState.chainTable,
117587 +               cdict->matchState.chainTable,
117588 +               chainSize * sizeof(U32));
117589 +    }
117591 +    /* Zero the hashTable3, since the cdict never fills it */
117592 +    {   int const h3log = cctx->blockState.matchState.hashLog3;
117593 +        size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
117594 +        assert(cdict->matchState.hashLog3 == 0);
117595 +        ZSTD_memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32));
117596 +    }
117598 +    ZSTD_cwksp_mark_tables_clean(&cctx->workspace);
117600 +    /* copy dictionary offsets */
117601 +    {   ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
117602 +        ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;
117603 +        dstMatchState->window       = srcMatchState->window;
117604 +        dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
117605 +        dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
117606 +    }
117608 +    cctx->dictID = cdict->dictID;
117609 +    cctx->dictContentSize = cdict->dictContentSize;
117611 +    /* copy block state */
117612 +    ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
117614 +    return 0;
117617 +/* We have a choice between copying the dictionary context into the working
117618 + * context, or referencing the dictionary context from the working context
117619 + * in-place. We decide here which strategy to use. */
117620 +static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
117621 +                            const ZSTD_CDict* cdict,
117622 +                            const ZSTD_CCtx_params* params,
117623 +                            U64 pledgedSrcSize,
117624 +                            ZSTD_buffered_policy_e zbuff)
117627 +    DEBUGLOG(4, "ZSTD_resetCCtx_usingCDict (pledgedSrcSize=%u)",
117628 +                (unsigned)pledgedSrcSize);
117630 +    if (ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) {
117631 +        return ZSTD_resetCCtx_byAttachingCDict(
117632 +            cctx, cdict, *params, pledgedSrcSize, zbuff);
117633 +    } else {
117634 +        return ZSTD_resetCCtx_byCopyingCDict(
117635 +            cctx, cdict, *params, pledgedSrcSize, zbuff);
117636 +    }
117639 +/*! ZSTD_copyCCtx_internal() :
117640 + *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
117641 + *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
117642 + *  The "context", in this case, refers to the hash and chain tables,
117643 + *  entropy tables, and dictionary references.
117644 + * `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx.
117645 + * @return : 0, or an error code */
117646 +static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
117647 +                            const ZSTD_CCtx* srcCCtx,
117648 +                            ZSTD_frameParameters fParams,
117649 +                            U64 pledgedSrcSize,
117650 +                            ZSTD_buffered_policy_e zbuff)
117652 +    DEBUGLOG(5, "ZSTD_copyCCtx_internal");
117653 +    RETURN_ERROR_IF(srcCCtx->stage!=ZSTDcs_init, stage_wrong,
117654 +                    "Can't copy a ctx that's not in init stage.");
117656 +    ZSTD_memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
117657 +    {   ZSTD_CCtx_params params = dstCCtx->requestedParams;
117658 +        /* Copy only compression parameters related to tables. */
117659 +        params.cParams = srcCCtx->appliedParams.cParams;
117660 +        params.fParams = fParams;
117661 +        ZSTD_resetCCtx_internal(dstCCtx, params, pledgedSrcSize,
117662 +                                ZSTDcrp_leaveDirty, zbuff);
117663 +        assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog);
117664 +        assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy);
117665 +        assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog);
117666 +        assert(dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog);
117667 +        assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3);
117668 +    }
117670 +    ZSTD_cwksp_mark_tables_dirty(&dstCCtx->workspace);
117672 +    /* copy tables */
117673 +    {   size_t const chainSize = (srcCCtx->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog);
117674 +        size_t const hSize =  (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;
117675 +        int const h3log = srcCCtx->blockState.matchState.hashLog3;
117676 +        size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
117678 +        ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable,
117679 +               srcCCtx->blockState.matchState.hashTable,
117680 +               hSize * sizeof(U32));
117681 +        ZSTD_memcpy(dstCCtx->blockState.matchState.chainTable,
117682 +               srcCCtx->blockState.matchState.chainTable,
117683 +               chainSize * sizeof(U32));
117684 +        ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable3,
117685 +               srcCCtx->blockState.matchState.hashTable3,
117686 +               h3Size * sizeof(U32));
117687 +    }
117689 +    ZSTD_cwksp_mark_tables_clean(&dstCCtx->workspace);
117691 +    /* copy dictionary offsets */
117692 +    {
117693 +        const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState;
117694 +        ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState;
117695 +        dstMatchState->window       = srcMatchState->window;
117696 +        dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
117697 +        dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
117698 +    }
117699 +    dstCCtx->dictID = srcCCtx->dictID;
117700 +    dstCCtx->dictContentSize = srcCCtx->dictContentSize;
117702 +    /* copy block state */
117703 +    ZSTD_memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock));
117705 +    return 0;
117708 +/*! ZSTD_copyCCtx() :
117709 + *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
117710 + *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
117711 + *  pledgedSrcSize==0 means "unknown".
117712 +*   @return : 0, or an error code */
117713 +size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
117715 +    ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
117716 +    ZSTD_buffered_policy_e const zbuff = srcCCtx->bufferedPolicy;
117717 +    ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1);
117718 +    if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
117719 +    fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN);
117721 +    return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx,
117722 +                                fParams, pledgedSrcSize,
117723 +                                zbuff);
117727 +#define ZSTD_ROWSIZE 16
117728 +/*! ZSTD_reduceTable() :
117729 + *  reduce table indexes by `reducerValue`, or squash to zero.
117730 + *  PreserveMark preserves "unsorted mark" for btlazy2 strategy.
117731 + *  It must be set to a clear 0/1 value, to remove branch during inlining.
117732 + *  Presume table size is a multiple of ZSTD_ROWSIZE
117733 + *  to help auto-vectorization */
117734 +FORCE_INLINE_TEMPLATE void
117735 +ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerValue, int const preserveMark)
117737 +    int const nbRows = (int)size / ZSTD_ROWSIZE;
117738 +    int cellNb = 0;
117739 +    int rowNb;
117740 +    assert((size & (ZSTD_ROWSIZE-1)) == 0);  /* multiple of ZSTD_ROWSIZE */
117741 +    assert(size < (1U<<31));   /* can be casted to int */
117744 +    for (rowNb=0 ; rowNb < nbRows ; rowNb++) {
117745 +        int column;
117746 +        for (column=0; column<ZSTD_ROWSIZE; column++) {
117747 +            if (preserveMark) {
117748 +                U32 const adder = (table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) ? reducerValue : 0;
117749 +                table[cellNb] += adder;
117750 +            }
117751 +            if (table[cellNb] < reducerValue) table[cellNb] = 0;
117752 +            else table[cellNb] -= reducerValue;
117753 +            cellNb++;
117754 +    }   }
117757 +static void ZSTD_reduceTable(U32* const table, U32 const size, U32 const reducerValue)
117759 +    ZSTD_reduceTable_internal(table, size, reducerValue, 0);
117762 +static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const reducerValue)
117764 +    ZSTD_reduceTable_internal(table, size, reducerValue, 1);
117767 +/*! ZSTD_reduceIndex() :
117768 +*   rescale all indexes to avoid future overflow (indexes are U32) */
117769 +static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue)
117771 +    {   U32 const hSize = (U32)1 << params->cParams.hashLog;
117772 +        ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
117773 +    }
117775 +    if (params->cParams.strategy != ZSTD_fast) {
117776 +        U32 const chainSize = (U32)1 << params->cParams.chainLog;
117777 +        if (params->cParams.strategy == ZSTD_btlazy2)
117778 +            ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue);
117779 +        else
117780 +            ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue);
117781 +    }
117783 +    if (ms->hashLog3) {
117784 +        U32 const h3Size = (U32)1 << ms->hashLog3;
117785 +        ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue);
117786 +    }
117790 +/*-*******************************************************
117791 +*  Block entropic compression
117792 +*********************************************************/
117794 +/* See doc/zstd_compression_format.md for detailed format description */
117796 +void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
117798 +    const seqDef* const sequences = seqStorePtr->sequencesStart;
117799 +    BYTE* const llCodeTable = seqStorePtr->llCode;
117800 +    BYTE* const ofCodeTable = seqStorePtr->ofCode;
117801 +    BYTE* const mlCodeTable = seqStorePtr->mlCode;
117802 +    U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
117803 +    U32 u;
117804 +    assert(nbSeq <= seqStorePtr->maxNbSeq);
117805 +    for (u=0; u<nbSeq; u++) {
117806 +        U32 const llv = sequences[u].litLength;
117807 +        U32 const mlv = sequences[u].matchLength;
117808 +        llCodeTable[u] = (BYTE)ZSTD_LLcode(llv);
117809 +        ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);
117810 +        mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv);
117811 +    }
117812 +    if (seqStorePtr->longLengthID==1)
117813 +        llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
117814 +    if (seqStorePtr->longLengthID==2)
117815 +        mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
117818 +/* ZSTD_useTargetCBlockSize():
117819 + * Returns if target compressed block size param is being used.
117820 + * If used, compression will do best effort to make a compressed block size to be around targetCBlockSize.
117821 + * Returns 1 if true, 0 otherwise. */
117822 +static int ZSTD_useTargetCBlockSize(const ZSTD_CCtx_params* cctxParams)
117824 +    DEBUGLOG(5, "ZSTD_useTargetCBlockSize (targetCBlockSize=%zu)", cctxParams->targetCBlockSize);
117825 +    return (cctxParams->targetCBlockSize != 0);
117828 +/* ZSTD_entropyCompressSequences_internal():
117829 + * actually compresses both literals and sequences */
117830 +MEM_STATIC size_t
117831 +ZSTD_entropyCompressSequences_internal(seqStore_t* seqStorePtr,
117832 +                          const ZSTD_entropyCTables_t* prevEntropy,
117833 +                                ZSTD_entropyCTables_t* nextEntropy,
117834 +                          const ZSTD_CCtx_params* cctxParams,
117835 +                                void* dst, size_t dstCapacity,
117836 +                                void* entropyWorkspace, size_t entropyWkspSize,
117837 +                          const int bmi2)
117839 +    const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
117840 +    ZSTD_strategy const strategy = cctxParams->cParams.strategy;
117841 +    unsigned* count = (unsigned*)entropyWorkspace;
117842 +    FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;
117843 +    FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;
117844 +    FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;
117845 +    U32 LLtype, Offtype, MLtype;   /* compressed, raw or rle */
117846 +    const seqDef* const sequences = seqStorePtr->sequencesStart;
117847 +    const BYTE* const ofCodeTable = seqStorePtr->ofCode;
117848 +    const BYTE* const llCodeTable = seqStorePtr->llCode;
117849 +    const BYTE* const mlCodeTable = seqStorePtr->mlCode;
117850 +    BYTE* const ostart = (BYTE*)dst;
117851 +    BYTE* const oend = ostart + dstCapacity;
117852 +    BYTE* op = ostart;
117853 +    size_t const nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
117854 +    BYTE* seqHead;
117855 +    BYTE* lastNCount = NULL;
117857 +    entropyWorkspace = count + (MaxSeq + 1);
117858 +    entropyWkspSize -= (MaxSeq + 1) * sizeof(*count);
117860 +    DEBUGLOG(4, "ZSTD_entropyCompressSequences_internal (nbSeq=%zu)", nbSeq);
117861 +    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
117862 +    assert(entropyWkspSize >= HUF_WORKSPACE_SIZE);
117864 +    /* Compress literals */
117865 +    {   const BYTE* const literals = seqStorePtr->litStart;
117866 +        size_t const litSize = (size_t)(seqStorePtr->lit - literals);
117867 +        size_t const cSize = ZSTD_compressLiterals(
117868 +                                    &prevEntropy->huf, &nextEntropy->huf,
117869 +                                    cctxParams->cParams.strategy,
117870 +                                    ZSTD_disableLiteralsCompression(cctxParams),
117871 +                                    op, dstCapacity,
117872 +                                    literals, litSize,
117873 +                                    entropyWorkspace, entropyWkspSize,
117874 +                                    bmi2);
117875 +        FORWARD_IF_ERROR(cSize, "ZSTD_compressLiterals failed");
117876 +        assert(cSize <= dstCapacity);
117877 +        op += cSize;
117878 +    }
117880 +    /* Sequences Header */
117881 +    RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
117882 +                    dstSize_tooSmall, "Can't fit seq hdr in output buf!");
117883 +    if (nbSeq < 128) {
117884 +        *op++ = (BYTE)nbSeq;
117885 +    } else if (nbSeq < LONGNBSEQ) {
117886 +        op[0] = (BYTE)((nbSeq>>8) + 0x80);
117887 +        op[1] = (BYTE)nbSeq;
117888 +        op+=2;
117889 +    } else {
117890 +        op[0]=0xFF;
117891 +        MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ));
117892 +        op+=3;
117893 +    }
117894 +    assert(op <= oend);
117895 +    if (nbSeq==0) {
117896 +        /* Copy the old tables over as if we repeated them */
117897 +        ZSTD_memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));
117898 +        return (size_t)(op - ostart);
117899 +    }
117901 +    /* seqHead : flags for FSE encoding type */
117902 +    seqHead = op++;
117903 +    assert(op <= oend);
117905 +    /* convert length/distances into codes */
117906 +    ZSTD_seqToCodes(seqStorePtr);
117907 +    /* build CTable for Literal Lengths */
117908 +    {   unsigned max = MaxLL;
117909 +        size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);   /* can't fail */
117910 +        DEBUGLOG(5, "Building LL table");
117911 +        nextEntropy->fse.litlength_repeatMode = prevEntropy->fse.litlength_repeatMode;
117912 +        LLtype = ZSTD_selectEncodingType(&nextEntropy->fse.litlength_repeatMode,
117913 +                                        count, max, mostFrequent, nbSeq,
117914 +                                        LLFSELog, prevEntropy->fse.litlengthCTable,
117915 +                                        LL_defaultNorm, LL_defaultNormLog,
117916 +                                        ZSTD_defaultAllowed, strategy);
117917 +        assert(set_basic < set_compressed && set_rle < set_compressed);
117918 +        assert(!(LLtype < set_compressed && nextEntropy->fse.litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
117919 +        {   size_t const countSize = ZSTD_buildCTable(
117920 +                op, (size_t)(oend - op),
117921 +                CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
117922 +                count, max, llCodeTable, nbSeq,
117923 +                LL_defaultNorm, LL_defaultNormLog, MaxLL,
117924 +                prevEntropy->fse.litlengthCTable,
117925 +                sizeof(prevEntropy->fse.litlengthCTable),
117926 +                entropyWorkspace, entropyWkspSize);
117927 +            FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for LitLens failed");
117928 +            if (LLtype == set_compressed)
117929 +                lastNCount = op;
117930 +            op += countSize;
117931 +            assert(op <= oend);
117932 +    }   }
117933 +    /* build CTable for Offsets */
117934 +    {   unsigned max = MaxOff;
117935 +        size_t const mostFrequent = HIST_countFast_wksp(
117936 +            count, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);  /* can't fail */
117937 +        /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
117938 +        ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
117939 +        DEBUGLOG(5, "Building OF table");
117940 +        nextEntropy->fse.offcode_repeatMode = prevEntropy->fse.offcode_repeatMode;
117941 +        Offtype = ZSTD_selectEncodingType(&nextEntropy->fse.offcode_repeatMode,
117942 +                                        count, max, mostFrequent, nbSeq,
117943 +                                        OffFSELog, prevEntropy->fse.offcodeCTable,
117944 +                                        OF_defaultNorm, OF_defaultNormLog,
117945 +                                        defaultPolicy, strategy);
117946 +        assert(!(Offtype < set_compressed && nextEntropy->fse.offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
117947 +        {   size_t const countSize = ZSTD_buildCTable(
117948 +                op, (size_t)(oend - op),
117949 +                CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
117950 +                count, max, ofCodeTable, nbSeq,
117951 +                OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
117952 +                prevEntropy->fse.offcodeCTable,
117953 +                sizeof(prevEntropy->fse.offcodeCTable),
117954 +                entropyWorkspace, entropyWkspSize);
117955 +            FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for Offsets failed");
117956 +            if (Offtype == set_compressed)
117957 +                lastNCount = op;
117958 +            op += countSize;
117959 +            assert(op <= oend);
117960 +    }   }
117961 +    /* build CTable for MatchLengths */
117962 +    {   unsigned max = MaxML;
117963 +        size_t const mostFrequent = HIST_countFast_wksp(
117964 +            count, &max, mlCodeTable, nbSeq, entropyWorkspace, entropyWkspSize);   /* can't fail */
117965 +        DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
117966 +        nextEntropy->fse.matchlength_repeatMode = prevEntropy->fse.matchlength_repeatMode;
117967 +        MLtype = ZSTD_selectEncodingType(&nextEntropy->fse.matchlength_repeatMode,
117968 +                                        count, max, mostFrequent, nbSeq,
117969 +                                        MLFSELog, prevEntropy->fse.matchlengthCTable,
117970 +                                        ML_defaultNorm, ML_defaultNormLog,
117971 +                                        ZSTD_defaultAllowed, strategy);
117972 +        assert(!(MLtype < set_compressed && nextEntropy->fse.matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
117973 +        {   size_t const countSize = ZSTD_buildCTable(
117974 +                op, (size_t)(oend - op),
117975 +                CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
117976 +                count, max, mlCodeTable, nbSeq,
117977 +                ML_defaultNorm, ML_defaultNormLog, MaxML,
117978 +                prevEntropy->fse.matchlengthCTable,
117979 +                sizeof(prevEntropy->fse.matchlengthCTable),
117980 +                entropyWorkspace, entropyWkspSize);
117981 +            FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for MatchLengths failed");
117982 +            if (MLtype == set_compressed)
117983 +                lastNCount = op;
117984 +            op += countSize;
117985 +            assert(op <= oend);
117986 +    }   }
117988 +    *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
117990 +    {   size_t const bitstreamSize = ZSTD_encodeSequences(
117991 +                                        op, (size_t)(oend - op),
117992 +                                        CTable_MatchLength, mlCodeTable,
117993 +                                        CTable_OffsetBits, ofCodeTable,
117994 +                                        CTable_LitLength, llCodeTable,
117995 +                                        sequences, nbSeq,
117996 +                                        longOffsets, bmi2);
117997 +        FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed");
117998 +        op += bitstreamSize;
117999 +        assert(op <= oend);
118000 +        /* zstd versions <= 1.3.4 mistakenly report corruption when
118001 +         * FSE_readNCount() receives a buffer < 4 bytes.
118002 +         * Fixed by https://github.com/facebook/zstd/pull/1146.
118003 +         * This can happen when the last set_compressed table present is 2
118004 +         * bytes and the bitstream is only one byte.
118005 +         * In this exceedingly rare case, we will simply emit an uncompressed
118006 +         * block, since it isn't worth optimizing.
118007 +         */
118008 +        if (lastNCount && (op - lastNCount) < 4) {
118009 +            /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
118010 +            assert(op - lastNCount == 3);
118011 +            DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
118012 +                        "emitting an uncompressed block.");
118013 +            return 0;
118014 +        }
118015 +    }
118017 +    DEBUGLOG(5, "compressed block size : %u", (unsigned)(op - ostart));
118018 +    return (size_t)(op - ostart);
118021 +MEM_STATIC size_t
118022 +ZSTD_entropyCompressSequences(seqStore_t* seqStorePtr,
118023 +                       const ZSTD_entropyCTables_t* prevEntropy,
118024 +                             ZSTD_entropyCTables_t* nextEntropy,
118025 +                       const ZSTD_CCtx_params* cctxParams,
118026 +                             void* dst, size_t dstCapacity,
118027 +                             size_t srcSize,
118028 +                             void* entropyWorkspace, size_t entropyWkspSize,
118029 +                             int bmi2)
118031 +    size_t const cSize = ZSTD_entropyCompressSequences_internal(
118032 +                            seqStorePtr, prevEntropy, nextEntropy, cctxParams,
118033 +                            dst, dstCapacity,
118034 +                            entropyWorkspace, entropyWkspSize, bmi2);
118035 +    if (cSize == 0) return 0;
118036 +    /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block.
118037 +     * Since we ran out of space, block must be not compressible, so fall back to raw uncompressed block.
118038 +     */
118039 +    if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity))
118040 +        return 0;  /* block not compressed */
118041 +    FORWARD_IF_ERROR(cSize, "ZSTD_entropyCompressSequences_internal failed");
118043 +    /* Check compressibility */
118044 +    {   size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy);
118045 +        if (cSize >= maxCSize) return 0;  /* block not compressed */
118046 +    }
118047 +    DEBUGLOG(4, "ZSTD_entropyCompressSequences() cSize: %zu\n", cSize);
118048 +    return cSize;
118051 +/* ZSTD_selectBlockCompressor() :
118052 + * Not static, but internal use only (used by long distance matcher)
118053 + * assumption : strat is a valid strategy */
118054 +ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode)
118056 +    static const ZSTD_blockCompressor blockCompressor[4][ZSTD_STRATEGY_MAX+1] = {
118057 +        { ZSTD_compressBlock_fast  /* default for 0 */,
118058 +          ZSTD_compressBlock_fast,
118059 +          ZSTD_compressBlock_doubleFast,
118060 +          ZSTD_compressBlock_greedy,
118061 +          ZSTD_compressBlock_lazy,
118062 +          ZSTD_compressBlock_lazy2,
118063 +          ZSTD_compressBlock_btlazy2,
118064 +          ZSTD_compressBlock_btopt,
118065 +          ZSTD_compressBlock_btultra,
118066 +          ZSTD_compressBlock_btultra2 },
118067 +        { ZSTD_compressBlock_fast_extDict  /* default for 0 */,
118068 +          ZSTD_compressBlock_fast_extDict,
118069 +          ZSTD_compressBlock_doubleFast_extDict,
118070 +          ZSTD_compressBlock_greedy_extDict,
118071 +          ZSTD_compressBlock_lazy_extDict,
118072 +          ZSTD_compressBlock_lazy2_extDict,
118073 +          ZSTD_compressBlock_btlazy2_extDict,
118074 +          ZSTD_compressBlock_btopt_extDict,
118075 +          ZSTD_compressBlock_btultra_extDict,
118076 +          ZSTD_compressBlock_btultra_extDict },
118077 +        { ZSTD_compressBlock_fast_dictMatchState  /* default for 0 */,
118078 +          ZSTD_compressBlock_fast_dictMatchState,
118079 +          ZSTD_compressBlock_doubleFast_dictMatchState,
118080 +          ZSTD_compressBlock_greedy_dictMatchState,
118081 +          ZSTD_compressBlock_lazy_dictMatchState,
118082 +          ZSTD_compressBlock_lazy2_dictMatchState,
118083 +          ZSTD_compressBlock_btlazy2_dictMatchState,
118084 +          ZSTD_compressBlock_btopt_dictMatchState,
118085 +          ZSTD_compressBlock_btultra_dictMatchState,
118086 +          ZSTD_compressBlock_btultra_dictMatchState },
118087 +        { NULL  /* default for 0 */,
118088 +          NULL,
118089 +          NULL,
118090 +          ZSTD_compressBlock_greedy_dedicatedDictSearch,
118091 +          ZSTD_compressBlock_lazy_dedicatedDictSearch,
118092 +          ZSTD_compressBlock_lazy2_dedicatedDictSearch,
118093 +          NULL,
118094 +          NULL,
118095 +          NULL,
118096 +          NULL }
118097 +    };
118098 +    ZSTD_blockCompressor selectedCompressor;
118099 +    ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);
118101 +    assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
118102 +    selectedCompressor = blockCompressor[(int)dictMode][(int)strat];
118103 +    assert(selectedCompressor != NULL);
118104 +    return selectedCompressor;
118107 +static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
118108 +                                   const BYTE* anchor, size_t lastLLSize)
118110 +    ZSTD_memcpy(seqStorePtr->lit, anchor, lastLLSize);
118111 +    seqStorePtr->lit += lastLLSize;
118114 +void ZSTD_resetSeqStore(seqStore_t* ssPtr)
118116 +    ssPtr->lit = ssPtr->litStart;
118117 +    ssPtr->sequences = ssPtr->sequencesStart;
118118 +    ssPtr->longLengthID = 0;
118121 +typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e;
118123 +static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
118125 +    ZSTD_matchState_t* const ms = &zc->blockState.matchState;
118126 +    DEBUGLOG(5, "ZSTD_buildSeqStore (srcSize=%zu)", srcSize);
118127 +    assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
118128 +    /* Assert that we have correctly flushed the ctx params into the ms's copy */
118129 +    ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams);
118130 +    if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
118131 +        if (zc->appliedParams.cParams.strategy >= ZSTD_btopt) {
118132 +            ZSTD_ldm_skipRawSeqStoreBytes(&zc->externSeqStore, srcSize);
118133 +        } else {
118134 +            ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch);
118135 +        }
118136 +        return ZSTDbss_noCompress; /* don't even attempt compression below a certain srcSize */
118137 +    }
118138 +    ZSTD_resetSeqStore(&(zc->seqStore));
118139 +    /* required for optimal parser to read stats from dictionary */
118140 +    ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy;
118141 +    /* tell the optimal parser how we expect to compress literals */
118142 +    ms->opt.literalCompressionMode = zc->appliedParams.literalCompressionMode;
118143 +    /* a gap between an attached dict and the current window is not safe,
118144 +     * they must remain adjacent,
118145 +     * and when that stops being the case, the dict must be unset */
118146 +    assert(ms->dictMatchState == NULL || ms->loadedDictEnd == ms->window.dictLimit);
118148 +    /* limited update after a very long match */
118149 +    {   const BYTE* const base = ms->window.base;
118150 +        const BYTE* const istart = (const BYTE*)src;
118151 +        const U32 curr = (U32)(istart-base);
118152 +        if (sizeof(ptrdiff_t)==8) assert(istart - base < (ptrdiff_t)(U32)(-1));   /* ensure no overflow */
118153 +        if (curr > ms->nextToUpdate + 384)
118154 +            ms->nextToUpdate = curr - MIN(192, (U32)(curr - ms->nextToUpdate - 384));
118155 +    }
118157 +    /* select and store sequences */
118158 +    {   ZSTD_dictMode_e const dictMode = ZSTD_matchState_dictMode(ms);
118159 +        size_t lastLLSize;
118160 +        {   int i;
118161 +            for (i = 0; i < ZSTD_REP_NUM; ++i)
118162 +                zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i];
118163 +        }
118164 +        if (zc->externSeqStore.pos < zc->externSeqStore.size) {
118165 +            assert(!zc->appliedParams.ldmParams.enableLdm);
118166 +            /* Updates ldmSeqStore.pos */
118167 +            lastLLSize =
118168 +                ZSTD_ldm_blockCompress(&zc->externSeqStore,
118169 +                                       ms, &zc->seqStore,
118170 +                                       zc->blockState.nextCBlock->rep,
118171 +                                       src, srcSize);
118172 +            assert(zc->externSeqStore.pos <= zc->externSeqStore.size);
118173 +        } else if (zc->appliedParams.ldmParams.enableLdm) {
118174 +            rawSeqStore_t ldmSeqStore = kNullRawSeqStore;
118176 +            ldmSeqStore.seq = zc->ldmSequences;
118177 +            ldmSeqStore.capacity = zc->maxNbLdmSequences;
118178 +            /* Updates ldmSeqStore.size */
118179 +            FORWARD_IF_ERROR(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,
118180 +                                               &zc->appliedParams.ldmParams,
118181 +                                               src, srcSize), "");
118182 +            /* Updates ldmSeqStore.pos */
118183 +            lastLLSize =
118184 +                ZSTD_ldm_blockCompress(&ldmSeqStore,
118185 +                                       ms, &zc->seqStore,
118186 +                                       zc->blockState.nextCBlock->rep,
118187 +                                       src, srcSize);
118188 +            assert(ldmSeqStore.pos == ldmSeqStore.size);
118189 +        } else {   /* not long range mode */
118190 +            ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, dictMode);
118191 +            ms->ldmSeqStore = NULL;
118192 +            lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
118193 +        }
118194 +        {   const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;
118195 +            ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize);
118196 +    }   }
118197 +    return ZSTDbss_compress;
118200 +static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
118202 +    const seqStore_t* seqStore = ZSTD_getSeqStore(zc);
118203 +    const seqDef* seqStoreSeqs = seqStore->sequencesStart;
118204 +    size_t seqStoreSeqSize = seqStore->sequences - seqStoreSeqs;
118205 +    size_t seqStoreLiteralsSize = (size_t)(seqStore->lit - seqStore->litStart);
118206 +    size_t literalsRead = 0;
118207 +    size_t lastLLSize;
118209 +    ZSTD_Sequence* outSeqs = &zc->seqCollector.seqStart[zc->seqCollector.seqIndex];
118210 +    size_t i;
118211 +    repcodes_t updatedRepcodes;
118213 +    assert(zc->seqCollector.seqIndex + 1 < zc->seqCollector.maxSequences);
118214 +    /* Ensure we have enough space for last literals "sequence" */
118215 +    assert(zc->seqCollector.maxSequences >= seqStoreSeqSize + 1);
118216 +    ZSTD_memcpy(updatedRepcodes.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
118217 +    for (i = 0; i < seqStoreSeqSize; ++i) {
118218 +        U32 rawOffset = seqStoreSeqs[i].offset - ZSTD_REP_NUM;
118219 +        outSeqs[i].litLength = seqStoreSeqs[i].litLength;
118220 +        outSeqs[i].matchLength = seqStoreSeqs[i].matchLength + MINMATCH;
118221 +        outSeqs[i].rep = 0;
118223 +        if (i == seqStore->longLengthPos) {
118224 +            if (seqStore->longLengthID == 1) {
118225 +                outSeqs[i].litLength += 0x10000;
118226 +            } else if (seqStore->longLengthID == 2) {
118227 +                outSeqs[i].matchLength += 0x10000;
118228 +            }
118229 +        }
118231 +        if (seqStoreSeqs[i].offset <= ZSTD_REP_NUM) {
118232 +            /* Derive the correct offset corresponding to a repcode */
118233 +            outSeqs[i].rep = seqStoreSeqs[i].offset;
118234 +            if (outSeqs[i].litLength != 0) {
118235 +                rawOffset = updatedRepcodes.rep[outSeqs[i].rep - 1];
118236 +            } else {
118237 +                if (outSeqs[i].rep == 3) {
118238 +                    rawOffset = updatedRepcodes.rep[0] - 1;
118239 +                } else {
118240 +                    rawOffset = updatedRepcodes.rep[outSeqs[i].rep];
118241 +                }
118242 +            }
118243 +        }
118244 +        outSeqs[i].offset = rawOffset;
118245 +        /* seqStoreSeqs[i].offset == offCode+1, and ZSTD_updateRep() expects offCode
118246 +           so we provide seqStoreSeqs[i].offset - 1 */
118247 +        updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep,
118248 +                                         seqStoreSeqs[i].offset - 1,
118249 +                                         seqStoreSeqs[i].litLength == 0);
118250 +        literalsRead += outSeqs[i].litLength;
118251 +    }
118252 +    /* Insert last literals (if any exist) in the block as a sequence with ml == off == 0.
118253 +     * If there are no last literals, then we'll emit (of: 0, ml: 0, ll: 0), which is a marker
118254 +     * for the block boundary, according to the API.
118255 +     */
118256 +    assert(seqStoreLiteralsSize >= literalsRead);
118257 +    lastLLSize = seqStoreLiteralsSize - literalsRead;
118258 +    outSeqs[i].litLength = (U32)lastLLSize;
118259 +    outSeqs[i].matchLength = outSeqs[i].offset = outSeqs[i].rep = 0;
118260 +    seqStoreSeqSize++;
118261 +    zc->seqCollector.seqIndex += seqStoreSeqSize;
118264 +size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
118265 +                              size_t outSeqsSize, const void* src, size_t srcSize)
118267 +    const size_t dstCapacity = ZSTD_compressBound(srcSize);
118268 +    void* dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem);
118269 +    SeqCollector seqCollector;
118271 +    RETURN_ERROR_IF(dst == NULL, memory_allocation, "NULL pointer!");
118273 +    seqCollector.collectSequences = 1;
118274 +    seqCollector.seqStart = outSeqs;
118275 +    seqCollector.seqIndex = 0;
118276 +    seqCollector.maxSequences = outSeqsSize;
118277 +    zc->seqCollector = seqCollector;
118279 +    ZSTD_compress2(zc, dst, dstCapacity, src, srcSize);
118280 +    ZSTD_customFree(dst, ZSTD_defaultCMem);
118281 +    return zc->seqCollector.seqIndex;
118284 +size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize) {
118285 +    size_t in = 0;
118286 +    size_t out = 0;
118287 +    for (; in < seqsSize; ++in) {
118288 +        if (sequences[in].offset == 0 && sequences[in].matchLength == 0) {
118289 +            if (in != seqsSize - 1) {
118290 +                sequences[in+1].litLength += sequences[in].litLength;
118291 +            }
118292 +        } else {
118293 +            sequences[out] = sequences[in];
118294 +            ++out;
118295 +        }
118296 +    }
118297 +    return out;
118300 +/* Unrolled loop to read four size_ts of input at a time. Returns 1 if is RLE, 0 if not. */
118301 +static int ZSTD_isRLE(const BYTE* src, size_t length) {
118302 +    const BYTE* ip = src;
118303 +    const BYTE value = ip[0];
118304 +    const size_t valueST = (size_t)((U64)value * 0x0101010101010101ULL);
118305 +    const size_t unrollSize = sizeof(size_t) * 4;
118306 +    const size_t unrollMask = unrollSize - 1;
118307 +    const size_t prefixLength = length & unrollMask;
118308 +    size_t i;
118309 +    size_t u;
118310 +    if (length == 1) return 1;
118311 +    /* Check if prefix is RLE first before using unrolled loop */
118312 +    if (prefixLength && ZSTD_count(ip+1, ip, ip+prefixLength) != prefixLength-1) {
118313 +        return 0;
118314 +    }
118315 +    for (i = prefixLength; i != length; i += unrollSize) {
118316 +        for (u = 0; u < unrollSize; u += sizeof(size_t)) {
118317 +            if (MEM_readST(ip + i + u) != valueST) {
118318 +                return 0;
118319 +            }
118320 +        }
118321 +    }
118322 +    return 1;
118325 +/* Returns true if the given block may be RLE.
118326 + * This is just a heuristic based on the compressibility.
118327 + * It may return both false positives and false negatives.
118328 + */
118329 +static int ZSTD_maybeRLE(seqStore_t const* seqStore)
118331 +    size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart);
118332 +    size_t const nbLits = (size_t)(seqStore->lit - seqStore->litStart);
118334 +    return nbSeqs < 4 && nbLits < 10;
118337 +static void ZSTD_confirmRepcodesAndEntropyTables(ZSTD_CCtx* zc)
118339 +    ZSTD_compressedBlockState_t* const tmp = zc->blockState.prevCBlock;
118340 +    zc->blockState.prevCBlock = zc->blockState.nextCBlock;
118341 +    zc->blockState.nextCBlock = tmp;
118344 +static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
118345 +                                        void* dst, size_t dstCapacity,
118346 +                                        const void* src, size_t srcSize, U32 frame)
118348 +    /* This the upper bound for the length of an rle block.
118349 +     * This isn't the actual upper bound. Finding the real threshold
118350 +     * needs further investigation.
118351 +     */
118352 +    const U32 rleMaxLength = 25;
118353 +    size_t cSize;
118354 +    const BYTE* ip = (const BYTE*)src;
118355 +    BYTE* op = (BYTE*)dst;
118356 +    DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
118357 +                (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
118358 +                (unsigned)zc->blockState.matchState.nextToUpdate);
118360 +    {   const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
118361 +        FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
118362 +        if (bss == ZSTDbss_noCompress) { cSize = 0; goto out; }
118363 +    }
118365 +    if (zc->seqCollector.collectSequences) {
118366 +        ZSTD_copyBlockSequences(zc);
118367 +        ZSTD_confirmRepcodesAndEntropyTables(zc);
118368 +        return 0;
118369 +    }
118371 +    /* encode sequences and literals */
118372 +    cSize = ZSTD_entropyCompressSequences(&zc->seqStore,
118373 +            &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
118374 +            &zc->appliedParams,
118375 +            dst, dstCapacity,
118376 +            srcSize,
118377 +            zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
118378 +            zc->bmi2);
118380 +    if (zc->seqCollector.collectSequences) {
118381 +        ZSTD_copyBlockSequences(zc);
118382 +        return 0;
118383 +    }
118386 +    if (frame &&
118387 +        /* We don't want to emit our first block as a RLE even if it qualifies because
118388 +         * doing so will cause the decoder (cli only) to throw a "should consume all input error."
118389 +         * This is only an issue for zstd <= v1.4.3
118390 +         */
118391 +        !zc->isFirstBlock &&
118392 +        cSize < rleMaxLength &&
118393 +        ZSTD_isRLE(ip, srcSize))
118394 +    {
118395 +        cSize = 1;
118396 +        op[0] = ip[0];
118397 +    }
118399 +out:
118400 +    if (!ZSTD_isError(cSize) && cSize > 1) {
118401 +        ZSTD_confirmRepcodesAndEntropyTables(zc);
118402 +    }
118403 +    /* We check that dictionaries have offset codes available for the first
118404 +     * block. After the first block, the offcode table might not have large
118405 +     * enough codes to represent the offsets in the data.
118406 +     */
118407 +    if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
118408 +        zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
118410 +    return cSize;
118413 +static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc,
118414 +                               void* dst, size_t dstCapacity,
118415 +                               const void* src, size_t srcSize,
118416 +                               const size_t bss, U32 lastBlock)
118418 +    DEBUGLOG(6, "Attempting ZSTD_compressSuperBlock()");
118419 +    if (bss == ZSTDbss_compress) {
118420 +        if (/* We don't want to emit our first block as a RLE even if it qualifies because
118421 +            * doing so will cause the decoder (cli only) to throw a "should consume all input error."
118422 +            * This is only an issue for zstd <= v1.4.3
118423 +            */
118424 +            !zc->isFirstBlock &&
118425 +            ZSTD_maybeRLE(&zc->seqStore) &&
118426 +            ZSTD_isRLE((BYTE const*)src, srcSize))
118427 +        {
118428 +            return ZSTD_rleCompressBlock(dst, dstCapacity, *(BYTE const*)src, srcSize, lastBlock);
118429 +        }
118430 +        /* Attempt superblock compression.
118431 +         *
118432 +         * Note that compressed size of ZSTD_compressSuperBlock() is not bound by the
118433 +         * standard ZSTD_compressBound(). This is a problem, because even if we have
118434 +         * space now, taking an extra byte now could cause us to run out of space later
118435 +         * and violate ZSTD_compressBound().
118436 +         *
118437 +         * Define blockBound(blockSize) = blockSize + ZSTD_blockHeaderSize.
118438 +         *
118439 +         * In order to respect ZSTD_compressBound() we must attempt to emit a raw
118440 +         * uncompressed block in these cases:
118441 +         *   * cSize == 0: Return code for an uncompressed block.
118442 +         *   * cSize == dstSize_tooSmall: We may have expanded beyond blockBound(srcSize).
118443 +         *     ZSTD_noCompressBlock() will return dstSize_tooSmall if we are really out of
118444 +         *     output space.
118445 +         *   * cSize >= blockBound(srcSize): We have expanded the block too much so
118446 +         *     emit an uncompressed block.
118447 +         */
118448 +        {
118449 +            size_t const cSize = ZSTD_compressSuperBlock(zc, dst, dstCapacity, src, srcSize, lastBlock);
118450 +            if (cSize != ERROR(dstSize_tooSmall)) {
118451 +                size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy);
118452 +                FORWARD_IF_ERROR(cSize, "ZSTD_compressSuperBlock failed");
118453 +                if (cSize != 0 && cSize < maxCSize + ZSTD_blockHeaderSize) {
118454 +                    ZSTD_confirmRepcodesAndEntropyTables(zc);
118455 +                    return cSize;
118456 +                }
118457 +            }
118458 +        }
118459 +    }
118461 +    DEBUGLOG(6, "Resorting to ZSTD_noCompressBlock()");
118462 +    /* Superblock compression failed, attempt to emit a single no compress block.
118463 +     * The decoder will be able to stream this block since it is uncompressed.
118464 +     */
118465 +    return ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock);
118468 +static size_t ZSTD_compressBlock_targetCBlockSize(ZSTD_CCtx* zc,
118469 +                               void* dst, size_t dstCapacity,
118470 +                               const void* src, size_t srcSize,
118471 +                               U32 lastBlock)
118473 +    size_t cSize = 0;
118474 +    const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
118475 +    DEBUGLOG(5, "ZSTD_compressBlock_targetCBlockSize (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u, srcSize=%zu)",
118476 +                (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, (unsigned)zc->blockState.matchState.nextToUpdate, srcSize);
118477 +    FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
118479 +    cSize = ZSTD_compressBlock_targetCBlockSize_body(zc, dst, dstCapacity, src, srcSize, bss, lastBlock);
118480 +    FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize_body failed");
118482 +    if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
118483 +        zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
118485 +    return cSize;
118488 +static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms,
118489 +                                         ZSTD_cwksp* ws,
118490 +                                         ZSTD_CCtx_params const* params,
118491 +                                         void const* ip,
118492 +                                         void const* iend)
118494 +    if (ZSTD_window_needOverflowCorrection(ms->window, iend)) {
118495 +        U32 const maxDist = (U32)1 << params->cParams.windowLog;
118496 +        U32 const cycleLog = ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy);
118497 +        U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip);
118498 +        ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
118499 +        ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
118500 +        ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
118501 +        ZSTD_cwksp_mark_tables_dirty(ws);
118502 +        ZSTD_reduceIndex(ms, params, correction);
118503 +        ZSTD_cwksp_mark_tables_clean(ws);
118504 +        if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
118505 +        else ms->nextToUpdate -= correction;
118506 +        /* invalidate dictionaries on overflow correction */
118507 +        ms->loadedDictEnd = 0;
118508 +        ms->dictMatchState = NULL;
118509 +    }
118512 +/*! ZSTD_compress_frameChunk() :
118513 +*   Compress a chunk of data into one or multiple blocks.
118514 +*   All blocks will be terminated, all input will be consumed.
118515 +*   Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
118516 +*   Frame is supposed already started (header already produced)
118517 +*   @return : compressed size, or an error code
118519 +static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
118520 +                                     void* dst, size_t dstCapacity,
118521 +                               const void* src, size_t srcSize,
118522 +                                     U32 lastFrameChunk)
118524 +    size_t blockSize = cctx->blockSize;
118525 +    size_t remaining = srcSize;
118526 +    const BYTE* ip = (const BYTE*)src;
118527 +    BYTE* const ostart = (BYTE*)dst;
118528 +    BYTE* op = ostart;
118529 +    U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;
118531 +    assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX);
118533 +    DEBUGLOG(4, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize);
118534 +    if (cctx->appliedParams.fParams.checksumFlag && srcSize)
118535 +        xxh64_update(&cctx->xxhState, src, srcSize);
118537 +    while (remaining) {
118538 +        ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
118539 +        U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
118541 +        RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE,
118542 +                        dstSize_tooSmall,
118543 +                        "not enough space to store compressed block");
118544 +        if (remaining < blockSize) blockSize = remaining;
118546 +        ZSTD_overflowCorrectIfNeeded(
118547 +            ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize);
118548 +        ZSTD_checkDictValidity(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);
118550 +        /* Ensure hash/chain table insertion resumes no sooner than lowlimit */
118551 +        if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit;
118553 +        {   size_t cSize;
118554 +            if (ZSTD_useTargetCBlockSize(&cctx->appliedParams)) {
118555 +                cSize = ZSTD_compressBlock_targetCBlockSize(cctx, op, dstCapacity, ip, blockSize, lastBlock);
118556 +                FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize failed");
118557 +                assert(cSize > 0);
118558 +                assert(cSize <= blockSize + ZSTD_blockHeaderSize);
118559 +            } else {
118560 +                cSize = ZSTD_compressBlock_internal(cctx,
118561 +                                        op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize,
118562 +                                        ip, blockSize, 1 /* frame */);
118563 +                FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_internal failed");
118565 +                if (cSize == 0) {  /* block is not compressible */
118566 +                    cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
118567 +                    FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
118568 +                } else {
118569 +                    U32 const cBlockHeader = cSize == 1 ?
118570 +                        lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) :
118571 +                        lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
118572 +                    MEM_writeLE24(op, cBlockHeader);
118573 +                    cSize += ZSTD_blockHeaderSize;
118574 +                }
118575 +            }
118578 +            ip += blockSize;
118579 +            assert(remaining >= blockSize);
118580 +            remaining -= blockSize;
118581 +            op += cSize;
118582 +            assert(dstCapacity >= cSize);
118583 +            dstCapacity -= cSize;
118584 +            cctx->isFirstBlock = 0;
118585 +            DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u",
118586 +                        (unsigned)cSize);
118587 +    }   }
118589 +    if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
118590 +    return (size_t)(op-ostart);
118594 +static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
118595 +                                    const ZSTD_CCtx_params* params, U64 pledgedSrcSize, U32 dictID)
118596 +{   BYTE* const op = (BYTE*)dst;
118597 +    U32   const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536);   /* 0-3 */
118598 +    U32   const dictIDSizeCode = params->fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength;   /* 0-3 */
118599 +    U32   const checksumFlag = params->fParams.checksumFlag>0;
118600 +    U32   const windowSize = (U32)1 << params->cParams.windowLog;
118601 +    U32   const singleSegment = params->fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
118602 +    BYTE  const windowLogByte = (BYTE)((params->cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
118603 +    U32   const fcsCode = params->fParams.contentSizeFlag ?
118604 +                     (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0;  /* 0-3 */
118605 +    BYTE  const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
118606 +    size_t pos=0;
118608 +    assert(!(params->fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
118609 +    RETURN_ERROR_IF(dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX, dstSize_tooSmall,
118610 +                    "dst buf is too small to fit worst-case frame header size.");
118611 +    DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
118612 +                !params->fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);
118613 +    if (params->format == ZSTD_f_zstd1) {
118614 +        MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
118615 +        pos = 4;
118616 +    }
118617 +    op[pos++] = frameHeaderDescriptionByte;
118618 +    if (!singleSegment) op[pos++] = windowLogByte;
118619 +    switch(dictIDSizeCode)
118620 +    {
118621 +        default:  assert(0); /* impossible */
118622 +        case 0 : break;
118623 +        case 1 : op[pos] = (BYTE)(dictID); pos++; break;
118624 +        case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break;
118625 +        case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break;
118626 +    }
118627 +    switch(fcsCode)
118628 +    {
118629 +        default:  assert(0); /* impossible */
118630 +        case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;
118631 +        case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break;
118632 +        case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break;
118633 +        case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break;
118634 +    }
118635 +    return pos;
118638 +/* ZSTD_writeSkippableFrame_advanced() :
118639 + * Writes out a skippable frame with the specified magic number variant (16 are supported),
118640 + * from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15, and the desired source data.
118642 + * Returns the total number of bytes written, or a ZSTD error code.
118643 + */
118644 +size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
118645 +                                const void* src, size_t srcSize, unsigned magicVariant) {
118646 +    BYTE* op = (BYTE*)dst;
118647 +    RETURN_ERROR_IF(dstCapacity < srcSize + ZSTD_SKIPPABLEHEADERSIZE /* Skippable frame overhead */,
118648 +                    dstSize_tooSmall, "Not enough room for skippable frame");
118649 +    RETURN_ERROR_IF(srcSize > (unsigned)0xFFFFFFFF, srcSize_wrong, "Src size too large for skippable frame");
118650 +    RETURN_ERROR_IF(magicVariant > 15, parameter_outOfBound, "Skippable frame magic number variant not supported");
118652 +    MEM_writeLE32(op, (U32)(ZSTD_MAGIC_SKIPPABLE_START + magicVariant));
118653 +    MEM_writeLE32(op+4, (U32)srcSize);
118654 +    ZSTD_memcpy(op+8, src, srcSize);
118655 +    return srcSize + ZSTD_SKIPPABLEHEADERSIZE;
118658 +/* ZSTD_writeLastEmptyBlock() :
118659 + * output an empty Block with end-of-frame mark to complete a frame
118660 + * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
118661 + *           or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
118662 + */
118663 +size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
118665 +    RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall,
118666 +                    "dst buf is too small to write frame trailer empty block.");
118667 +    {   U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1);  /* 0 size */
118668 +        MEM_writeLE24(dst, cBlockHeader24);
118669 +        return ZSTD_blockHeaderSize;
118670 +    }
118673 +size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
118675 +    RETURN_ERROR_IF(cctx->stage != ZSTDcs_init, stage_wrong,
118676 +                    "wrong cctx stage");
118677 +    RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm,
118678 +                    parameter_unsupported,
118679 +                    "incompatible with ldm");
118680 +    cctx->externSeqStore.seq = seq;
118681 +    cctx->externSeqStore.size = nbSeq;
118682 +    cctx->externSeqStore.capacity = nbSeq;
118683 +    cctx->externSeqStore.pos = 0;
118684 +    cctx->externSeqStore.posInSequence = 0;
118685 +    return 0;
118689 +static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
118690 +                              void* dst, size_t dstCapacity,
118691 +                        const void* src, size_t srcSize,
118692 +                               U32 frame, U32 lastFrameChunk)
118694 +    ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
118695 +    size_t fhSize = 0;
118697 +    DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
118698 +                cctx->stage, (unsigned)srcSize);
118699 +    RETURN_ERROR_IF(cctx->stage==ZSTDcs_created, stage_wrong,
118700 +                    "missing init (ZSTD_compressBegin)");
118702 +    if (frame && (cctx->stage==ZSTDcs_init)) {
118703 +        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams,
118704 +                                       cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
118705 +        FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
118706 +        assert(fhSize <= dstCapacity);
118707 +        dstCapacity -= fhSize;
118708 +        dst = (char*)dst + fhSize;
118709 +        cctx->stage = ZSTDcs_ongoing;
118710 +    }
118712 +    if (!srcSize) return fhSize;  /* do not generate an empty block if no input */
118714 +    if (!ZSTD_window_update(&ms->window, src, srcSize)) {
118715 +        ms->nextToUpdate = ms->window.dictLimit;
118716 +    }
118717 +    if (cctx->appliedParams.ldmParams.enableLdm) {
118718 +        ZSTD_window_update(&cctx->ldmState.window, src, srcSize);
118719 +    }
118721 +    if (!frame) {
118722 +        /* overflow check and correction for block mode */
118723 +        ZSTD_overflowCorrectIfNeeded(
118724 +            ms, &cctx->workspace, &cctx->appliedParams,
118725 +            src, (BYTE const*)src + srcSize);
118726 +    }
118728 +    DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize);
118729 +    {   size_t const cSize = frame ?
118730 +                             ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
118731 +                             ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize, 0 /* frame */);
118732 +        FORWARD_IF_ERROR(cSize, "%s", frame ? "ZSTD_compress_frameChunk failed" : "ZSTD_compressBlock_internal failed");
118733 +        cctx->consumedSrcSize += srcSize;
118734 +        cctx->producedCSize += (cSize + fhSize);
118735 +        assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
118736 +        if (cctx->pledgedSrcSizePlusOne != 0) {  /* control src size */
118737 +            ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
118738 +            RETURN_ERROR_IF(
118739 +                cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne,
118740 +                srcSize_wrong,
118741 +                "error : pledgedSrcSize = %u, while realSrcSize >= %u",
118742 +                (unsigned)cctx->pledgedSrcSizePlusOne-1,
118743 +                (unsigned)cctx->consumedSrcSize);
118744 +        }
118745 +        return cSize + fhSize;
118746 +    }
118749 +size_t ZSTD_compressContinue (ZSTD_CCtx* cctx,
118750 +                              void* dst, size_t dstCapacity,
118751 +                        const void* src, size_t srcSize)
118753 +    DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize);
118754 +    return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */);
118758 +size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
118760 +    ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams;
118761 +    assert(!ZSTD_checkCParams(cParams));
118762 +    return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog);
118765 +size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
118767 +    DEBUGLOG(5, "ZSTD_compressBlock: srcSize = %u", (unsigned)srcSize);
118768 +    { size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
118769 +      RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong, "input is larger than a block"); }
118771 +    return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
118774 +/*! ZSTD_loadDictionaryContent() :
118775 + *  @return : 0, or an error code
118776 + */
118777 +static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
118778 +                                         ldmState_t* ls,
118779 +                                         ZSTD_cwksp* ws,
118780 +                                         ZSTD_CCtx_params const* params,
118781 +                                         const void* src, size_t srcSize,
118782 +                                         ZSTD_dictTableLoadMethod_e dtlm)
118784 +    const BYTE* ip = (const BYTE*) src;
118785 +    const BYTE* const iend = ip + srcSize;
118787 +    ZSTD_window_update(&ms->window, src, srcSize);
118788 +    ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);
118790 +    if (params->ldmParams.enableLdm && ls != NULL) {
118791 +        ZSTD_window_update(&ls->window, src, srcSize);
118792 +        ls->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ls->window.base);
118793 +    }
118795 +    /* Assert that we the ms params match the params we're being given */
118796 +    ZSTD_assertEqualCParams(params->cParams, ms->cParams);
118798 +    if (srcSize <= HASH_READ_SIZE) return 0;
118800 +    while (iend - ip > HASH_READ_SIZE) {
118801 +        size_t const remaining = (size_t)(iend - ip);
118802 +        size_t const chunk = MIN(remaining, ZSTD_CHUNKSIZE_MAX);
118803 +        const BYTE* const ichunk = ip + chunk;
118805 +        ZSTD_overflowCorrectIfNeeded(ms, ws, params, ip, ichunk);
118807 +        if (params->ldmParams.enableLdm && ls != NULL)
118808 +            ZSTD_ldm_fillHashTable(ls, (const BYTE*)src, (const BYTE*)src + srcSize, &params->ldmParams);
118810 +        switch(params->cParams.strategy)
118811 +        {
118812 +        case ZSTD_fast:
118813 +            ZSTD_fillHashTable(ms, ichunk, dtlm);
118814 +            break;
118815 +        case ZSTD_dfast:
118816 +            ZSTD_fillDoubleHashTable(ms, ichunk, dtlm);
118817 +            break;
118819 +        case ZSTD_greedy:
118820 +        case ZSTD_lazy:
118821 +        case ZSTD_lazy2:
118822 +            if (chunk >= HASH_READ_SIZE && ms->dedicatedDictSearch) {
118823 +                assert(chunk == remaining); /* must load everything in one go */
118824 +                ZSTD_dedicatedDictSearch_lazy_loadDictionary(ms, ichunk-HASH_READ_SIZE);
118825 +            } else if (chunk >= HASH_READ_SIZE) {
118826 +                ZSTD_insertAndFindFirstIndex(ms, ichunk-HASH_READ_SIZE);
118827 +            }
118828 +            break;
118830 +        case ZSTD_btlazy2:   /* we want the dictionary table fully sorted */
118831 +        case ZSTD_btopt:
118832 +        case ZSTD_btultra:
118833 +        case ZSTD_btultra2:
118834 +            if (chunk >= HASH_READ_SIZE)
118835 +                ZSTD_updateTree(ms, ichunk-HASH_READ_SIZE, ichunk);
118836 +            break;
118838 +        default:
118839 +            assert(0);  /* not possible : not a valid strategy id */
118840 +        }
118842 +        ip = ichunk;
118843 +    }
118845 +    ms->nextToUpdate = (U32)(iend - ms->window.base);
118846 +    return 0;
118850 +/* Dictionaries that assign zero probability to symbols that show up causes problems
118851 + * when FSE encoding. Mark dictionaries with zero probability symbols as FSE_repeat_check
118852 + * and only dictionaries with 100% valid symbols can be assumed valid.
118853 + */
118854 +static FSE_repeat ZSTD_dictNCountRepeat(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue)
118856 +    U32 s;
118857 +    if (dictMaxSymbolValue < maxSymbolValue) {
118858 +        return FSE_repeat_check;
118859 +    }
118860 +    for (s = 0; s <= maxSymbolValue; ++s) {
118861 +        if (normalizedCounter[s] == 0) {
118862 +            return FSE_repeat_check;
118863 +        }
118864 +    }
118865 +    return FSE_repeat_valid;
118868 +size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
118869 +                         const void* const dict, size_t dictSize)
118871 +    short offcodeNCount[MaxOff+1];
118872 +    unsigned offcodeMaxValue = MaxOff;
118873 +    const BYTE* dictPtr = (const BYTE*)dict;    /* skip magic num and dict ID */
118874 +    const BYTE* const dictEnd = dictPtr + dictSize;
118875 +    dictPtr += 8;
118876 +    bs->entropy.huf.repeatMode = HUF_repeat_check;
118878 +    {   unsigned maxSymbolValue = 255;
118879 +        unsigned hasZeroWeights = 1;
118880 +        size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr,
118881 +            dictEnd-dictPtr, &hasZeroWeights);
118883 +        /* We only set the loaded table as valid if it contains all non-zero
118884 +         * weights. Otherwise, we set it to check */
118885 +        if (!hasZeroWeights)
118886 +            bs->entropy.huf.repeatMode = HUF_repeat_valid;
118888 +        RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted, "");
118889 +        RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted, "");
118890 +        dictPtr += hufHeaderSize;
118891 +    }
118893 +    {   unsigned offcodeLog;
118894 +        size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
118895 +        RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, "");
118896 +        RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, "");
118897 +        /* fill all offset symbols to avoid garbage at end of table */
118898 +        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
118899 +                bs->entropy.fse.offcodeCTable,
118900 +                offcodeNCount, MaxOff, offcodeLog,
118901 +                workspace, HUF_WORKSPACE_SIZE)),
118902 +            dictionary_corrupted, "");
118903 +        /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
118904 +        dictPtr += offcodeHeaderSize;
118905 +    }
118907 +    {   short matchlengthNCount[MaxML+1];
118908 +        unsigned matchlengthMaxValue = MaxML, matchlengthLog;
118909 +        size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
118910 +        RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, "");
118911 +        RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, "");
118912 +        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
118913 +                bs->entropy.fse.matchlengthCTable,
118914 +                matchlengthNCount, matchlengthMaxValue, matchlengthLog,
118915 +                workspace, HUF_WORKSPACE_SIZE)),
118916 +            dictionary_corrupted, "");
118917 +        bs->entropy.fse.matchlength_repeatMode = ZSTD_dictNCountRepeat(matchlengthNCount, matchlengthMaxValue, MaxML);
118918 +        dictPtr += matchlengthHeaderSize;
118919 +    }
118921 +    {   short litlengthNCount[MaxLL+1];
118922 +        unsigned litlengthMaxValue = MaxLL, litlengthLog;
118923 +        size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
118924 +        RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, "");
118925 +        RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, "");
118926 +        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
118927 +                bs->entropy.fse.litlengthCTable,
118928 +                litlengthNCount, litlengthMaxValue, litlengthLog,
118929 +                workspace, HUF_WORKSPACE_SIZE)),
118930 +            dictionary_corrupted, "");
118931 +        bs->entropy.fse.litlength_repeatMode = ZSTD_dictNCountRepeat(litlengthNCount, litlengthMaxValue, MaxLL);
118932 +        dictPtr += litlengthHeaderSize;
118933 +    }
118935 +    RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, "");
118936 +    bs->rep[0] = MEM_readLE32(dictPtr+0);
118937 +    bs->rep[1] = MEM_readLE32(dictPtr+4);
118938 +    bs->rep[2] = MEM_readLE32(dictPtr+8);
118939 +    dictPtr += 12;
118941 +    {   size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
118942 +        U32 offcodeMax = MaxOff;
118943 +        if (dictContentSize <= ((U32)-1) - 128 KB) {
118944 +            U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
118945 +            offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */
118946 +        }
118947 +        /* All offset values <= dictContentSize + 128 KB must be representable for a valid table */
118948 +        bs->entropy.fse.offcode_repeatMode = ZSTD_dictNCountRepeat(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff));
118950 +        /* All repCodes must be <= dictContentSize and != 0 */
118951 +        {   U32 u;
118952 +            for (u=0; u<3; u++) {
118953 +                RETURN_ERROR_IF(bs->rep[u] == 0, dictionary_corrupted, "");
118954 +                RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted, "");
118955 +    }   }   }
118957 +    return dictPtr - (const BYTE*)dict;
118960 +/* Dictionary format :
118961 + * See :
118962 + * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#dictionary-format
118963 + */
118964 +/*! ZSTD_loadZstdDictionary() :
118965 + * @return : dictID, or an error code
118966 + *  assumptions : magic number supposed already checked
118967 + *                dictSize supposed >= 8
118968 + */
118969 +static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
118970 +                                      ZSTD_matchState_t* ms,
118971 +                                      ZSTD_cwksp* ws,
118972 +                                      ZSTD_CCtx_params const* params,
118973 +                                      const void* dict, size_t dictSize,
118974 +                                      ZSTD_dictTableLoadMethod_e dtlm,
118975 +                                      void* workspace)
118977 +    const BYTE* dictPtr = (const BYTE*)dict;
118978 +    const BYTE* const dictEnd = dictPtr + dictSize;
118979 +    size_t dictID;
118980 +    size_t eSize;
118982 +    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
118983 +    assert(dictSize >= 8);
118984 +    assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY);
118986 +    dictID = params->fParams.noDictIDFlag ? 0 :  MEM_readLE32(dictPtr + 4 /* skip magic number */ );
118987 +    eSize = ZSTD_loadCEntropy(bs, workspace, dict, dictSize);
118988 +    FORWARD_IF_ERROR(eSize, "ZSTD_loadCEntropy failed");
118989 +    dictPtr += eSize;
118991 +    {
118992 +        size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
118993 +        FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(
118994 +            ms, NULL, ws, params, dictPtr, dictContentSize, dtlm), "");
118995 +    }
118996 +    return dictID;
118999 +/** ZSTD_compress_insertDictionary() :
119000 +*   @return : dictID, or an error code */
119001 +static size_t
119002 +ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
119003 +                               ZSTD_matchState_t* ms,
119004 +                               ldmState_t* ls,
119005 +                               ZSTD_cwksp* ws,
119006 +                         const ZSTD_CCtx_params* params,
119007 +                         const void* dict, size_t dictSize,
119008 +                               ZSTD_dictContentType_e dictContentType,
119009 +                               ZSTD_dictTableLoadMethod_e dtlm,
119010 +                               void* workspace)
119012 +    DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
119013 +    if ((dict==NULL) || (dictSize<8)) {
119014 +        RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
119015 +        return 0;
119016 +    }
119018 +    ZSTD_reset_compressedBlockState(bs);
119020 +    /* dict restricted modes */
119021 +    if (dictContentType == ZSTD_dct_rawContent)
119022 +        return ZSTD_loadDictionaryContent(ms, ls, ws, params, dict, dictSize, dtlm);
119024 +    if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
119025 +        if (dictContentType == ZSTD_dct_auto) {
119026 +            DEBUGLOG(4, "raw content dictionary detected");
119027 +            return ZSTD_loadDictionaryContent(
119028 +                ms, ls, ws, params, dict, dictSize, dtlm);
119029 +        }
119030 +        RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
119031 +        assert(0);   /* impossible */
119032 +    }
119034 +    /* dict as full zstd dictionary */
119035 +    return ZSTD_loadZstdDictionary(
119036 +        bs, ms, ws, params, dict, dictSize, dtlm, workspace);
119039 +#define ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF (128 KB)
119040 +#define ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER (6ULL)
119042 +/*! ZSTD_compressBegin_internal() :
119043 + * @return : 0, or an error code */
119044 +static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
119045 +                                    const void* dict, size_t dictSize,
119046 +                                    ZSTD_dictContentType_e dictContentType,
119047 +                                    ZSTD_dictTableLoadMethod_e dtlm,
119048 +                                    const ZSTD_CDict* cdict,
119049 +                                    const ZSTD_CCtx_params* params, U64 pledgedSrcSize,
119050 +                                    ZSTD_buffered_policy_e zbuff)
119052 +    DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params->cParams.windowLog);
119053 +    /* params are supposed to be fully validated at this point */
119054 +    assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
119055 +    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
119056 +    if ( (cdict)
119057 +      && (cdict->dictContentSize > 0)
119058 +      && ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
119059 +        || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
119060 +        || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
119061 +        || cdict->compressionLevel == 0)
119062 +      && (params->attachDictPref != ZSTD_dictForceLoad) ) {
119063 +        return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);
119064 +    }
119066 +    FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, *params, pledgedSrcSize,
119067 +                                     ZSTDcrp_makeClean, zbuff) , "");
119068 +    {   size_t const dictID = cdict ?
119069 +                ZSTD_compress_insertDictionary(
119070 +                        cctx->blockState.prevCBlock, &cctx->blockState.matchState,
119071 +                        &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, cdict->dictContent,
119072 +                        cdict->dictContentSize, cdict->dictContentType, dtlm,
119073 +                        cctx->entropyWorkspace)
119074 +              : ZSTD_compress_insertDictionary(
119075 +                        cctx->blockState.prevCBlock, &cctx->blockState.matchState,
119076 +                        &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, dict, dictSize,
119077 +                        dictContentType, dtlm, cctx->entropyWorkspace);
119078 +        FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
119079 +        assert(dictID <= UINT_MAX);
119080 +        cctx->dictID = (U32)dictID;
119081 +        cctx->dictContentSize = cdict ? cdict->dictContentSize : dictSize;
119082 +    }
119083 +    return 0;
119086 +size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
119087 +                                    const void* dict, size_t dictSize,
119088 +                                    ZSTD_dictContentType_e dictContentType,
119089 +                                    ZSTD_dictTableLoadMethod_e dtlm,
119090 +                                    const ZSTD_CDict* cdict,
119091 +                                    const ZSTD_CCtx_params* params,
119092 +                                    unsigned long long pledgedSrcSize)
119094 +    DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params->cParams.windowLog);
119095 +    /* compression parameters verification and optimization */
119096 +    FORWARD_IF_ERROR( ZSTD_checkCParams(params->cParams) , "");
119097 +    return ZSTD_compressBegin_internal(cctx,
119098 +                                       dict, dictSize, dictContentType, dtlm,
119099 +                                       cdict,
119100 +                                       params, pledgedSrcSize,
119101 +                                       ZSTDb_not_buffered);
119104 +/*! ZSTD_compressBegin_advanced() :
119105 +*   @return : 0, or an error code */
119106 +size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
119107 +                             const void* dict, size_t dictSize,
119108 +                                   ZSTD_parameters params, unsigned long long pledgedSrcSize)
119110 +    ZSTD_CCtx_params cctxParams;
119111 +    ZSTD_CCtxParams_init_internal(&cctxParams, &params, ZSTD_NO_CLEVEL);
119112 +    return ZSTD_compressBegin_advanced_internal(cctx,
119113 +                                            dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast,
119114 +                                            NULL /*cdict*/,
119115 +                                            &cctxParams, pledgedSrcSize);
119118 +size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
119120 +    ZSTD_CCtx_params cctxParams;
119121 +    {
119122 +        ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_noAttachDict);
119123 +        ZSTD_CCtxParams_init_internal(&cctxParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel);
119124 +    }
119125 +    DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize);
119126 +    return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
119127 +                                       &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
119130 +size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
119132 +    return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel);
119136 +/*! ZSTD_writeEpilogue() :
119137 +*   Ends a frame.
119138 +*   @return : nb of bytes written into dst (or an error code) */
119139 +static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
119141 +    BYTE* const ostart = (BYTE*)dst;
119142 +    BYTE* op = ostart;
119143 +    size_t fhSize = 0;
119145 +    DEBUGLOG(4, "ZSTD_writeEpilogue");
119146 +    RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing");
119148 +    /* special case : empty frame */
119149 +    if (cctx->stage == ZSTDcs_init) {
119150 +        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0);
119151 +        FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
119152 +        dstCapacity -= fhSize;
119153 +        op += fhSize;
119154 +        cctx->stage = ZSTDcs_ongoing;
119155 +    }
119157 +    if (cctx->stage != ZSTDcs_ending) {
119158 +        /* write one last empty block, make it the "last" block */
119159 +        U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
119160 +        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for epilogue");
119161 +        MEM_writeLE32(op, cBlockHeader24);
119162 +        op += ZSTD_blockHeaderSize;
119163 +        dstCapacity -= ZSTD_blockHeaderSize;
119164 +    }
119166 +    if (cctx->appliedParams.fParams.checksumFlag) {
119167 +        U32 const checksum = (U32) xxh64_digest(&cctx->xxhState);
119168 +        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
119169 +        DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum);
119170 +        MEM_writeLE32(op, checksum);
119171 +        op += 4;
119172 +    }
119174 +    cctx->stage = ZSTDcs_created;  /* return to "created but no init" status */
119175 +    return op-ostart;
119178 +void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize)
119180 +    (void)cctx;
119181 +    (void)extraCSize;
119184 +size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
119185 +                         void* dst, size_t dstCapacity,
119186 +                   const void* src, size_t srcSize)
119188 +    size_t endResult;
119189 +    size_t const cSize = ZSTD_compressContinue_internal(cctx,
119190 +                                dst, dstCapacity, src, srcSize,
119191 +                                1 /* frame mode */, 1 /* last chunk */);
119192 +    FORWARD_IF_ERROR(cSize, "ZSTD_compressContinue_internal failed");
119193 +    endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
119194 +    FORWARD_IF_ERROR(endResult, "ZSTD_writeEpilogue failed");
119195 +    assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
119196 +    if (cctx->pledgedSrcSizePlusOne != 0) {  /* control src size */
119197 +        ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
119198 +        DEBUGLOG(4, "end of frame : controlling src size");
119199 +        RETURN_ERROR_IF(
119200 +            cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1,
119201 +            srcSize_wrong,
119202 +             "error : pledgedSrcSize = %u, while realSrcSize = %u",
119203 +            (unsigned)cctx->pledgedSrcSizePlusOne-1,
119204 +            (unsigned)cctx->consumedSrcSize);
119205 +    }
119206 +    ZSTD_CCtx_trace(cctx, endResult);
119207 +    return cSize + endResult;
119210 +size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
119211 +                               void* dst, size_t dstCapacity,
119212 +                         const void* src, size_t srcSize,
119213 +                         const void* dict,size_t dictSize,
119214 +                               ZSTD_parameters params)
119216 +    ZSTD_CCtx_params cctxParams;
119217 +    DEBUGLOG(4, "ZSTD_compress_advanced");
119218 +    FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams), "");
119219 +    ZSTD_CCtxParams_init_internal(&cctxParams, &params, ZSTD_NO_CLEVEL);
119220 +    return ZSTD_compress_advanced_internal(cctx,
119221 +                                           dst, dstCapacity,
119222 +                                           src, srcSize,
119223 +                                           dict, dictSize,
119224 +                                           &cctxParams);
119227 +/* Internal */
119228 +size_t ZSTD_compress_advanced_internal(
119229 +        ZSTD_CCtx* cctx,
119230 +        void* dst, size_t dstCapacity,
119231 +        const void* src, size_t srcSize,
119232 +        const void* dict,size_t dictSize,
119233 +        const ZSTD_CCtx_params* params)
119235 +    DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize);
119236 +    FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
119237 +                         dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
119238 +                         params, srcSize, ZSTDb_not_buffered) , "");
119239 +    return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
119242 +size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx,
119243 +                               void* dst, size_t dstCapacity,
119244 +                         const void* src, size_t srcSize,
119245 +                         const void* dict, size_t dictSize,
119246 +                               int compressionLevel)
119248 +    ZSTD_CCtx_params cctxParams;
119249 +    {
119250 +        ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, srcSize, dict ? dictSize : 0, ZSTD_cpm_noAttachDict);
119251 +        assert(params.fParams.contentSizeFlag == 1);
119252 +        ZSTD_CCtxParams_init_internal(&cctxParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT: compressionLevel);
119253 +    }
119254 +    DEBUGLOG(4, "ZSTD_compress_usingDict (srcSize=%u)", (unsigned)srcSize);
119255 +    return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, &cctxParams);
119258 +size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
119259 +                         void* dst, size_t dstCapacity,
119260 +                   const void* src, size_t srcSize,
119261 +                         int compressionLevel)
119263 +    DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (unsigned)srcSize);
119264 +    assert(cctx != NULL);
119265 +    return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);
119268 +size_t ZSTD_compress(void* dst, size_t dstCapacity,
119269 +               const void* src, size_t srcSize,
119270 +                     int compressionLevel)
119272 +    size_t result;
119273 +    ZSTD_CCtx* cctx = ZSTD_createCCtx();
119274 +    RETURN_ERROR_IF(!cctx, memory_allocation, "ZSTD_createCCtx failed");
119275 +    result = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, compressionLevel);
119276 +    ZSTD_freeCCtx(cctx);
119277 +    return result;
119281 +/* =====  Dictionary API  ===== */
119283 +/*! ZSTD_estimateCDictSize_advanced() :
119284 + *  Estimate amount of memory that will be needed to create a dictionary with following arguments */
119285 +size_t ZSTD_estimateCDictSize_advanced(
119286 +        size_t dictSize, ZSTD_compressionParameters cParams,
119287 +        ZSTD_dictLoadMethod_e dictLoadMethod)
119289 +    DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict));
119290 +    return ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
119291 +         + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
119292 +         + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0)
119293 +         + (dictLoadMethod == ZSTD_dlm_byRef ? 0
119294 +            : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void *))));
119297 +size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
119299 +    ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
119300 +    return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
119303 +size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
119305 +    if (cdict==NULL) return 0;   /* support sizeof on NULL */
119306 +    DEBUGLOG(5, "sizeof(*cdict) : %u", (unsigned)sizeof(*cdict));
119307 +    /* cdict may be in the workspace */
119308 +    return (cdict->workspace.workspace == cdict ? 0 : sizeof(*cdict))
119309 +        + ZSTD_cwksp_sizeof(&cdict->workspace);
119312 +static size_t ZSTD_initCDict_internal(
119313 +                    ZSTD_CDict* cdict,
119314 +              const void* dictBuffer, size_t dictSize,
119315 +                    ZSTD_dictLoadMethod_e dictLoadMethod,
119316 +                    ZSTD_dictContentType_e dictContentType,
119317 +                    ZSTD_CCtx_params params)
119319 +    DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (unsigned)dictContentType);
119320 +    assert(!ZSTD_checkCParams(params.cParams));
119321 +    cdict->matchState.cParams = params.cParams;
119322 +    cdict->matchState.dedicatedDictSearch = params.enableDedicatedDictSearch;
119323 +    if (cdict->matchState.dedicatedDictSearch && dictSize > ZSTD_CHUNKSIZE_MAX) {
119324 +        cdict->matchState.dedicatedDictSearch = 0;
119325 +    }
119326 +    if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
119327 +        cdict->dictContent = dictBuffer;
119328 +    } else {
119329 +         void *internalBuffer = ZSTD_cwksp_reserve_object(&cdict->workspace, ZSTD_cwksp_align(dictSize, sizeof(void*)));
119330 +        RETURN_ERROR_IF(!internalBuffer, memory_allocation, "NULL pointer!");
119331 +        cdict->dictContent = internalBuffer;
119332 +        ZSTD_memcpy(internalBuffer, dictBuffer, dictSize);
119333 +    }
119334 +    cdict->dictContentSize = dictSize;
119335 +    cdict->dictContentType = dictContentType;
119337 +    cdict->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cdict->workspace, HUF_WORKSPACE_SIZE);
119340 +    /* Reset the state to no dictionary */
119341 +    ZSTD_reset_compressedBlockState(&cdict->cBlockState);
119342 +    FORWARD_IF_ERROR(ZSTD_reset_matchState(
119343 +        &cdict->matchState,
119344 +        &cdict->workspace,
119345 +        &params.cParams,
119346 +        ZSTDcrp_makeClean,
119347 +        ZSTDirp_reset,
119348 +        ZSTD_resetTarget_CDict), "");
119349 +    /* (Maybe) load the dictionary
119350 +     * Skips loading the dictionary if it is < 8 bytes.
119351 +     */
119352 +    {   params.compressionLevel = ZSTD_CLEVEL_DEFAULT;
119353 +        params.fParams.contentSizeFlag = 1;
119354 +        {   size_t const dictID = ZSTD_compress_insertDictionary(
119355 +                    &cdict->cBlockState, &cdict->matchState, NULL, &cdict->workspace,
119356 +                    &params, cdict->dictContent, cdict->dictContentSize,
119357 +                    dictContentType, ZSTD_dtlm_full, cdict->entropyWorkspace);
119358 +            FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
119359 +            assert(dictID <= (size_t)(U32)-1);
119360 +            cdict->dictID = (U32)dictID;
119361 +        }
119362 +    }
119364 +    return 0;
119367 +static ZSTD_CDict* ZSTD_createCDict_advanced_internal(size_t dictSize,
119368 +                                      ZSTD_dictLoadMethod_e dictLoadMethod,
119369 +                                      ZSTD_compressionParameters cParams, ZSTD_customMem customMem)
119371 +    if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
119373 +    {   size_t const workspaceSize =
119374 +            ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) +
119375 +            ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) +
119376 +            ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0) +
119377 +            (dictLoadMethod == ZSTD_dlm_byRef ? 0
119378 +             : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))));
119379 +        void* const workspace = ZSTD_customMalloc(workspaceSize, customMem);
119380 +        ZSTD_cwksp ws;
119381 +        ZSTD_CDict* cdict;
119383 +        if (!workspace) {
119384 +            ZSTD_customFree(workspace, customMem);
119385 +            return NULL;
119386 +        }
119388 +        ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_dynamic_alloc);
119390 +        cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
119391 +        assert(cdict != NULL);
119392 +        ZSTD_cwksp_move(&cdict->workspace, &ws);
119393 +        cdict->customMem = customMem;
119394 +        cdict->compressionLevel = ZSTD_NO_CLEVEL; /* signals advanced API usage */
119396 +        return cdict;
119397 +    }
119400 +ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
119401 +                                      ZSTD_dictLoadMethod_e dictLoadMethod,
119402 +                                      ZSTD_dictContentType_e dictContentType,
119403 +                                      ZSTD_compressionParameters cParams,
119404 +                                      ZSTD_customMem customMem)
119406 +    ZSTD_CCtx_params cctxParams;
119407 +    ZSTD_memset(&cctxParams, 0, sizeof(cctxParams));
119408 +    ZSTD_CCtxParams_init(&cctxParams, 0);
119409 +    cctxParams.cParams = cParams;
119410 +    cctxParams.customMem = customMem;
119411 +    return ZSTD_createCDict_advanced2(
119412 +        dictBuffer, dictSize,
119413 +        dictLoadMethod, dictContentType,
119414 +        &cctxParams, customMem);
119417 +ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced2(
119418 +        const void* dict, size_t dictSize,
119419 +        ZSTD_dictLoadMethod_e dictLoadMethod,
119420 +        ZSTD_dictContentType_e dictContentType,
119421 +        const ZSTD_CCtx_params* originalCctxParams,
119422 +        ZSTD_customMem customMem)
119424 +    ZSTD_CCtx_params cctxParams = *originalCctxParams;
119425 +    ZSTD_compressionParameters cParams;
119426 +    ZSTD_CDict* cdict;
119428 +    DEBUGLOG(3, "ZSTD_createCDict_advanced2, mode %u", (unsigned)dictContentType);
119429 +    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
119431 +    if (cctxParams.enableDedicatedDictSearch) {
119432 +        cParams = ZSTD_dedicatedDictSearch_getCParams(
119433 +            cctxParams.compressionLevel, dictSize);
119434 +        ZSTD_overrideCParams(&cParams, &cctxParams.cParams);
119435 +    } else {
119436 +        cParams = ZSTD_getCParamsFromCCtxParams(
119437 +            &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
119438 +    }
119440 +    if (!ZSTD_dedicatedDictSearch_isSupported(&cParams)) {
119441 +        /* Fall back to non-DDSS params */
119442 +        cctxParams.enableDedicatedDictSearch = 0;
119443 +        cParams = ZSTD_getCParamsFromCCtxParams(
119444 +            &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
119445 +    }
119447 +    cctxParams.cParams = cParams;
119449 +    cdict = ZSTD_createCDict_advanced_internal(dictSize,
119450 +                        dictLoadMethod, cctxParams.cParams,
119451 +                        customMem);
119453 +    if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
119454 +                                    dict, dictSize,
119455 +                                    dictLoadMethod, dictContentType,
119456 +                                    cctxParams) )) {
119457 +        ZSTD_freeCDict(cdict);
119458 +        return NULL;
119459 +    }
119461 +    return cdict;
119464 +ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
119466 +    ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
119467 +    ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
119468 +                                                  ZSTD_dlm_byCopy, ZSTD_dct_auto,
119469 +                                                  cParams, ZSTD_defaultCMem);
119470 +    if (cdict)
119471 +        cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
119472 +    return cdict;
119475 +ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
119477 +    ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
119478 +    ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
119479 +                                     ZSTD_dlm_byRef, ZSTD_dct_auto,
119480 +                                     cParams, ZSTD_defaultCMem);
119481 +    if (cdict)
119482 +        cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
119483 +    return cdict;
119486 +size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
119488 +    if (cdict==NULL) return 0;   /* support free on NULL */
119489 +    {   ZSTD_customMem const cMem = cdict->customMem;
119490 +        int cdictInWorkspace = ZSTD_cwksp_owns_buffer(&cdict->workspace, cdict);
119491 +        ZSTD_cwksp_free(&cdict->workspace, cMem);
119492 +        if (!cdictInWorkspace) {
119493 +            ZSTD_customFree(cdict, cMem);
119494 +        }
119495 +        return 0;
119496 +    }
119499 +/*! ZSTD_initStaticCDict_advanced() :
119500 + *  Generate a digested dictionary in provided memory area.
119501 + *  workspace: The memory area to emplace the dictionary into.
119502 + *             Provided pointer must 8-bytes aligned.
119503 + *             It must outlive dictionary usage.
119504 + *  workspaceSize: Use ZSTD_estimateCDictSize()
119505 + *                 to determine how large workspace must be.
119506 + *  cParams : use ZSTD_getCParams() to transform a compression level
119507 + *            into its relevants cParams.
119508 + * @return : pointer to ZSTD_CDict*, or NULL if error (size too small)
119509 + *  Note : there is no corresponding "free" function.
119510 + *         Since workspace was allocated externally, it must be freed externally.
119511 + */
119512 +const ZSTD_CDict* ZSTD_initStaticCDict(
119513 +                                 void* workspace, size_t workspaceSize,
119514 +                           const void* dict, size_t dictSize,
119515 +                                 ZSTD_dictLoadMethod_e dictLoadMethod,
119516 +                                 ZSTD_dictContentType_e dictContentType,
119517 +                                 ZSTD_compressionParameters cParams)
119519 +    size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0);
119520 +    size_t const neededSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
119521 +                            + (dictLoadMethod == ZSTD_dlm_byRef ? 0
119522 +                               : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))))
119523 +                            + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
119524 +                            + matchStateSize;
119525 +    ZSTD_CDict* cdict;
119526 +    ZSTD_CCtx_params params;
119528 +    if ((size_t)workspace & 7) return NULL;  /* 8-aligned */
119530 +    {
119531 +        ZSTD_cwksp ws;
119532 +        ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);
119533 +        cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
119534 +        if (cdict == NULL) return NULL;
119535 +        ZSTD_cwksp_move(&cdict->workspace, &ws);
119536 +    }
119538 +    DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
119539 +        (unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize));
119540 +    if (workspaceSize < neededSize) return NULL;
119542 +    ZSTD_CCtxParams_init(&params, 0);
119543 +    params.cParams = cParams;
119545 +    if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
119546 +                                              dict, dictSize,
119547 +                                              dictLoadMethod, dictContentType,
119548 +                                              params) ))
119549 +        return NULL;
119551 +    return cdict;
119554 +ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict)
119556 +    assert(cdict != NULL);
119557 +    return cdict->matchState.cParams;
119560 +/*! ZSTD_getDictID_fromCDict() :
119561 + *  Provides the dictID of the dictionary loaded into `cdict`.
119562 + *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
119563 + *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
119564 +unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict)
119566 +    if (cdict==NULL) return 0;
119567 +    return cdict->dictID;
119571 +/* ZSTD_compressBegin_usingCDict_advanced() :
119572 + * cdict must be != NULL */
119573 +size_t ZSTD_compressBegin_usingCDict_advanced(
119574 +    ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
119575 +    ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
119577 +    ZSTD_CCtx_params cctxParams;
119578 +    DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_advanced");
119579 +    RETURN_ERROR_IF(cdict==NULL, dictionary_wrong, "NULL pointer!");
119580 +    /* Initialize the cctxParams from the cdict */
119581 +    {
119582 +        ZSTD_parameters params;
119583 +        params.fParams = fParams;
119584 +        params.cParams = ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
119585 +                        || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
119586 +                        || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
119587 +                        || cdict->compressionLevel == 0 ) ?
119588 +                ZSTD_getCParamsFromCDict(cdict)
119589 +              : ZSTD_getCParams(cdict->compressionLevel,
119590 +                                pledgedSrcSize,
119591 +                                cdict->dictContentSize);
119592 +        ZSTD_CCtxParams_init_internal(&cctxParams, &params, cdict->compressionLevel);
119593 +    }
119594 +    /* Increase window log to fit the entire dictionary and source if the
119595 +     * source size is known. Limit the increase to 19, which is the
119596 +     * window log for compression level 1 with the largest source size.
119597 +     */
119598 +    if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
119599 +        U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19);
119600 +        U32 const limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1;
119601 +        cctxParams.cParams.windowLog = MAX(cctxParams.cParams.windowLog, limitedSrcLog);
119602 +    }
119603 +    return ZSTD_compressBegin_internal(cctx,
119604 +                                        NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast,
119605 +                                        cdict,
119606 +                                        &cctxParams, pledgedSrcSize,
119607 +                                        ZSTDb_not_buffered);
119610 +/* ZSTD_compressBegin_usingCDict() :
119611 + * pledgedSrcSize=0 means "unknown"
119612 + * if pledgedSrcSize>0, it will enable contentSizeFlag */
119613 +size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
119615 +    ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
119616 +    DEBUGLOG(4, "ZSTD_compressBegin_usingCDict : dictIDFlag == %u", !fParams.noDictIDFlag);
119617 +    return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);
119620 +size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
119621 +                                void* dst, size_t dstCapacity,
119622 +                                const void* src, size_t srcSize,
119623 +                                const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
119625 +    FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize), "");   /* will check if cdict != NULL */
119626 +    return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
119629 +/*! ZSTD_compress_usingCDict() :
119630 + *  Compression using a digested Dictionary.
119631 + *  Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times.
119632 + *  Note that compression parameters are decided at CDict creation time
119633 + *  while frame parameters are hardcoded */
119634 +size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
119635 +                                void* dst, size_t dstCapacity,
119636 +                                const void* src, size_t srcSize,
119637 +                                const ZSTD_CDict* cdict)
119639 +    ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
119640 +    return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
119645 +/* ******************************************************************
119646 +*  Streaming
119647 +********************************************************************/
119649 +ZSTD_CStream* ZSTD_createCStream(void)
119651 +    DEBUGLOG(3, "ZSTD_createCStream");
119652 +    return ZSTD_createCStream_advanced(ZSTD_defaultCMem);
119655 +ZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize)
119657 +    return ZSTD_initStaticCCtx(workspace, workspaceSize);
119660 +ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem)
119661 +{   /* CStream and CCtx are now same object */
119662 +    return ZSTD_createCCtx_advanced(customMem);
119665 +size_t ZSTD_freeCStream(ZSTD_CStream* zcs)
119667 +    return ZSTD_freeCCtx(zcs);   /* same object */
119672 +/*======   Initialization   ======*/
119674 +size_t ZSTD_CStreamInSize(void)  { return ZSTD_BLOCKSIZE_MAX; }
119676 +size_t ZSTD_CStreamOutSize(void)
119678 +    return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ;
119681 +static ZSTD_cParamMode_e ZSTD_getCParamMode(ZSTD_CDict const* cdict, ZSTD_CCtx_params const* params, U64 pledgedSrcSize)
119683 +    if (cdict != NULL && ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize))
119684 +        return ZSTD_cpm_attachDict;
119685 +    else
119686 +        return ZSTD_cpm_noAttachDict;
119689 +/* ZSTD_resetCStream():
119690 + * pledgedSrcSize == 0 means "unknown" */
119691 +size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pss)
119693 +    /* temporary : 0 interpreted as "unknown" during transition period.
119694 +     * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
119695 +     * 0 will be interpreted as "empty" in the future.
119696 +     */
119697 +    U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
119698 +    DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (unsigned)pledgedSrcSize);
119699 +    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
119700 +    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
119701 +    return 0;
119704 +/*! ZSTD_initCStream_internal() :
119705 + *  Note : for lib/compress only. Used by zstdmt_compress.c.
119706 + *  Assumption 1 : params are valid
119707 + *  Assumption 2 : either dict, or cdict, is defined, not both */
119708 +size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
119709 +                    const void* dict, size_t dictSize, const ZSTD_CDict* cdict,
119710 +                    const ZSTD_CCtx_params* params,
119711 +                    unsigned long long pledgedSrcSize)
119713 +    DEBUGLOG(4, "ZSTD_initCStream_internal");
119714 +    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
119715 +    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
119716 +    assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
119717 +    zcs->requestedParams = *params;
119718 +    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
119719 +    if (dict) {
119720 +        FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
119721 +    } else {
119722 +        /* Dictionary is cleared if !cdict */
119723 +        FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
119724 +    }
119725 +    return 0;
119728 +/* ZSTD_initCStream_usingCDict_advanced() :
119729 + * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
119730 +size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
119731 +                                            const ZSTD_CDict* cdict,
119732 +                                            ZSTD_frameParameters fParams,
119733 +                                            unsigned long long pledgedSrcSize)
119735 +    DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced");
119736 +    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
119737 +    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
119738 +    zcs->requestedParams.fParams = fParams;
119739 +    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
119740 +    return 0;
119743 +/* note : cdict must outlive compression session */
119744 +size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
119746 +    DEBUGLOG(4, "ZSTD_initCStream_usingCDict");
119747 +    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
119748 +    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
119749 +    return 0;
119753 +/* ZSTD_initCStream_advanced() :
119754 + * pledgedSrcSize must be exact.
119755 + * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
119756 + * dict is loaded with default parameters ZSTD_dct_auto and ZSTD_dlm_byCopy. */
119757 +size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
119758 +                                 const void* dict, size_t dictSize,
119759 +                                 ZSTD_parameters params, unsigned long long pss)
119761 +    /* for compatibility with older programs relying on this behavior.
119762 +     * Users should now specify ZSTD_CONTENTSIZE_UNKNOWN.
119763 +     * This line will be removed in the future.
119764 +     */
119765 +    U64 const pledgedSrcSize = (pss==0 && params.fParams.contentSizeFlag==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
119766 +    DEBUGLOG(4, "ZSTD_initCStream_advanced");
119767 +    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
119768 +    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
119769 +    FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , "");
119770 +    ZSTD_CCtxParams_setZstdParams(&zcs->requestedParams, &params);
119771 +    FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
119772 +    return 0;
119775 +size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
119777 +    DEBUGLOG(4, "ZSTD_initCStream_usingDict");
119778 +    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
119779 +    FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
119780 +    FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
119781 +    return 0;
119784 +size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss)
119786 +    /* temporary : 0 interpreted as "unknown" during transition period.
119787 +     * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
119788 +     * 0 will be interpreted as "empty" in the future.
119789 +     */
119790 +    U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
119791 +    DEBUGLOG(4, "ZSTD_initCStream_srcSize");
119792 +    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
119793 +    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , "");
119794 +    FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
119795 +    FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
119796 +    return 0;
119799 +size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
119801 +    DEBUGLOG(4, "ZSTD_initCStream");
119802 +    FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
119803 +    FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , "");
119804 +    FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
119805 +    return 0;
119808 +/*======   Compression   ======*/
119810 +static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx)
119812 +    size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos;
119813 +    if (hintInSize==0) hintInSize = cctx->blockSize;
119814 +    return hintInSize;
119817 +/** ZSTD_compressStream_generic():
119818 + *  internal function for all *compressStream*() variants
119819 + *  non-static, because can be called from zstdmt_compress.c
119820 + * @return : hint size for next input */
119821 +static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
119822 +                                          ZSTD_outBuffer* output,
119823 +                                          ZSTD_inBuffer* input,
119824 +                                          ZSTD_EndDirective const flushMode)
119826 +    const char* const istart = (const char*)input->src;
119827 +    const char* const iend = input->size != 0 ? istart + input->size : istart;
119828 +    const char* ip = input->pos != 0 ? istart + input->pos : istart;
119829 +    char* const ostart = (char*)output->dst;
119830 +    char* const oend = output->size != 0 ? ostart + output->size : ostart;
119831 +    char* op = output->pos != 0 ? ostart + output->pos : ostart;
119832 +    U32 someMoreWork = 1;
119834 +    /* check expectations */
119835 +    DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (unsigned)flushMode);
119836 +    if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {
119837 +        assert(zcs->inBuff != NULL);
119838 +        assert(zcs->inBuffSize > 0);
119839 +    }
119840 +    if (zcs->appliedParams.outBufferMode == ZSTD_bm_buffered) {
119841 +        assert(zcs->outBuff !=  NULL);
119842 +        assert(zcs->outBuffSize > 0);
119843 +    }
119844 +    assert(output->pos <= output->size);
119845 +    assert(input->pos <= input->size);
119846 +    assert((U32)flushMode <= (U32)ZSTD_e_end);
119848 +    while (someMoreWork) {
119849 +        switch(zcs->streamStage)
119850 +        {
119851 +        case zcss_init:
119852 +            RETURN_ERROR(init_missing, "call ZSTD_initCStream() first!");
119854 +        case zcss_load:
119855 +            if ( (flushMode == ZSTD_e_end)
119856 +              && ( (size_t)(oend-op) >= ZSTD_compressBound(iend-ip)     /* Enough output space */
119857 +                || zcs->appliedParams.outBufferMode == ZSTD_bm_stable)  /* OR we are allowed to return dstSizeTooSmall */
119858 +              && (zcs->inBuffPos == 0) ) {
119859 +                /* shortcut to compression pass directly into output buffer */
119860 +                size_t const cSize = ZSTD_compressEnd(zcs,
119861 +                                                op, oend-op, ip, iend-ip);
119862 +                DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize);
119863 +                FORWARD_IF_ERROR(cSize, "ZSTD_compressEnd failed");
119864 +                ip = iend;
119865 +                op += cSize;
119866 +                zcs->frameEnded = 1;
119867 +                ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
119868 +                someMoreWork = 0; break;
119869 +            }
119870 +            /* complete loading into inBuffer in buffered mode */
119871 +            if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {
119872 +                size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
119873 +                size_t const loaded = ZSTD_limitCopy(
119874 +                                        zcs->inBuff + zcs->inBuffPos, toLoad,
119875 +                                        ip, iend-ip);
119876 +                zcs->inBuffPos += loaded;
119877 +                if (loaded != 0)
119878 +                    ip += loaded;
119879 +                if ( (flushMode == ZSTD_e_continue)
119880 +                  && (zcs->inBuffPos < zcs->inBuffTarget) ) {
119881 +                    /* not enough input to fill full block : stop here */
119882 +                    someMoreWork = 0; break;
119883 +                }
119884 +                if ( (flushMode == ZSTD_e_flush)
119885 +                  && (zcs->inBuffPos == zcs->inToCompress) ) {
119886 +                    /* empty */
119887 +                    someMoreWork = 0; break;
119888 +                }
119889 +            }
119890 +            /* compress current block (note : this stage cannot be stopped in the middle) */
119891 +            DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode);
119892 +            {   int const inputBuffered = (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered);
119893 +                void* cDst;
119894 +                size_t cSize;
119895 +                size_t oSize = oend-op;
119896 +                size_t const iSize = inputBuffered
119897 +                    ? zcs->inBuffPos - zcs->inToCompress
119898 +                    : MIN((size_t)(iend - ip), zcs->blockSize);
119899 +                if (oSize >= ZSTD_compressBound(iSize) || zcs->appliedParams.outBufferMode == ZSTD_bm_stable)
119900 +                    cDst = op;   /* compress into output buffer, to skip flush stage */
119901 +                else
119902 +                    cDst = zcs->outBuff, oSize = zcs->outBuffSize;
119903 +                if (inputBuffered) {
119904 +                    unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend);
119905 +                    cSize = lastBlock ?
119906 +                            ZSTD_compressEnd(zcs, cDst, oSize,
119907 +                                        zcs->inBuff + zcs->inToCompress, iSize) :
119908 +                            ZSTD_compressContinue(zcs, cDst, oSize,
119909 +                                        zcs->inBuff + zcs->inToCompress, iSize);
119910 +                    FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
119911 +                    zcs->frameEnded = lastBlock;
119912 +                    /* prepare next block */
119913 +                    zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
119914 +                    if (zcs->inBuffTarget > zcs->inBuffSize)
119915 +                        zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;
119916 +                    DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u",
119917 +                            (unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize);
119918 +                    if (!lastBlock)
119919 +                        assert(zcs->inBuffTarget <= zcs->inBuffSize);
119920 +                    zcs->inToCompress = zcs->inBuffPos;
119921 +                } else {
119922 +                    unsigned const lastBlock = (ip + iSize == iend);
119923 +                    assert(flushMode == ZSTD_e_end /* Already validated */);
119924 +                    cSize = lastBlock ?
119925 +                            ZSTD_compressEnd(zcs, cDst, oSize, ip, iSize) :
119926 +                            ZSTD_compressContinue(zcs, cDst, oSize, ip, iSize);
119927 +                    /* Consume the input prior to error checking to mirror buffered mode. */
119928 +                    if (iSize > 0)
119929 +                        ip += iSize;
119930 +                    FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
119931 +                    zcs->frameEnded = lastBlock;
119932 +                    if (lastBlock)
119933 +                        assert(ip == iend);
119934 +                }
119935 +                if (cDst == op) {  /* no need to flush */
119936 +                    op += cSize;
119937 +                    if (zcs->frameEnded) {
119938 +                        DEBUGLOG(5, "Frame completed directly in outBuffer");
119939 +                        someMoreWork = 0;
119940 +                        ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
119941 +                    }
119942 +                    break;
119943 +                }
119944 +                zcs->outBuffContentSize = cSize;
119945 +                zcs->outBuffFlushedSize = 0;
119946 +                zcs->streamStage = zcss_flush; /* pass-through to flush stage */
119947 +            }
119948 +           /* fall-through */
119949 +        case zcss_flush:
119950 +            DEBUGLOG(5, "flush stage");
119951 +            assert(zcs->appliedParams.outBufferMode == ZSTD_bm_buffered);
119952 +            {   size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
119953 +                size_t const flushed = ZSTD_limitCopy(op, (size_t)(oend-op),
119954 +                            zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
119955 +                DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u",
119956 +                            (unsigned)toFlush, (unsigned)(oend-op), (unsigned)flushed);
119957 +                if (flushed)
119958 +                    op += flushed;
119959 +                zcs->outBuffFlushedSize += flushed;
119960 +                if (toFlush!=flushed) {
119961 +                    /* flush not fully completed, presumably because dst is too small */
119962 +                    assert(op==oend);
119963 +                    someMoreWork = 0;
119964 +                    break;
119965 +                }
119966 +                zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
119967 +                if (zcs->frameEnded) {
119968 +                    DEBUGLOG(5, "Frame completed on flush");
119969 +                    someMoreWork = 0;
119970 +                    ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
119971 +                    break;
119972 +                }
119973 +                zcs->streamStage = zcss_load;
119974 +                break;
119975 +            }
119977 +        default: /* impossible */
119978 +            assert(0);
119979 +        }
119980 +    }
119982 +    input->pos = ip - istart;
119983 +    output->pos = op - ostart;
119984 +    if (zcs->frameEnded) return 0;
119985 +    return ZSTD_nextInputSizeHint(zcs);
119988 +static size_t ZSTD_nextInputSizeHint_MTorST(const ZSTD_CCtx* cctx)
119990 +    return ZSTD_nextInputSizeHint(cctx);
119994 +size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
119996 +    FORWARD_IF_ERROR( ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) , "");
119997 +    return ZSTD_nextInputSizeHint_MTorST(zcs);
120000 +/* After a compression call set the expected input/output buffer.
120001 + * This is validated at the start of the next compression call.
120002 + */
120003 +static void ZSTD_setBufferExpectations(ZSTD_CCtx* cctx, ZSTD_outBuffer const* output, ZSTD_inBuffer const* input)
120005 +    if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
120006 +        cctx->expectedInBuffer = *input;
120007 +    }
120008 +    if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) {
120009 +        cctx->expectedOutBufferSize = output->size - output->pos;
120010 +    }
120013 +/* Validate that the input/output buffers match the expectations set by
120014 + * ZSTD_setBufferExpectations.
120015 + */
120016 +static size_t ZSTD_checkBufferStability(ZSTD_CCtx const* cctx,
120017 +                                        ZSTD_outBuffer const* output,
120018 +                                        ZSTD_inBuffer const* input,
120019 +                                        ZSTD_EndDirective endOp)
120021 +    if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
120022 +        ZSTD_inBuffer const expect = cctx->expectedInBuffer;
120023 +        if (expect.src != input->src || expect.pos != input->pos || expect.size != input->size)
120024 +            RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer enabled but input differs!");
120025 +        if (endOp != ZSTD_e_end)
120026 +            RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer can only be used with ZSTD_e_end!");
120027 +    }
120028 +    if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) {
120029 +        size_t const outBufferSize = output->size - output->pos;
120030 +        if (cctx->expectedOutBufferSize != outBufferSize)
120031 +            RETURN_ERROR(dstBuffer_wrong, "ZSTD_c_stableOutBuffer enabled but output size differs!");
120032 +    }
120033 +    return 0;
120036 +static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx,
120037 +                                             ZSTD_EndDirective endOp,
120038 +                                             size_t inSize) {
120039 +    ZSTD_CCtx_params params = cctx->requestedParams;
120040 +    ZSTD_prefixDict const prefixDict = cctx->prefixDict;
120041 +    FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) , ""); /* Init the local dict if present. */
120042 +    ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));   /* single usage */
120043 +    assert(prefixDict.dict==NULL || cctx->cdict==NULL);    /* only one can be set */
120044 +    if (cctx->cdict)
120045 +        params.compressionLevel = cctx->cdict->compressionLevel; /* let cdict take priority in terms of compression level */
120046 +    DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage");
120047 +    if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1;  /* auto-fix pledgedSrcSize */
120048 +    {
120049 +        size_t const dictSize = prefixDict.dict
120050 +                ? prefixDict.dictSize
120051 +                : (cctx->cdict ? cctx->cdict->dictContentSize : 0);
120052 +        ZSTD_cParamMode_e const mode = ZSTD_getCParamMode(cctx->cdict, &params, cctx->pledgedSrcSizePlusOne - 1);
120053 +        params.cParams = ZSTD_getCParamsFromCCtxParams(
120054 +                &params, cctx->pledgedSrcSizePlusOne-1,
120055 +                dictSize, mode);
120056 +    }
120058 +    if (ZSTD_CParams_shouldEnableLdm(&params.cParams)) {
120059 +        /* Enable LDM by default for optimal parser and window size >= 128MB */
120060 +        DEBUGLOG(4, "LDM enabled by default (window size >= 128MB, strategy >= btopt)");
120061 +        params.ldmParams.enableLdm = 1;
120062 +    }
120064 +    {   U64 const pledgedSrcSize = cctx->pledgedSrcSizePlusOne - 1;
120065 +        assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
120066 +        FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
120067 +                prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType, ZSTD_dtlm_fast,
120068 +                cctx->cdict,
120069 +                &params, pledgedSrcSize,
120070 +                ZSTDb_buffered) , "");
120071 +        assert(cctx->appliedParams.nbWorkers == 0);
120072 +        cctx->inToCompress = 0;
120073 +        cctx->inBuffPos = 0;
120074 +        if (cctx->appliedParams.inBufferMode == ZSTD_bm_buffered) {
120075 +            /* for small input: avoid automatic flush on reaching end of block, since
120076 +            * it would require to add a 3-bytes null block to end frame
120077 +            */
120078 +            cctx->inBuffTarget = cctx->blockSize + (cctx->blockSize == pledgedSrcSize);
120079 +        } else {
120080 +            cctx->inBuffTarget = 0;
120081 +        }
120082 +        cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0;
120083 +        cctx->streamStage = zcss_load;
120084 +        cctx->frameEnded = 0;
120085 +    }
120086 +    return 0;
120089 +size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
120090 +                             ZSTD_outBuffer* output,
120091 +                             ZSTD_inBuffer* input,
120092 +                             ZSTD_EndDirective endOp)
120094 +    DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp);
120095 +    /* check conditions */
120096 +    RETURN_ERROR_IF(output->pos > output->size, dstSize_tooSmall, "invalid output buffer");
120097 +    RETURN_ERROR_IF(input->pos  > input->size, srcSize_wrong, "invalid input buffer");
120098 +    RETURN_ERROR_IF((U32)endOp > (U32)ZSTD_e_end, parameter_outOfBound, "invalid endDirective");
120099 +    assert(cctx != NULL);
120101 +    /* transparent initialization stage */
120102 +    if (cctx->streamStage == zcss_init) {
120103 +        FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, endOp, input->size), "CompressStream2 initialization failed");
120104 +        ZSTD_setBufferExpectations(cctx, output, input);    /* Set initial buffer expectations now that we've initialized */
120105 +    }
120106 +    /* end of transparent initialization stage */
120108 +    FORWARD_IF_ERROR(ZSTD_checkBufferStability(cctx, output, input, endOp), "invalid buffers");
120109 +    /* compression stage */
120110 +    FORWARD_IF_ERROR( ZSTD_compressStream_generic(cctx, output, input, endOp) , "");
120111 +    DEBUGLOG(5, "completed ZSTD_compressStream2");
120112 +    ZSTD_setBufferExpectations(cctx, output, input);
120113 +    return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */
120116 +size_t ZSTD_compressStream2_simpleArgs (
120117 +                            ZSTD_CCtx* cctx,
120118 +                            void* dst, size_t dstCapacity, size_t* dstPos,
120119 +                      const void* src, size_t srcSize, size_t* srcPos,
120120 +                            ZSTD_EndDirective endOp)
120122 +    ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
120123 +    ZSTD_inBuffer  input  = { src, srcSize, *srcPos };
120124 +    /* ZSTD_compressStream2() will check validity of dstPos and srcPos */
120125 +    size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp);
120126 +    *dstPos = output.pos;
120127 +    *srcPos = input.pos;
120128 +    return cErr;
120131 +size_t ZSTD_compress2(ZSTD_CCtx* cctx,
120132 +                      void* dst, size_t dstCapacity,
120133 +                      const void* src, size_t srcSize)
120135 +    ZSTD_bufferMode_e const originalInBufferMode = cctx->requestedParams.inBufferMode;
120136 +    ZSTD_bufferMode_e const originalOutBufferMode = cctx->requestedParams.outBufferMode;
120137 +    DEBUGLOG(4, "ZSTD_compress2 (srcSize=%u)", (unsigned)srcSize);
120138 +    ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
120139 +    /* Enable stable input/output buffers. */
120140 +    cctx->requestedParams.inBufferMode = ZSTD_bm_stable;
120141 +    cctx->requestedParams.outBufferMode = ZSTD_bm_stable;
120142 +    {   size_t oPos = 0;
120143 +        size_t iPos = 0;
120144 +        size_t const result = ZSTD_compressStream2_simpleArgs(cctx,
120145 +                                        dst, dstCapacity, &oPos,
120146 +                                        src, srcSize, &iPos,
120147 +                                        ZSTD_e_end);
120148 +        /* Reset to the original values. */
120149 +        cctx->requestedParams.inBufferMode = originalInBufferMode;
120150 +        cctx->requestedParams.outBufferMode = originalOutBufferMode;
120151 +        FORWARD_IF_ERROR(result, "ZSTD_compressStream2_simpleArgs failed");
120152 +        if (result != 0) {  /* compression not completed, due to lack of output space */
120153 +            assert(oPos == dstCapacity);
120154 +            RETURN_ERROR(dstSize_tooSmall, "");
120155 +        }
120156 +        assert(iPos == srcSize);   /* all input is expected consumed */
120157 +        return oPos;
120158 +    }
120161 +typedef struct {
120162 +    U32 idx;             /* Index in array of ZSTD_Sequence */
120163 +    U32 posInSequence;   /* Position within sequence at idx */
120164 +    size_t posInSrc;        /* Number of bytes given by sequences provided so far */
120165 +} ZSTD_sequencePosition;
120167 +/* Returns a ZSTD error code if sequence is not valid */
120168 +static size_t ZSTD_validateSequence(U32 offCode, U32 matchLength,
120169 +                                    size_t posInSrc, U32 windowLog, size_t dictSize, U32 minMatch) {
120170 +    size_t offsetBound;
120171 +    U32 windowSize = 1 << windowLog;
120172 +    /* posInSrc represents the amount of data the the decoder would decode up to this point.
120173 +     * As long as the amount of data decoded is less than or equal to window size, offsets may be
120174 +     * larger than the total length of output decoded in order to reference the dict, even larger than
120175 +     * window size. After output surpasses windowSize, we're limited to windowSize offsets again.
120176 +     */
120177 +    offsetBound = posInSrc > windowSize ? (size_t)windowSize : posInSrc + (size_t)dictSize;
120178 +    RETURN_ERROR_IF(offCode > offsetBound + ZSTD_REP_MOVE, corruption_detected, "Offset too large!");
120179 +    RETURN_ERROR_IF(matchLength < minMatch, corruption_detected, "Matchlength too small");
120180 +    return 0;
120183 +/* Returns an offset code, given a sequence's raw offset, the ongoing repcode array, and whether litLength == 0 */
120184 +static U32 ZSTD_finalizeOffCode(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0) {
120185 +    U32 offCode = rawOffset + ZSTD_REP_MOVE;
120186 +    U32 repCode = 0;
120188 +    if (!ll0 && rawOffset == rep[0]) {
120189 +        repCode = 1;
120190 +    } else if (rawOffset == rep[1]) {
120191 +        repCode = 2 - ll0;
120192 +    } else if (rawOffset == rep[2]) {
120193 +        repCode = 3 - ll0;
120194 +    } else if (ll0 && rawOffset == rep[0] - 1) {
120195 +        repCode = 3;
120196 +    }
120197 +    if (repCode) {
120198 +        /* ZSTD_storeSeq expects a number in the range [0, 2] to represent a repcode */
120199 +        offCode = repCode - 1;
120200 +    }
120201 +    return offCode;
120204 +/* Returns 0 on success, and a ZSTD_error otherwise. This function scans through an array of
120205 + * ZSTD_Sequence, storing the sequences it finds, until it reaches a block delimiter.
120206 + */
120207 +static size_t ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
120208 +                                                             const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
120209 +                                                             const void* src, size_t blockSize) {
120210 +    U32 idx = seqPos->idx;
120211 +    BYTE const* ip = (BYTE const*)(src);
120212 +    const BYTE* const iend = ip + blockSize;
120213 +    repcodes_t updatedRepcodes;
120214 +    U32 dictSize;
120215 +    U32 litLength;
120216 +    U32 matchLength;
120217 +    U32 ll0;
120218 +    U32 offCode;
120220 +    if (cctx->cdict) {
120221 +        dictSize = (U32)cctx->cdict->dictContentSize;
120222 +    } else if (cctx->prefixDict.dict) {
120223 +        dictSize = (U32)cctx->prefixDict.dictSize;
120224 +    } else {
120225 +        dictSize = 0;
120226 +    }
120227 +    ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
120228 +    for (; (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0) && idx < inSeqsSize; ++idx) {
120229 +        litLength = inSeqs[idx].litLength;
120230 +        matchLength = inSeqs[idx].matchLength;
120231 +        ll0 = litLength == 0;
120232 +        offCode = ZSTD_finalizeOffCode(inSeqs[idx].offset, updatedRepcodes.rep, ll0);
120233 +        updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
120235 +        DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
120236 +        if (cctx->appliedParams.validateSequences) {
120237 +            seqPos->posInSrc += litLength + matchLength;
120238 +            FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
120239 +                                                cctx->appliedParams.cParams.windowLog, dictSize,
120240 +                                                cctx->appliedParams.cParams.minMatch),
120241 +                                                "Sequence validation failed");
120242 +        }
120243 +        RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
120244 +                        "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
120245 +        ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength - MINMATCH);
120246 +        ip += matchLength + litLength;
120247 +    }
120248 +    ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));
120250 +    if (inSeqs[idx].litLength) {
120251 +        DEBUGLOG(6, "Storing last literals of size: %u", inSeqs[idx].litLength);
120252 +        ZSTD_storeLastLiterals(&cctx->seqStore, ip, inSeqs[idx].litLength);
120253 +        ip += inSeqs[idx].litLength;
120254 +        seqPos->posInSrc += inSeqs[idx].litLength;
120255 +    }
120256 +    RETURN_ERROR_IF(ip != iend, corruption_detected, "Blocksize doesn't agree with block delimiter!");
120257 +    seqPos->idx = idx+1;
120258 +    return 0;
120261 +/* Returns the number of bytes to move the current read position back by. Only non-zero
120262 + * if we ended up splitting a sequence. Otherwise, it may return a ZSTD error if something
120263 + * went wrong.
120265 + * This function will attempt to scan through blockSize bytes represented by the sequences
120266 + * in inSeqs, storing any (partial) sequences.
120268 + * Occasionally, we may want to change the actual number of bytes we consumed from inSeqs to
120269 + * avoid splitting a match, or to avoid splitting a match such that it would produce a match
120270 + * smaller than MINMATCH. In this case, we return the number of bytes that we didn't read from this block.
120271 + */
120272 +static size_t ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
120273 +                                                       const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
120274 +                                                       const void* src, size_t blockSize) {
120275 +    U32 idx = seqPos->idx;
120276 +    U32 startPosInSequence = seqPos->posInSequence;
120277 +    U32 endPosInSequence = seqPos->posInSequence + (U32)blockSize;
120278 +    size_t dictSize;
120279 +    BYTE const* ip = (BYTE const*)(src);
120280 +    BYTE const* iend = ip + blockSize;  /* May be adjusted if we decide to process fewer than blockSize bytes */
120281 +    repcodes_t updatedRepcodes;
120282 +    U32 bytesAdjustment = 0;
120283 +    U32 finalMatchSplit = 0;
120284 +    U32 litLength;
120285 +    U32 matchLength;
120286 +    U32 rawOffset;
120287 +    U32 offCode;
120289 +    if (cctx->cdict) {
120290 +        dictSize = cctx->cdict->dictContentSize;
120291 +    } else if (cctx->prefixDict.dict) {
120292 +        dictSize = cctx->prefixDict.dictSize;
120293 +    } else {
120294 +        dictSize = 0;
120295 +    }
120296 +    DEBUGLOG(5, "ZSTD_copySequencesToSeqStore: idx: %u PIS: %u blockSize: %zu", idx, startPosInSequence, blockSize);
120297 +    DEBUGLOG(5, "Start seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
120298 +    ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
120299 +    while (endPosInSequence && idx < inSeqsSize && !finalMatchSplit) {
120300 +        const ZSTD_Sequence currSeq = inSeqs[idx];
120301 +        litLength = currSeq.litLength;
120302 +        matchLength = currSeq.matchLength;
120303 +        rawOffset = currSeq.offset;
120305 +        /* Modify the sequence depending on where endPosInSequence lies */
120306 +        if (endPosInSequence >= currSeq.litLength + currSeq.matchLength) {
120307 +            if (startPosInSequence >= litLength) {
120308 +                startPosInSequence -= litLength;
120309 +                litLength = 0;
120310 +                matchLength -= startPosInSequence;
120311 +            } else {
120312 +                litLength -= startPosInSequence;
120313 +            }
120314 +            /* Move to the next sequence */
120315 +            endPosInSequence -= currSeq.litLength + currSeq.matchLength;
120316 +            startPosInSequence = 0;
120317 +            idx++;
120318 +        } else {
120319 +            /* This is the final (partial) sequence we're adding from inSeqs, and endPosInSequence
120320 +               does not reach the end of the match. So, we have to split the sequence */
120321 +            DEBUGLOG(6, "Require a split: diff: %u, idx: %u PIS: %u",
120322 +                     currSeq.litLength + currSeq.matchLength - endPosInSequence, idx, endPosInSequence);
120323 +            if (endPosInSequence > litLength) {
120324 +                U32 firstHalfMatchLength;
120325 +                litLength = startPosInSequence >= litLength ? 0 : litLength - startPosInSequence;
120326 +                firstHalfMatchLength = endPosInSequence - startPosInSequence - litLength;
120327 +                if (matchLength > blockSize && firstHalfMatchLength >= cctx->appliedParams.cParams.minMatch) {
120328 +                    /* Only ever split the match if it is larger than the block size */
120329 +                    U32 secondHalfMatchLength = currSeq.matchLength + currSeq.litLength - endPosInSequence;
120330 +                    if (secondHalfMatchLength < cctx->appliedParams.cParams.minMatch) {
120331 +                        /* Move the endPosInSequence backward so that it creates match of minMatch length */
120332 +                        endPosInSequence -= cctx->appliedParams.cParams.minMatch - secondHalfMatchLength;
120333 +                        bytesAdjustment = cctx->appliedParams.cParams.minMatch - secondHalfMatchLength;
120334 +                        firstHalfMatchLength -= bytesAdjustment;
120335 +                    }
120336 +                    matchLength = firstHalfMatchLength;
120337 +                    /* Flag that we split the last match - after storing the sequence, exit the loop,
120338 +                       but keep the value of endPosInSequence */
120339 +                    finalMatchSplit = 1;
120340 +                } else {
120341 +                    /* Move the position in sequence backwards so that we don't split match, and break to store
120342 +                     * the last literals. We use the original currSeq.litLength as a marker for where endPosInSequence
120343 +                     * should go. We prefer to do this whenever it is not necessary to split the match, or if doing so
120344 +                     * would cause the first half of the match to be too small
120345 +                     */
120346 +                    bytesAdjustment = endPosInSequence - currSeq.litLength;
120347 +                    endPosInSequence = currSeq.litLength;
120348 +                    break;
120349 +                }
120350 +            } else {
120351 +                /* This sequence ends inside the literals, break to store the last literals */
120352 +                break;
120353 +            }
120354 +        }
120355 +        /* Check if this offset can be represented with a repcode */
120356 +        {   U32 ll0 = (litLength == 0);
120357 +            offCode = ZSTD_finalizeOffCode(rawOffset, updatedRepcodes.rep, ll0);
120358 +            updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
120359 +        }
120361 +        if (cctx->appliedParams.validateSequences) {
120362 +            seqPos->posInSrc += litLength + matchLength;
120363 +            FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
120364 +                                                   cctx->appliedParams.cParams.windowLog, dictSize,
120365 +                                                   cctx->appliedParams.cParams.minMatch),
120366 +                                                   "Sequence validation failed");
120367 +        }
120368 +        DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
120369 +        RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
120370 +                        "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
120371 +        ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength - MINMATCH);
120372 +        ip += matchLength + litLength;
120373 +    }
120374 +    DEBUGLOG(5, "Ending seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
120375 +    assert(idx == inSeqsSize || endPosInSequence <= inSeqs[idx].litLength + inSeqs[idx].matchLength);
120376 +    seqPos->idx = idx;
120377 +    seqPos->posInSequence = endPosInSequence;
120378 +    ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));
120380 +    iend -= bytesAdjustment;
120381 +    if (ip != iend) {
120382 +        /* Store any last literals */
120383 +        U32 lastLLSize = (U32)(iend - ip);
120384 +        assert(ip <= iend);
120385 +        DEBUGLOG(6, "Storing last literals of size: %u", lastLLSize);
120386 +        ZSTD_storeLastLiterals(&cctx->seqStore, ip, lastLLSize);
120387 +        seqPos->posInSrc += lastLLSize;
120388 +    }
120390 +    return bytesAdjustment;
120393 +typedef size_t (*ZSTD_sequenceCopier) (ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
120394 +                                       const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
120395 +                                       const void* src, size_t blockSize);
120396 +static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode) {
120397 +    ZSTD_sequenceCopier sequenceCopier = NULL;
120398 +    assert(ZSTD_cParam_withinBounds(ZSTD_c_blockDelimiters, mode));
120399 +    if (mode == ZSTD_sf_explicitBlockDelimiters) {
120400 +        return ZSTD_copySequencesToSeqStoreExplicitBlockDelim;
120401 +    } else if (mode == ZSTD_sf_noBlockDelimiters) {
120402 +        return ZSTD_copySequencesToSeqStoreNoBlockDelim;
120403 +    }
120404 +    assert(sequenceCopier != NULL);
120405 +    return sequenceCopier;
120408 +/* Compress, block-by-block, all of the sequences given.
120410 + * Returns the cumulative size of all compressed blocks (including their headers), otherwise a ZSTD error.
120411 + */
120412 +static size_t ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
120413 +                                              void* dst, size_t dstCapacity,
120414 +                                              const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
120415 +                                              const void* src, size_t srcSize) {
120416 +    size_t cSize = 0;
120417 +    U32 lastBlock;
120418 +    size_t blockSize;
120419 +    size_t compressedSeqsSize;
120420 +    size_t remaining = srcSize;
120421 +    ZSTD_sequencePosition seqPos = {0, 0, 0};
120423 +    BYTE const* ip = (BYTE const*)src;
120424 +    BYTE* op = (BYTE*)dst;
120425 +    ZSTD_sequenceCopier sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters);
120427 +    DEBUGLOG(4, "ZSTD_compressSequences_internal srcSize: %zu, inSeqsSize: %zu", srcSize, inSeqsSize);
120428 +    /* Special case: empty frame */
120429 +    if (remaining == 0) {
120430 +        U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1);
120431 +        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "No room for empty frame block header");
120432 +        MEM_writeLE32(op, cBlockHeader24);
120433 +        op += ZSTD_blockHeaderSize;
120434 +        dstCapacity -= ZSTD_blockHeaderSize;
120435 +        cSize += ZSTD_blockHeaderSize;
120436 +    }
120438 +    while (remaining) {
120439 +        size_t cBlockSize;
120440 +        size_t additionalByteAdjustment;
120441 +        lastBlock = remaining <= cctx->blockSize;
120442 +        blockSize = lastBlock ? (U32)remaining : (U32)cctx->blockSize;
120443 +        ZSTD_resetSeqStore(&cctx->seqStore);
120444 +        DEBUGLOG(4, "Working on new block. Blocksize: %zu", blockSize);
120446 +        additionalByteAdjustment = sequenceCopier(cctx, &seqPos, inSeqs, inSeqsSize, ip, blockSize);
120447 +        FORWARD_IF_ERROR(additionalByteAdjustment, "Bad sequence copy");
120448 +        blockSize -= additionalByteAdjustment;
120450 +        /* If blocks are too small, emit as a nocompress block */
120451 +        if (blockSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
120452 +            cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
120453 +            FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
120454 +            DEBUGLOG(4, "Block too small, writing out nocompress block: cSize: %zu", cBlockSize);
120455 +            cSize += cBlockSize;
120456 +            ip += blockSize;
120457 +            op += cBlockSize;
120458 +            remaining -= blockSize;
120459 +            dstCapacity -= cBlockSize;
120460 +            continue;
120461 +        }
120463 +        compressedSeqsSize = ZSTD_entropyCompressSequences(&cctx->seqStore,
120464 +                                &cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy,
120465 +                                &cctx->appliedParams,
120466 +                                op + ZSTD_blockHeaderSize /* Leave space for block header */, dstCapacity - ZSTD_blockHeaderSize,
120467 +                                blockSize,
120468 +                                cctx->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
120469 +                                cctx->bmi2);
120470 +        FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed");
120471 +        DEBUGLOG(4, "Compressed sequences size: %zu", compressedSeqsSize);
120473 +        if (!cctx->isFirstBlock &&
120474 +            ZSTD_maybeRLE(&cctx->seqStore) &&
120475 +            ZSTD_isRLE((BYTE const*)src, srcSize)) {
120476 +            /* We don't want to emit our first block as a RLE even if it qualifies because
120477 +            * doing so will cause the decoder (cli only) to throw a "should consume all input error."
120478 +            * This is only an issue for zstd <= v1.4.3
120479 +            */
120480 +            compressedSeqsSize = 1;
120481 +        }
120483 +        if (compressedSeqsSize == 0) {
120484 +            /* ZSTD_noCompressBlock writes the block header as well */
120485 +            cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
120486 +            FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
120487 +            DEBUGLOG(4, "Writing out nocompress block, size: %zu", cBlockSize);
120488 +        } else if (compressedSeqsSize == 1) {
120489 +            cBlockSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, blockSize, lastBlock);
120490 +            FORWARD_IF_ERROR(cBlockSize, "RLE compress block failed");
120491 +            DEBUGLOG(4, "Writing out RLE block, size: %zu", cBlockSize);
120492 +        } else {
120493 +            U32 cBlockHeader;
120494 +            /* Error checking and repcodes update */
120495 +            ZSTD_confirmRepcodesAndEntropyTables(cctx);
120496 +            if (cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
120497 +                cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
120499 +            /* Write block header into beginning of block*/
120500 +            cBlockHeader = lastBlock + (((U32)bt_compressed)<<1) + (U32)(compressedSeqsSize << 3);
120501 +            MEM_writeLE24(op, cBlockHeader);
120502 +            cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize;
120503 +            DEBUGLOG(4, "Writing out compressed block, size: %zu", cBlockSize);
120504 +        }
120506 +        cSize += cBlockSize;
120507 +        DEBUGLOG(4, "cSize running total: %zu", cSize);
120509 +        if (lastBlock) {
120510 +            break;
120511 +        } else {
120512 +            ip += blockSize;
120513 +            op += cBlockSize;
120514 +            remaining -= blockSize;
120515 +            dstCapacity -= cBlockSize;
120516 +            cctx->isFirstBlock = 0;
120517 +        }
120518 +    }
120520 +    return cSize;
120523 +size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstCapacity,
120524 +                              const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
120525 +                              const void* src, size_t srcSize) {
120526 +    BYTE* op = (BYTE*)dst;
120527 +    size_t cSize = 0;
120528 +    size_t compressedBlocksSize = 0;
120529 +    size_t frameHeaderSize = 0;
120531 +    /* Transparent initialization stage, same as compressStream2() */
120532 +    DEBUGLOG(3, "ZSTD_compressSequences()");
120533 +    assert(cctx != NULL);
120534 +    FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, ZSTD_e_end, srcSize), "CCtx initialization failed");
120535 +    /* Begin writing output, starting with frame header */
120536 +    frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, &cctx->appliedParams, srcSize, cctx->dictID);
120537 +    op += frameHeaderSize;
120538 +    dstCapacity -= frameHeaderSize;
120539 +    cSize += frameHeaderSize;
120540 +    if (cctx->appliedParams.fParams.checksumFlag && srcSize) {
120541 +        xxh64_update(&cctx->xxhState, src, srcSize);
120542 +    }
120543 +    /* cSize includes block header size and compressed sequences size */
120544 +    compressedBlocksSize = ZSTD_compressSequences_internal(cctx,
120545 +                                                           op, dstCapacity,
120546 +                                                           inSeqs, inSeqsSize,
120547 +                                                           src, srcSize);
120548 +    FORWARD_IF_ERROR(compressedBlocksSize, "Compressing blocks failed!");
120549 +    cSize += compressedBlocksSize;
120550 +    dstCapacity -= compressedBlocksSize;
120552 +    if (cctx->appliedParams.fParams.checksumFlag) {
120553 +        U32 const checksum = (U32) xxh64_digest(&cctx->xxhState);
120554 +        RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
120555 +        DEBUGLOG(4, "Write checksum : %08X", (unsigned)checksum);
120556 +        MEM_writeLE32((char*)dst + cSize, checksum);
120557 +        cSize += 4;
120558 +    }
120560 +    DEBUGLOG(3, "Final compressed size: %zu", cSize);
120561 +    return cSize;
120564 +/*======   Finalize   ======*/
120566 +/*! ZSTD_flushStream() :
120567 + * @return : amount of data remaining to flush */
120568 +size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
120570 +    ZSTD_inBuffer input = { NULL, 0, 0 };
120571 +    return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush);
120575 +size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
120577 +    ZSTD_inBuffer input = { NULL, 0, 0 };
120578 +    size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end);
120579 +    FORWARD_IF_ERROR( remainingToFlush , "ZSTD_compressStream2 failed");
120580 +    if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush;   /* minimal estimation */
120581 +    /* single thread mode : attempt to calculate remaining to flush more precisely */
120582 +    {   size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
120583 +        size_t const checksumSize = (size_t)(zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4);
120584 +        size_t const toFlush = remainingToFlush + lastBlockSize + checksumSize;
120585 +        DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (unsigned)toFlush);
120586 +        return toFlush;
120587 +    }
120591 +/*-=====  Pre-defined compression levels  =====-*/
120593 +#define ZSTD_MAX_CLEVEL     22
120594 +int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
120595 +int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; }
120597 +static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
120598 +{   /* "default" - for any srcSize > 256 KB */
120599 +    /* W,  C,  H,  S,  L, TL, strat */
120600 +    { 19, 12, 13,  1,  6,  1, ZSTD_fast    },  /* base for negative levels */
120601 +    { 19, 13, 14,  1,  7,  0, ZSTD_fast    },  /* level  1 */
120602 +    { 20, 15, 16,  1,  6,  0, ZSTD_fast    },  /* level  2 */
120603 +    { 21, 16, 17,  1,  5,  0, ZSTD_dfast   },  /* level  3 */
120604 +    { 21, 18, 18,  1,  5,  0, ZSTD_dfast   },  /* level  4 */
120605 +    { 21, 18, 19,  2,  5,  2, ZSTD_greedy  },  /* level  5 */
120606 +    { 21, 19, 19,  3,  5,  4, ZSTD_greedy  },  /* level  6 */
120607 +    { 21, 19, 19,  3,  5,  8, ZSTD_lazy    },  /* level  7 */
120608 +    { 21, 19, 19,  3,  5, 16, ZSTD_lazy2   },  /* level  8 */
120609 +    { 21, 19, 20,  4,  5, 16, ZSTD_lazy2   },  /* level  9 */
120610 +    { 22, 20, 21,  4,  5, 16, ZSTD_lazy2   },  /* level 10 */
120611 +    { 22, 21, 22,  4,  5, 16, ZSTD_lazy2   },  /* level 11 */
120612 +    { 22, 21, 22,  5,  5, 16, ZSTD_lazy2   },  /* level 12 */
120613 +    { 22, 21, 22,  5,  5, 32, ZSTD_btlazy2 },  /* level 13 */
120614 +    { 22, 22, 23,  5,  5, 32, ZSTD_btlazy2 },  /* level 14 */
120615 +    { 22, 23, 23,  6,  5, 32, ZSTD_btlazy2 },  /* level 15 */
120616 +    { 22, 22, 22,  5,  5, 48, ZSTD_btopt   },  /* level 16 */
120617 +    { 23, 23, 22,  5,  4, 64, ZSTD_btopt   },  /* level 17 */
120618 +    { 23, 23, 22,  6,  3, 64, ZSTD_btultra },  /* level 18 */
120619 +    { 23, 24, 22,  7,  3,256, ZSTD_btultra2},  /* level 19 */
120620 +    { 25, 25, 23,  7,  3,256, ZSTD_btultra2},  /* level 20 */
120621 +    { 26, 26, 24,  7,  3,512, ZSTD_btultra2},  /* level 21 */
120622 +    { 27, 27, 25,  9,  3,999, ZSTD_btultra2},  /* level 22 */
120624 +{   /* for srcSize <= 256 KB */
120625 +    /* W,  C,  H,  S,  L,  T, strat */
120626 +    { 18, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
120627 +    { 18, 13, 14,  1,  6,  0, ZSTD_fast    },  /* level  1 */
120628 +    { 18, 14, 14,  1,  5,  0, ZSTD_dfast   },  /* level  2 */
120629 +    { 18, 16, 16,  1,  4,  0, ZSTD_dfast   },  /* level  3 */
120630 +    { 18, 16, 17,  2,  5,  2, ZSTD_greedy  },  /* level  4.*/
120631 +    { 18, 18, 18,  3,  5,  2, ZSTD_greedy  },  /* level  5.*/
120632 +    { 18, 18, 19,  3,  5,  4, ZSTD_lazy    },  /* level  6.*/
120633 +    { 18, 18, 19,  4,  4,  4, ZSTD_lazy    },  /* level  7 */
120634 +    { 18, 18, 19,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
120635 +    { 18, 18, 19,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
120636 +    { 18, 18, 19,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
120637 +    { 18, 18, 19,  5,  4, 12, ZSTD_btlazy2 },  /* level 11.*/
120638 +    { 18, 19, 19,  7,  4, 12, ZSTD_btlazy2 },  /* level 12.*/
120639 +    { 18, 18, 19,  4,  4, 16, ZSTD_btopt   },  /* level 13 */
120640 +    { 18, 18, 19,  4,  3, 32, ZSTD_btopt   },  /* level 14.*/
120641 +    { 18, 18, 19,  6,  3,128, ZSTD_btopt   },  /* level 15.*/
120642 +    { 18, 19, 19,  6,  3,128, ZSTD_btultra },  /* level 16.*/
120643 +    { 18, 19, 19,  8,  3,256, ZSTD_btultra },  /* level 17.*/
120644 +    { 18, 19, 19,  6,  3,128, ZSTD_btultra2},  /* level 18.*/
120645 +    { 18, 19, 19,  8,  3,256, ZSTD_btultra2},  /* level 19.*/
120646 +    { 18, 19, 19, 10,  3,512, ZSTD_btultra2},  /* level 20.*/
120647 +    { 18, 19, 19, 12,  3,512, ZSTD_btultra2},  /* level 21.*/
120648 +    { 18, 19, 19, 13,  3,999, ZSTD_btultra2},  /* level 22.*/
120650 +{   /* for srcSize <= 128 KB */
120651 +    /* W,  C,  H,  S,  L,  T, strat */
120652 +    { 17, 12, 12,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
120653 +    { 17, 12, 13,  1,  6,  0, ZSTD_fast    },  /* level  1 */
120654 +    { 17, 13, 15,  1,  5,  0, ZSTD_fast    },  /* level  2 */
120655 +    { 17, 15, 16,  2,  5,  0, ZSTD_dfast   },  /* level  3 */
120656 +    { 17, 17, 17,  2,  4,  0, ZSTD_dfast   },  /* level  4 */
120657 +    { 17, 16, 17,  3,  4,  2, ZSTD_greedy  },  /* level  5 */
120658 +    { 17, 17, 17,  3,  4,  4, ZSTD_lazy    },  /* level  6 */
120659 +    { 17, 17, 17,  3,  4,  8, ZSTD_lazy2   },  /* level  7 */
120660 +    { 17, 17, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
120661 +    { 17, 17, 17,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
120662 +    { 17, 17, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
120663 +    { 17, 17, 17,  5,  4,  8, ZSTD_btlazy2 },  /* level 11 */
120664 +    { 17, 18, 17,  7,  4, 12, ZSTD_btlazy2 },  /* level 12 */
120665 +    { 17, 18, 17,  3,  4, 12, ZSTD_btopt   },  /* level 13.*/
120666 +    { 17, 18, 17,  4,  3, 32, ZSTD_btopt   },  /* level 14.*/
120667 +    { 17, 18, 17,  6,  3,256, ZSTD_btopt   },  /* level 15.*/
120668 +    { 17, 18, 17,  6,  3,128, ZSTD_btultra },  /* level 16.*/
120669 +    { 17, 18, 17,  8,  3,256, ZSTD_btultra },  /* level 17.*/
120670 +    { 17, 18, 17, 10,  3,512, ZSTD_btultra },  /* level 18.*/
120671 +    { 17, 18, 17,  5,  3,256, ZSTD_btultra2},  /* level 19.*/
120672 +    { 17, 18, 17,  7,  3,512, ZSTD_btultra2},  /* level 20.*/
120673 +    { 17, 18, 17,  9,  3,512, ZSTD_btultra2},  /* level 21.*/
120674 +    { 17, 18, 17, 11,  3,999, ZSTD_btultra2},  /* level 22.*/
120676 +{   /* for srcSize <= 16 KB */
120677 +    /* W,  C,  H,  S,  L,  T, strat */
120678 +    { 14, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
120679 +    { 14, 14, 15,  1,  5,  0, ZSTD_fast    },  /* level  1 */
120680 +    { 14, 14, 15,  1,  4,  0, ZSTD_fast    },  /* level  2 */
120681 +    { 14, 14, 15,  2,  4,  0, ZSTD_dfast   },  /* level  3 */
120682 +    { 14, 14, 14,  4,  4,  2, ZSTD_greedy  },  /* level  4 */
120683 +    { 14, 14, 14,  3,  4,  4, ZSTD_lazy    },  /* level  5.*/
120684 +    { 14, 14, 14,  4,  4,  8, ZSTD_lazy2   },  /* level  6 */
120685 +    { 14, 14, 14,  6,  4,  8, ZSTD_lazy2   },  /* level  7 */
120686 +    { 14, 14, 14,  8,  4,  8, ZSTD_lazy2   },  /* level  8.*/
120687 +    { 14, 15, 14,  5,  4,  8, ZSTD_btlazy2 },  /* level  9.*/
120688 +    { 14, 15, 14,  9,  4,  8, ZSTD_btlazy2 },  /* level 10.*/
120689 +    { 14, 15, 14,  3,  4, 12, ZSTD_btopt   },  /* level 11.*/
120690 +    { 14, 15, 14,  4,  3, 24, ZSTD_btopt   },  /* level 12.*/
120691 +    { 14, 15, 14,  5,  3, 32, ZSTD_btultra },  /* level 13.*/
120692 +    { 14, 15, 15,  6,  3, 64, ZSTD_btultra },  /* level 14.*/
120693 +    { 14, 15, 15,  7,  3,256, ZSTD_btultra },  /* level 15.*/
120694 +    { 14, 15, 15,  5,  3, 48, ZSTD_btultra2},  /* level 16.*/
120695 +    { 14, 15, 15,  6,  3,128, ZSTD_btultra2},  /* level 17.*/
120696 +    { 14, 15, 15,  7,  3,256, ZSTD_btultra2},  /* level 18.*/
120697 +    { 14, 15, 15,  8,  3,256, ZSTD_btultra2},  /* level 19.*/
120698 +    { 14, 15, 15,  8,  3,512, ZSTD_btultra2},  /* level 20.*/
120699 +    { 14, 15, 15,  9,  3,512, ZSTD_btultra2},  /* level 21.*/
120700 +    { 14, 15, 15, 10,  3,999, ZSTD_btultra2},  /* level 22.*/
120704 +static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(int const compressionLevel, size_t const dictSize)
120706 +    ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, 0, dictSize, ZSTD_cpm_createCDict);
120707 +    switch (cParams.strategy) {
120708 +        case ZSTD_fast:
120709 +        case ZSTD_dfast:
120710 +            break;
120711 +        case ZSTD_greedy:
120712 +        case ZSTD_lazy:
120713 +        case ZSTD_lazy2:
120714 +            cParams.hashLog += ZSTD_LAZY_DDSS_BUCKET_LOG;
120715 +            break;
120716 +        case ZSTD_btlazy2:
120717 +        case ZSTD_btopt:
120718 +        case ZSTD_btultra:
120719 +        case ZSTD_btultra2:
120720 +            break;
120721 +    }
120722 +    return cParams;
120725 +static int ZSTD_dedicatedDictSearch_isSupported(
120726 +        ZSTD_compressionParameters const* cParams)
120728 +    return (cParams->strategy >= ZSTD_greedy)
120729 +        && (cParams->strategy <= ZSTD_lazy2)
120730 +        && (cParams->hashLog >= cParams->chainLog)
120731 +        && (cParams->chainLog <= 24);
120735 + * Reverses the adjustment applied to cparams when enabling dedicated dict
120736 + * search. This is used to recover the params set to be used in the working
120737 + * context. (Otherwise, those tables would also grow.)
120738 + */
120739 +static void ZSTD_dedicatedDictSearch_revertCParams(
120740 +        ZSTD_compressionParameters* cParams) {
120741 +    switch (cParams->strategy) {
120742 +        case ZSTD_fast:
120743 +        case ZSTD_dfast:
120744 +            break;
120745 +        case ZSTD_greedy:
120746 +        case ZSTD_lazy:
120747 +        case ZSTD_lazy2:
120748 +            cParams->hashLog -= ZSTD_LAZY_DDSS_BUCKET_LOG;
120749 +            break;
120750 +        case ZSTD_btlazy2:
120751 +        case ZSTD_btopt:
120752 +        case ZSTD_btultra:
120753 +        case ZSTD_btultra2:
120754 +            break;
120755 +    }
120758 +static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
120760 +    switch (mode) {
120761 +    case ZSTD_cpm_unknown:
120762 +    case ZSTD_cpm_noAttachDict:
120763 +    case ZSTD_cpm_createCDict:
120764 +        break;
120765 +    case ZSTD_cpm_attachDict:
120766 +        dictSize = 0;
120767 +        break;
120768 +    default:
120769 +        assert(0);
120770 +        break;
120771 +    }
120772 +    {   int const unknown = srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN;
120773 +        size_t const addedSize = unknown && dictSize > 0 ? 500 : 0;
120774 +        return unknown && dictSize == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : srcSizeHint+dictSize+addedSize;
120775 +    }
120778 +/*! ZSTD_getCParams_internal() :
120779 + * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
120780 + *  Note: srcSizeHint 0 means 0, use ZSTD_CONTENTSIZE_UNKNOWN for unknown.
120781 + *        Use dictSize == 0 for unknown or unused.
120782 + *  Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_cParamMode_e`. */
120783 +static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
120785 +    U64 const rSize = ZSTD_getCParamRowSize(srcSizeHint, dictSize, mode);
120786 +    U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);
120787 +    int row;
120788 +    DEBUGLOG(5, "ZSTD_getCParams_internal (cLevel=%i)", compressionLevel);
120790 +    /* row */
120791 +    if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT;   /* 0 == default */
120792 +    else if (compressionLevel < 0) row = 0;   /* entry 0 is baseline for fast mode */
120793 +    else if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL;
120794 +    else row = compressionLevel;
120796 +    {   ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row];
120797 +        /* acceleration factor */
120798 +        if (compressionLevel < 0) {
120799 +            int const clampedCompressionLevel = MAX(ZSTD_minCLevel(), compressionLevel);
120800 +            cp.targetLength = (unsigned)(-clampedCompressionLevel);
120801 +        }
120802 +        /* refine parameters based on srcSize & dictSize */
120803 +        return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize, mode);
120804 +    }
120807 +/*! ZSTD_getCParams() :
120808 + * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
120809 + *  Size values are optional, provide 0 if not known or unused */
120810 +ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
120812 +    if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
120813 +    return ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
120816 +/*! ZSTD_getParams() :
120817 + *  same idea as ZSTD_getCParams()
120818 + * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
120819 + *  Fields of `ZSTD_frameParameters` are set to default values */
120820 +static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) {
120821 +    ZSTD_parameters params;
120822 +    ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, mode);
120823 +    DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel);
120824 +    ZSTD_memset(&params, 0, sizeof(params));
120825 +    params.cParams = cParams;
120826 +    params.fParams.contentSizeFlag = 1;
120827 +    return params;
120830 +/*! ZSTD_getParams() :
120831 + *  same idea as ZSTD_getCParams()
120832 + * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
120833 + *  Fields of `ZSTD_frameParameters` are set to default values */
120834 +ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) {
120835 +    if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
120836 +    return ZSTD_getParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
120838 diff --git a/lib/zstd/compress/zstd_compress_internal.h b/lib/zstd/compress/zstd_compress_internal.h
120839 new file mode 100644
120840 index 000000000000..b56c482322ba
120841 --- /dev/null
120842 +++ b/lib/zstd/compress/zstd_compress_internal.h
120843 @@ -0,0 +1,1188 @@
120845 + * Copyright (c) Yann Collet, Facebook, Inc.
120846 + * All rights reserved.
120848 + * This source code is licensed under both the BSD-style license (found in the
120849 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
120850 + * in the COPYING file in the root directory of this source tree).
120851 + * You may select, at your option, one of the above-listed licenses.
120852 + */
120854 +/* This header contains definitions
120855 + * that shall **only** be used by modules within lib/compress.
120856 + */
120858 +#ifndef ZSTD_COMPRESS_H
120859 +#define ZSTD_COMPRESS_H
120861 +/*-*************************************
120862 +*  Dependencies
120863 +***************************************/
120864 +#include "../common/zstd_internal.h"
120865 +#include "zstd_cwksp.h"
120868 +/*-*************************************
120869 +*  Constants
120870 +***************************************/
120871 +#define kSearchStrength      8
120872 +#define HASH_READ_SIZE       8
120873 +#define ZSTD_DUBT_UNSORTED_MARK 1   /* For btlazy2 strategy, index ZSTD_DUBT_UNSORTED_MARK==1 means "unsorted".
120874 +                                       It could be confused for a real successor at index "1", if sorted as larger than its predecessor.
120875 +                                       It's not a big deal though : candidate will just be sorted again.
120876 +                                       Additionally, candidate position 1 will be lost.
120877 +                                       But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss.
120878 +                                       The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table re-use with a different strategy.
120879 +                                       This constant is required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal() */
120882 +/*-*************************************
120883 +*  Context memory management
120884 +***************************************/
120885 +typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
120886 +typedef enum { zcss_init=0, zcss_load, zcss_flush } ZSTD_cStreamStage;
120888 +typedef struct ZSTD_prefixDict_s {
120889 +    const void* dict;
120890 +    size_t dictSize;
120891 +    ZSTD_dictContentType_e dictContentType;
120892 +} ZSTD_prefixDict;
120894 +typedef struct {
120895 +    void* dictBuffer;
120896 +    void const* dict;
120897 +    size_t dictSize;
120898 +    ZSTD_dictContentType_e dictContentType;
120899 +    ZSTD_CDict* cdict;
120900 +} ZSTD_localDict;
120902 +typedef struct {
120903 +    HUF_CElt CTable[HUF_CTABLE_SIZE_U32(255)];
120904 +    HUF_repeat repeatMode;
120905 +} ZSTD_hufCTables_t;
120907 +typedef struct {
120908 +    FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
120909 +    FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
120910 +    FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
120911 +    FSE_repeat offcode_repeatMode;
120912 +    FSE_repeat matchlength_repeatMode;
120913 +    FSE_repeat litlength_repeatMode;
120914 +} ZSTD_fseCTables_t;
120916 +typedef struct {
120917 +    ZSTD_hufCTables_t huf;
120918 +    ZSTD_fseCTables_t fse;
120919 +} ZSTD_entropyCTables_t;
120921 +typedef struct {
120922 +    U32 off;            /* Offset code (offset + ZSTD_REP_MOVE) for the match */
120923 +    U32 len;            /* Raw length of match */
120924 +} ZSTD_match_t;
120926 +typedef struct {
120927 +    U32 offset;         /* Offset of sequence */
120928 +    U32 litLength;      /* Length of literals prior to match */
120929 +    U32 matchLength;    /* Raw length of match */
120930 +} rawSeq;
120932 +typedef struct {
120933 +  rawSeq* seq;          /* The start of the sequences */
120934 +  size_t pos;           /* The index in seq where reading stopped. pos <= size. */
120935 +  size_t posInSequence; /* The position within the sequence at seq[pos] where reading
120936 +                           stopped. posInSequence <= seq[pos].litLength + seq[pos].matchLength */
120937 +  size_t size;          /* The number of sequences. <= capacity. */
120938 +  size_t capacity;      /* The capacity starting from `seq` pointer */
120939 +} rawSeqStore_t;
120941 +UNUSED_ATTR static const rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0, 0};
120943 +typedef struct {
120944 +    int price;
120945 +    U32 off;
120946 +    U32 mlen;
120947 +    U32 litlen;
120948 +    U32 rep[ZSTD_REP_NUM];
120949 +} ZSTD_optimal_t;
120951 +typedef enum { zop_dynamic=0, zop_predef } ZSTD_OptPrice_e;
120953 +typedef struct {
120954 +    /* All tables are allocated inside cctx->workspace by ZSTD_resetCCtx_internal() */
120955 +    unsigned* litFreq;           /* table of literals statistics, of size 256 */
120956 +    unsigned* litLengthFreq;     /* table of litLength statistics, of size (MaxLL+1) */
120957 +    unsigned* matchLengthFreq;   /* table of matchLength statistics, of size (MaxML+1) */
120958 +    unsigned* offCodeFreq;       /* table of offCode statistics, of size (MaxOff+1) */
120959 +    ZSTD_match_t* matchTable;    /* list of found matches, of size ZSTD_OPT_NUM+1 */
120960 +    ZSTD_optimal_t* priceTable;  /* All positions tracked by optimal parser, of size ZSTD_OPT_NUM+1 */
120962 +    U32  litSum;                 /* nb of literals */
120963 +    U32  litLengthSum;           /* nb of litLength codes */
120964 +    U32  matchLengthSum;         /* nb of matchLength codes */
120965 +    U32  offCodeSum;             /* nb of offset codes */
120966 +    U32  litSumBasePrice;        /* to compare to log2(litfreq) */
120967 +    U32  litLengthSumBasePrice;  /* to compare to log2(llfreq)  */
120968 +    U32  matchLengthSumBasePrice;/* to compare to log2(mlfreq)  */
120969 +    U32  offCodeSumBasePrice;    /* to compare to log2(offreq)  */
120970 +    ZSTD_OptPrice_e priceType;   /* prices can be determined dynamically, or follow a pre-defined cost structure */
120971 +    const ZSTD_entropyCTables_t* symbolCosts;  /* pre-calculated dictionary statistics */
120972 +    ZSTD_literalCompressionMode_e literalCompressionMode;
120973 +} optState_t;
120975 +typedef struct {
120976 +  ZSTD_entropyCTables_t entropy;
120977 +  U32 rep[ZSTD_REP_NUM];
120978 +} ZSTD_compressedBlockState_t;
120980 +typedef struct {
120981 +    BYTE const* nextSrc;    /* next block here to continue on current prefix */
120982 +    BYTE const* base;       /* All regular indexes relative to this position */
120983 +    BYTE const* dictBase;   /* extDict indexes relative to this position */
120984 +    U32 dictLimit;          /* below that point, need extDict */
120985 +    U32 lowLimit;           /* below that point, no more valid data */
120986 +} ZSTD_window_t;
120988 +typedef struct ZSTD_matchState_t ZSTD_matchState_t;
120989 +struct ZSTD_matchState_t {
120990 +    ZSTD_window_t window;   /* State for window round buffer management */
120991 +    U32 loadedDictEnd;      /* index of end of dictionary, within context's referential.
120992 +                             * When loadedDictEnd != 0, a dictionary is in use, and still valid.
120993 +                             * This relies on a mechanism to set loadedDictEnd=0 when dictionary is no longer within distance.
120994 +                             * Such mechanism is provided within ZSTD_window_enforceMaxDist() and ZSTD_checkDictValidity().
120995 +                             * When dict referential is copied into active context (i.e. not attached),
120996 +                             * loadedDictEnd == dictSize, since referential starts from zero.
120997 +                             */
120998 +    U32 nextToUpdate;       /* index from which to continue table update */
120999 +    U32 hashLog3;           /* dispatch table for matches of len==3 : larger == faster, more memory */
121000 +    U32* hashTable;
121001 +    U32* hashTable3;
121002 +    U32* chainTable;
121003 +    int dedicatedDictSearch;  /* Indicates whether this matchState is using the
121004 +                               * dedicated dictionary search structure.
121005 +                               */
121006 +    optState_t opt;         /* optimal parser state */
121007 +    const ZSTD_matchState_t* dictMatchState;
121008 +    ZSTD_compressionParameters cParams;
121009 +    const rawSeqStore_t* ldmSeqStore;
121012 +typedef struct {
121013 +    ZSTD_compressedBlockState_t* prevCBlock;
121014 +    ZSTD_compressedBlockState_t* nextCBlock;
121015 +    ZSTD_matchState_t matchState;
121016 +} ZSTD_blockState_t;
121018 +typedef struct {
121019 +    U32 offset;
121020 +    U32 checksum;
121021 +} ldmEntry_t;
121023 +typedef struct {
121024 +    BYTE const* split;
121025 +    U32 hash;
121026 +    U32 checksum;
121027 +    ldmEntry_t* bucket;
121028 +} ldmMatchCandidate_t;
121030 +#define LDM_BATCH_SIZE 64
121032 +typedef struct {
121033 +    ZSTD_window_t window;   /* State for the window round buffer management */
121034 +    ldmEntry_t* hashTable;
121035 +    U32 loadedDictEnd;
121036 +    BYTE* bucketOffsets;    /* Next position in bucket to insert entry */
121037 +    size_t splitIndices[LDM_BATCH_SIZE];
121038 +    ldmMatchCandidate_t matchCandidates[LDM_BATCH_SIZE];
121039 +} ldmState_t;
121041 +typedef struct {
121042 +    U32 enableLdm;          /* 1 if enable long distance matching */
121043 +    U32 hashLog;            /* Log size of hashTable */
121044 +    U32 bucketSizeLog;      /* Log bucket size for collision resolution, at most 8 */
121045 +    U32 minMatchLength;     /* Minimum match length */
121046 +    U32 hashRateLog;       /* Log number of entries to skip */
121047 +    U32 windowLog;          /* Window log for the LDM */
121048 +} ldmParams_t;
121050 +typedef struct {
121051 +    int collectSequences;
121052 +    ZSTD_Sequence* seqStart;
121053 +    size_t seqIndex;
121054 +    size_t maxSequences;
121055 +} SeqCollector;
121057 +struct ZSTD_CCtx_params_s {
121058 +    ZSTD_format_e format;
121059 +    ZSTD_compressionParameters cParams;
121060 +    ZSTD_frameParameters fParams;
121062 +    int compressionLevel;
121063 +    int forceWindow;           /* force back-references to respect limit of
121064 +                                * 1<<wLog, even for dictionary */
121065 +    size_t targetCBlockSize;   /* Tries to fit compressed block size to be around targetCBlockSize.
121066 +                                * No target when targetCBlockSize == 0.
121067 +                                * There is no guarantee on compressed block size */
121068 +    int srcSizeHint;           /* User's best guess of source size.
121069 +                                * Hint is not valid when srcSizeHint == 0.
121070 +                                * There is no guarantee that hint is close to actual source size */
121072 +    ZSTD_dictAttachPref_e attachDictPref;
121073 +    ZSTD_literalCompressionMode_e literalCompressionMode;
121075 +    /* Multithreading: used to pass parameters to mtctx */
121076 +    int nbWorkers;
121077 +    size_t jobSize;
121078 +    int overlapLog;
121079 +    int rsyncable;
121081 +    /* Long distance matching parameters */
121082 +    ldmParams_t ldmParams;
121084 +    /* Dedicated dict search algorithm trigger */
121085 +    int enableDedicatedDictSearch;
121087 +    /* Input/output buffer modes */
121088 +    ZSTD_bufferMode_e inBufferMode;
121089 +    ZSTD_bufferMode_e outBufferMode;
121091 +    /* Sequence compression API */
121092 +    ZSTD_sequenceFormat_e blockDelimiters;
121093 +    int validateSequences;
121095 +    /* Internal use, for createCCtxParams() and freeCCtxParams() only */
121096 +    ZSTD_customMem customMem;
121097 +};  /* typedef'd to ZSTD_CCtx_params within "zstd.h" */
121099 +#define COMPRESS_SEQUENCES_WORKSPACE_SIZE (sizeof(unsigned) * (MaxSeq + 2))
121100 +#define ENTROPY_WORKSPACE_SIZE (HUF_WORKSPACE_SIZE + COMPRESS_SEQUENCES_WORKSPACE_SIZE)
121103 + * Indicates whether this compression proceeds directly from user-provided
121104 + * source buffer to user-provided destination buffer (ZSTDb_not_buffered), or
121105 + * whether the context needs to buffer the input/output (ZSTDb_buffered).
121106 + */
121107 +typedef enum {
121108 +    ZSTDb_not_buffered,
121109 +    ZSTDb_buffered
121110 +} ZSTD_buffered_policy_e;
121112 +struct ZSTD_CCtx_s {
121113 +    ZSTD_compressionStage_e stage;
121114 +    int cParamsChanged;                  /* == 1 if cParams(except wlog) or compression level are changed in requestedParams. Triggers transmission of new params to ZSTDMT (if available) then reset to 0. */
121115 +    int bmi2;                            /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
121116 +    ZSTD_CCtx_params requestedParams;
121117 +    ZSTD_CCtx_params appliedParams;
121118 +    U32   dictID;
121119 +    size_t dictContentSize;
121121 +    ZSTD_cwksp workspace; /* manages buffer for dynamic allocations */
121122 +    size_t blockSize;
121123 +    unsigned long long pledgedSrcSizePlusOne;  /* this way, 0 (default) == unknown */
121124 +    unsigned long long consumedSrcSize;
121125 +    unsigned long long producedCSize;
121126 +    struct xxh64_state xxhState;
121127 +    ZSTD_customMem customMem;
121128 +    ZSTD_threadPool* pool;
121129 +    size_t staticSize;
121130 +    SeqCollector seqCollector;
121131 +    int isFirstBlock;
121132 +    int initialized;
121134 +    seqStore_t seqStore;      /* sequences storage ptrs */
121135 +    ldmState_t ldmState;      /* long distance matching state */
121136 +    rawSeq* ldmSequences;     /* Storage for the ldm output sequences */
121137 +    size_t maxNbLdmSequences;
121138 +    rawSeqStore_t externSeqStore; /* Mutable reference to external sequences */
121139 +    ZSTD_blockState_t blockState;
121140 +    U32* entropyWorkspace;  /* entropy workspace of ENTROPY_WORKSPACE_SIZE bytes */
121142 +    /* Wether we are streaming or not */
121143 +    ZSTD_buffered_policy_e bufferedPolicy;
121145 +    /* streaming */
121146 +    char*  inBuff;
121147 +    size_t inBuffSize;
121148 +    size_t inToCompress;
121149 +    size_t inBuffPos;
121150 +    size_t inBuffTarget;
121151 +    char*  outBuff;
121152 +    size_t outBuffSize;
121153 +    size_t outBuffContentSize;
121154 +    size_t outBuffFlushedSize;
121155 +    ZSTD_cStreamStage streamStage;
121156 +    U32    frameEnded;
121158 +    /* Stable in/out buffer verification */
121159 +    ZSTD_inBuffer expectedInBuffer;
121160 +    size_t expectedOutBufferSize;
121162 +    /* Dictionary */
121163 +    ZSTD_localDict localDict;
121164 +    const ZSTD_CDict* cdict;
121165 +    ZSTD_prefixDict prefixDict;   /* single-usage dictionary */
121167 +    /* Multi-threading */
121169 +    /* Tracing */
121172 +typedef enum { ZSTD_dtlm_fast, ZSTD_dtlm_full } ZSTD_dictTableLoadMethod_e;
121174 +typedef enum {
121175 +    ZSTD_noDict = 0,
121176 +    ZSTD_extDict = 1,
121177 +    ZSTD_dictMatchState = 2,
121178 +    ZSTD_dedicatedDictSearch = 3
121179 +} ZSTD_dictMode_e;
121181 +typedef enum {
121182 +    ZSTD_cpm_noAttachDict = 0,  /* Compression with ZSTD_noDict or ZSTD_extDict.
121183 +                                 * In this mode we use both the srcSize and the dictSize
121184 +                                 * when selecting and adjusting parameters.
121185 +                                 */
121186 +    ZSTD_cpm_attachDict = 1,    /* Compression with ZSTD_dictMatchState or ZSTD_dedicatedDictSearch.
121187 +                                 * In this mode we only take the srcSize into account when selecting
121188 +                                 * and adjusting parameters.
121189 +                                 */
121190 +    ZSTD_cpm_createCDict = 2,   /* Creating a CDict.
121191 +                                 * In this mode we take both the source size and the dictionary size
121192 +                                 * into account when selecting and adjusting the parameters.
121193 +                                 */
121194 +    ZSTD_cpm_unknown = 3,       /* ZSTD_getCParams, ZSTD_getParams, ZSTD_adjustParams.
121195 +                                 * We don't know what these parameters are for. We default to the legacy
121196 +                                 * behavior of taking both the source size and the dict size into account
121197 +                                 * when selecting and adjusting parameters.
121198 +                                 */
121199 +} ZSTD_cParamMode_e;
121201 +typedef size_t (*ZSTD_blockCompressor) (
121202 +        ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
121203 +        void const* src, size_t srcSize);
121204 +ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode);
121207 +MEM_STATIC U32 ZSTD_LLcode(U32 litLength)
121209 +    static const BYTE LL_Code[64] = {  0,  1,  2,  3,  4,  5,  6,  7,
121210 +                                       8,  9, 10, 11, 12, 13, 14, 15,
121211 +                                      16, 16, 17, 17, 18, 18, 19, 19,
121212 +                                      20, 20, 20, 20, 21, 21, 21, 21,
121213 +                                      22, 22, 22, 22, 22, 22, 22, 22,
121214 +                                      23, 23, 23, 23, 23, 23, 23, 23,
121215 +                                      24, 24, 24, 24, 24, 24, 24, 24,
121216 +                                      24, 24, 24, 24, 24, 24, 24, 24 };
121217 +    static const U32 LL_deltaCode = 19;
121218 +    return (litLength > 63) ? ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
121221 +/* ZSTD_MLcode() :
121222 + * note : mlBase = matchLength - MINMATCH;
121223 + *        because it's the format it's stored in seqStore->sequences */
121224 +MEM_STATIC U32 ZSTD_MLcode(U32 mlBase)
121226 +    static const BYTE ML_Code[128] = { 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
121227 +                                      16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
121228 +                                      32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37,
121229 +                                      38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39,
121230 +                                      40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
121231 +                                      41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
121232 +                                      42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
121233 +                                      42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 };
121234 +    static const U32 ML_deltaCode = 36;
121235 +    return (mlBase > 127) ? ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase];
121238 +typedef struct repcodes_s {
121239 +    U32 rep[3];
121240 +} repcodes_t;
121242 +MEM_STATIC repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 const ll0)
121244 +    repcodes_t newReps;
121245 +    if (offset >= ZSTD_REP_NUM) {  /* full offset */
121246 +        newReps.rep[2] = rep[1];
121247 +        newReps.rep[1] = rep[0];
121248 +        newReps.rep[0] = offset - ZSTD_REP_MOVE;
121249 +    } else {   /* repcode */
121250 +        U32 const repCode = offset + ll0;
121251 +        if (repCode > 0) {  /* note : if repCode==0, no change */
121252 +            U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
121253 +            newReps.rep[2] = (repCode >= 2) ? rep[1] : rep[2];
121254 +            newReps.rep[1] = rep[0];
121255 +            newReps.rep[0] = currentOffset;
121256 +        } else {   /* repCode == 0 */
121257 +            ZSTD_memcpy(&newReps, rep, sizeof(newReps));
121258 +        }
121259 +    }
121260 +    return newReps;
121263 +/* ZSTD_cParam_withinBounds:
121264 + * @return 1 if value is within cParam bounds,
121265 + * 0 otherwise */
121266 +MEM_STATIC int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value)
121268 +    ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
121269 +    if (ZSTD_isError(bounds.error)) return 0;
121270 +    if (value < bounds.lowerBound) return 0;
121271 +    if (value > bounds.upperBound) return 0;
121272 +    return 1;
121275 +/* ZSTD_noCompressBlock() :
121276 + * Writes uncompressed block to dst buffer from given src.
121277 + * Returns the size of the block */
121278 +MEM_STATIC size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock)
121280 +    U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3);
121281 +    RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity,
121282 +                    dstSize_tooSmall, "dst buf too small for uncompressed block");
121283 +    MEM_writeLE24(dst, cBlockHeader24);
121284 +    ZSTD_memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
121285 +    return ZSTD_blockHeaderSize + srcSize;
121288 +MEM_STATIC size_t ZSTD_rleCompressBlock (void* dst, size_t dstCapacity, BYTE src, size_t srcSize, U32 lastBlock)
121290 +    BYTE* const op = (BYTE*)dst;
121291 +    U32 const cBlockHeader = lastBlock + (((U32)bt_rle)<<1) + (U32)(srcSize << 3);
121292 +    RETURN_ERROR_IF(dstCapacity < 4, dstSize_tooSmall, "");
121293 +    MEM_writeLE24(op, cBlockHeader);
121294 +    op[3] = src;
121295 +    return 4;
121299 +/* ZSTD_minGain() :
121300 + * minimum compression required
121301 + * to generate a compress block or a compressed literals section.
121302 + * note : use same formula for both situations */
121303 +MEM_STATIC size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat)
121305 +    U32 const minlog = (strat>=ZSTD_btultra) ? (U32)(strat) - 1 : 6;
121306 +    ZSTD_STATIC_ASSERT(ZSTD_btultra == 8);
121307 +    assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
121308 +    return (srcSize >> minlog) + 2;
121311 +MEM_STATIC int ZSTD_disableLiteralsCompression(const ZSTD_CCtx_params* cctxParams)
121313 +    switch (cctxParams->literalCompressionMode) {
121314 +    case ZSTD_lcm_huffman:
121315 +        return 0;
121316 +    case ZSTD_lcm_uncompressed:
121317 +        return 1;
121318 +    default:
121319 +        assert(0 /* impossible: pre-validated */);
121320 +        /* fall-through */
121321 +    case ZSTD_lcm_auto:
121322 +        return (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0);
121323 +    }
121326 +/*! ZSTD_safecopyLiterals() :
121327 + *  memcpy() function that won't read beyond more than WILDCOPY_OVERLENGTH bytes past ilimit_w.
121328 + *  Only called when the sequence ends past ilimit_w, so it only needs to be optimized for single
121329 + *  large copies.
121330 + */
121331 +static void ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE const* ilimit_w) {
121332 +    assert(iend > ilimit_w);
121333 +    if (ip <= ilimit_w) {
121334 +        ZSTD_wildcopy(op, ip, ilimit_w - ip, ZSTD_no_overlap);
121335 +        op += ilimit_w - ip;
121336 +        ip = ilimit_w;
121337 +    }
121338 +    while (ip < iend) *op++ = *ip++;
121341 +/*! ZSTD_storeSeq() :
121342 + *  Store a sequence (litlen, litPtr, offCode and mlBase) into seqStore_t.
121343 + *  `offCode` : distance to match + ZSTD_REP_MOVE (values <= ZSTD_REP_MOVE are repCodes).
121344 + *  `mlBase` : matchLength - MINMATCH
121345 + *  Allowed to overread literals up to litLimit.
121347 +HINT_INLINE UNUSED_ATTR
121348 +void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* literals, const BYTE* litLimit, U32 offCode, size_t mlBase)
121350 +    BYTE const* const litLimit_w = litLimit - WILDCOPY_OVERLENGTH;
121351 +    BYTE const* const litEnd = literals + litLength;
121352 +#if defined(DEBUGLEVEL) && (DEBUGLEVEL >= 6)
121353 +    static const BYTE* g_start = NULL;
121354 +    if (g_start==NULL) g_start = (const BYTE*)literals;  /* note : index only works for compression within a single segment */
121355 +    {   U32 const pos = (U32)((const BYTE*)literals - g_start);
121356 +        DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offCode%7u",
121357 +               pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offCode);
121358 +    }
121359 +#endif
121360 +    assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq);
121361 +    /* copy Literals */
121362 +    assert(seqStorePtr->maxNbLit <= 128 KB);
121363 +    assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit);
121364 +    assert(literals + litLength <= litLimit);
121365 +    if (litEnd <= litLimit_w) {
121366 +        /* Common case we can use wildcopy.
121367 +        * First copy 16 bytes, because literals are likely short.
121368 +        */
121369 +        assert(WILDCOPY_OVERLENGTH >= 16);
121370 +        ZSTD_copy16(seqStorePtr->lit, literals);
121371 +        if (litLength > 16) {
121372 +            ZSTD_wildcopy(seqStorePtr->lit+16, literals+16, (ptrdiff_t)litLength-16, ZSTD_no_overlap);
121373 +        }
121374 +    } else {
121375 +        ZSTD_safecopyLiterals(seqStorePtr->lit, literals, litEnd, litLimit_w);
121376 +    }
121377 +    seqStorePtr->lit += litLength;
121379 +    /* literal Length */
121380 +    if (litLength>0xFFFF) {
121381 +        assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */
121382 +        seqStorePtr->longLengthID = 1;
121383 +        seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
121384 +    }
121385 +    seqStorePtr->sequences[0].litLength = (U16)litLength;
121387 +    /* match offset */
121388 +    seqStorePtr->sequences[0].offset = offCode + 1;
121390 +    /* match Length */
121391 +    if (mlBase>0xFFFF) {
121392 +        assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */
121393 +        seqStorePtr->longLengthID = 2;
121394 +        seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
121395 +    }
121396 +    seqStorePtr->sequences[0].matchLength = (U16)mlBase;
121398 +    seqStorePtr->sequences++;
121402 +/*-*************************************
121403 +*  Match length counter
121404 +***************************************/
121405 +static unsigned ZSTD_NbCommonBytes (size_t val)
121407 +    if (MEM_isLittleEndian()) {
121408 +        if (MEM_64bits()) {
121409 +#       if (__GNUC__ >= 4)
121410 +            return (__builtin_ctzll((U64)val) >> 3);
121411 +#       else
121412 +            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
121413 +                                                     0, 3, 1, 3, 1, 4, 2, 7,
121414 +                                                     0, 2, 3, 6, 1, 5, 3, 5,
121415 +                                                     1, 3, 4, 4, 2, 5, 6, 7,
121416 +                                                     7, 0, 1, 2, 3, 3, 4, 6,
121417 +                                                     2, 6, 5, 5, 3, 4, 5, 6,
121418 +                                                     7, 1, 2, 4, 6, 4, 4, 5,
121419 +                                                     7, 2, 6, 5, 7, 6, 7, 7 };
121420 +            return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
121421 +#       endif
121422 +        } else { /* 32 bits */
121423 +#       if (__GNUC__ >= 3)
121424 +            return (__builtin_ctz((U32)val) >> 3);
121425 +#       else
121426 +            static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
121427 +                                                     3, 2, 2, 1, 3, 2, 0, 1,
121428 +                                                     3, 3, 1, 2, 2, 2, 2, 0,
121429 +                                                     3, 1, 2, 0, 1, 0, 1, 1 };
121430 +            return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
121431 +#       endif
121432 +        }
121433 +    } else {  /* Big Endian CPU */
121434 +        if (MEM_64bits()) {
121435 +#       if (__GNUC__ >= 4)
121436 +            return (__builtin_clzll(val) >> 3);
121437 +#       else
121438 +            unsigned r;
121439 +            const unsigned n32 = sizeof(size_t)*4;   /* calculate this way due to compiler complaining in 32-bits mode */
121440 +            if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
121441 +            if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
121442 +            r += (!val);
121443 +            return r;
121444 +#       endif
121445 +        } else { /* 32 bits */
121446 +#       if (__GNUC__ >= 3)
121447 +            return (__builtin_clz((U32)val) >> 3);
121448 +#       else
121449 +            unsigned r;
121450 +            if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
121451 +            r += (!val);
121452 +            return r;
121453 +#       endif
121454 +    }   }
121458 +MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit)
121460 +    const BYTE* const pStart = pIn;
121461 +    const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1);
121463 +    if (pIn < pInLoopLimit) {
121464 +        { size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
121465 +          if (diff) return ZSTD_NbCommonBytes(diff); }
121466 +        pIn+=sizeof(size_t); pMatch+=sizeof(size_t);
121467 +        while (pIn < pInLoopLimit) {
121468 +            size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
121469 +            if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; }
121470 +            pIn += ZSTD_NbCommonBytes(diff);
121471 +            return (size_t)(pIn - pStart);
121472 +    }   }
121473 +    if (MEM_64bits() && (pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; }
121474 +    if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; }
121475 +    if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
121476 +    return (size_t)(pIn - pStart);
121479 +/** ZSTD_count_2segments() :
121480 + *  can count match length with `ip` & `match` in 2 different segments.
121481 + *  convention : on reaching mEnd, match count continue starting from iStart
121482 + */
121483 +MEM_STATIC size_t
121484 +ZSTD_count_2segments(const BYTE* ip, const BYTE* match,
121485 +                     const BYTE* iEnd, const BYTE* mEnd, const BYTE* iStart)
121487 +    const BYTE* const vEnd = MIN( ip + (mEnd - match), iEnd);
121488 +    size_t const matchLength = ZSTD_count(ip, match, vEnd);
121489 +    if (match + matchLength != mEnd) return matchLength;
121490 +    DEBUGLOG(7, "ZSTD_count_2segments: found a 2-parts match (current length==%zu)", matchLength);
121491 +    DEBUGLOG(7, "distance from match beginning to end dictionary = %zi", mEnd - match);
121492 +    DEBUGLOG(7, "distance from current pos to end buffer = %zi", iEnd - ip);
121493 +    DEBUGLOG(7, "next byte : ip==%02X, istart==%02X", ip[matchLength], *iStart);
121494 +    DEBUGLOG(7, "final match length = %zu", matchLength + ZSTD_count(ip+matchLength, iStart, iEnd));
121495 +    return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd);
121499 +/*-*************************************
121500 + *  Hashes
121501 + ***************************************/
121502 +static const U32 prime3bytes = 506832829U;
121503 +static U32    ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes)  >> (32-h) ; }
121504 +MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */
121506 +static const U32 prime4bytes = 2654435761U;
121507 +static U32    ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; }
121508 +static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); }
121510 +static const U64 prime5bytes = 889523592379ULL;
121511 +static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u  << (64-40)) * prime5bytes) >> (64-h)) ; }
121512 +static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); }
121514 +static const U64 prime6bytes = 227718039650203ULL;
121515 +static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u  << (64-48)) * prime6bytes) >> (64-h)) ; }
121516 +static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }
121518 +static const U64 prime7bytes = 58295818150454627ULL;
121519 +static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u  << (64-56)) * prime7bytes) >> (64-h)) ; }
121520 +static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); }
121522 +static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
121523 +static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
121524 +static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }
121526 +MEM_STATIC FORCE_INLINE_ATTR
121527 +size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
121529 +    switch(mls)
121530 +    {
121531 +    default:
121532 +    case 4: return ZSTD_hash4Ptr(p, hBits);
121533 +    case 5: return ZSTD_hash5Ptr(p, hBits);
121534 +    case 6: return ZSTD_hash6Ptr(p, hBits);
121535 +    case 7: return ZSTD_hash7Ptr(p, hBits);
121536 +    case 8: return ZSTD_hash8Ptr(p, hBits);
121537 +    }
121540 +/** ZSTD_ipow() :
121541 + * Return base^exponent.
121542 + */
121543 +static U64 ZSTD_ipow(U64 base, U64 exponent)
121545 +    U64 power = 1;
121546 +    while (exponent) {
121547 +      if (exponent & 1) power *= base;
121548 +      exponent >>= 1;
121549 +      base *= base;
121550 +    }
121551 +    return power;
121554 +#define ZSTD_ROLL_HASH_CHAR_OFFSET 10
121556 +/** ZSTD_rollingHash_append() :
121557 + * Add the buffer to the hash value.
121558 + */
121559 +static U64 ZSTD_rollingHash_append(U64 hash, void const* buf, size_t size)
121561 +    BYTE const* istart = (BYTE const*)buf;
121562 +    size_t pos;
121563 +    for (pos = 0; pos < size; ++pos) {
121564 +        hash *= prime8bytes;
121565 +        hash += istart[pos] + ZSTD_ROLL_HASH_CHAR_OFFSET;
121566 +    }
121567 +    return hash;
121570 +/** ZSTD_rollingHash_compute() :
121571 + * Compute the rolling hash value of the buffer.
121572 + */
121573 +MEM_STATIC U64 ZSTD_rollingHash_compute(void const* buf, size_t size)
121575 +    return ZSTD_rollingHash_append(0, buf, size);
121578 +/** ZSTD_rollingHash_primePower() :
121579 + * Compute the primePower to be passed to ZSTD_rollingHash_rotate() for a hash
121580 + * over a window of length bytes.
121581 + */
121582 +MEM_STATIC U64 ZSTD_rollingHash_primePower(U32 length)
121584 +    return ZSTD_ipow(prime8bytes, length - 1);
121587 +/** ZSTD_rollingHash_rotate() :
121588 + * Rotate the rolling hash by one byte.
121589 + */
121590 +MEM_STATIC U64 ZSTD_rollingHash_rotate(U64 hash, BYTE toRemove, BYTE toAdd, U64 primePower)
121592 +    hash -= (toRemove + ZSTD_ROLL_HASH_CHAR_OFFSET) * primePower;
121593 +    hash *= prime8bytes;
121594 +    hash += toAdd + ZSTD_ROLL_HASH_CHAR_OFFSET;
121595 +    return hash;
121598 +/*-*************************************
121599 +*  Round buffer management
121600 +***************************************/
121601 +#if (ZSTD_WINDOWLOG_MAX_64 > 31)
121602 +# error "ZSTD_WINDOWLOG_MAX is too large : would overflow ZSTD_CURRENT_MAX"
121603 +#endif
121604 +/* Max current allowed */
121605 +#define ZSTD_CURRENT_MAX ((3U << 29) + (1U << ZSTD_WINDOWLOG_MAX))
121606 +/* Maximum chunk size before overflow correction needs to be called again */
121607 +#define ZSTD_CHUNKSIZE_MAX                                                     \
121608 +    ( ((U32)-1)                  /* Maximum ending current index */            \
121609 +    - ZSTD_CURRENT_MAX)          /* Maximum beginning lowLimit */
121612 + * ZSTD_window_clear():
121613 + * Clears the window containing the history by simply setting it to empty.
121614 + */
121615 +MEM_STATIC void ZSTD_window_clear(ZSTD_window_t* window)
121617 +    size_t const endT = (size_t)(window->nextSrc - window->base);
121618 +    U32 const end = (U32)endT;
121620 +    window->lowLimit = end;
121621 +    window->dictLimit = end;
121625 + * ZSTD_window_hasExtDict():
121626 + * Returns non-zero if the window has a non-empty extDict.
121627 + */
121628 +MEM_STATIC U32 ZSTD_window_hasExtDict(ZSTD_window_t const window)
121630 +    return window.lowLimit < window.dictLimit;
121634 + * ZSTD_matchState_dictMode():
121635 + * Inspects the provided matchState and figures out what dictMode should be
121636 + * passed to the compressor.
121637 + */
121638 +MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_matchState_t *ms)
121640 +    return ZSTD_window_hasExtDict(ms->window) ?
121641 +        ZSTD_extDict :
121642 +        ms->dictMatchState != NULL ?
121643 +            (ms->dictMatchState->dedicatedDictSearch ? ZSTD_dedicatedDictSearch : ZSTD_dictMatchState) :
121644 +            ZSTD_noDict;
121648 + * ZSTD_window_needOverflowCorrection():
121649 + * Returns non-zero if the indices are getting too large and need overflow
121650 + * protection.
121651 + */
121652 +MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window,
121653 +                                                  void const* srcEnd)
121655 +    U32 const curr = (U32)((BYTE const*)srcEnd - window.base);
121656 +    return curr > ZSTD_CURRENT_MAX;
121660 + * ZSTD_window_correctOverflow():
121661 + * Reduces the indices to protect from index overflow.
121662 + * Returns the correction made to the indices, which must be applied to every
121663 + * stored index.
121665 + * The least significant cycleLog bits of the indices must remain the same,
121666 + * which may be 0. Every index up to maxDist in the past must be valid.
121667 + * NOTE: (maxDist & cycleMask) must be zero.
121668 + */
121669 +MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
121670 +                                           U32 maxDist, void const* src)
121672 +    /* preemptive overflow correction:
121673 +     * 1. correction is large enough:
121674 +     *    lowLimit > (3<<29) ==> current > 3<<29 + 1<<windowLog
121675 +     *    1<<windowLog <= newCurrent < 1<<chainLog + 1<<windowLog
121676 +     *
121677 +     *    current - newCurrent
121678 +     *    > (3<<29 + 1<<windowLog) - (1<<windowLog + 1<<chainLog)
121679 +     *    > (3<<29) - (1<<chainLog)
121680 +     *    > (3<<29) - (1<<30)             (NOTE: chainLog <= 30)
121681 +     *    > 1<<29
121682 +     *
121683 +     * 2. (ip+ZSTD_CHUNKSIZE_MAX - cctx->base) doesn't overflow:
121684 +     *    After correction, current is less than (1<<chainLog + 1<<windowLog).
121685 +     *    In 64-bit mode we are safe, because we have 64-bit ptrdiff_t.
121686 +     *    In 32-bit mode we are safe, because (chainLog <= 29), so
121687 +     *    ip+ZSTD_CHUNKSIZE_MAX - cctx->base < 1<<32.
121688 +     * 3. (cctx->lowLimit + 1<<windowLog) < 1<<32:
121689 +     *    windowLog <= 31 ==> 3<<29 + 1<<windowLog < 7<<29 < 1<<32.
121690 +     */
121691 +    U32 const cycleMask = (1U << cycleLog) - 1;
121692 +    U32 const curr = (U32)((BYTE const*)src - window->base);
121693 +    U32 const currentCycle0 = curr & cycleMask;
121694 +    /* Exclude zero so that newCurrent - maxDist >= 1. */
121695 +    U32 const currentCycle1 = currentCycle0 == 0 ? (1U << cycleLog) : currentCycle0;
121696 +    U32 const newCurrent = currentCycle1 + maxDist;
121697 +    U32 const correction = curr - newCurrent;
121698 +    assert((maxDist & cycleMask) == 0);
121699 +    assert(curr > newCurrent);
121700 +    /* Loose bound, should be around 1<<29 (see above) */
121701 +    assert(correction > 1<<28);
121703 +    window->base += correction;
121704 +    window->dictBase += correction;
121705 +    if (window->lowLimit <= correction) window->lowLimit = 1;
121706 +    else window->lowLimit -= correction;
121707 +    if (window->dictLimit <= correction) window->dictLimit = 1;
121708 +    else window->dictLimit -= correction;
121710 +    /* Ensure we can still reference the full window. */
121711 +    assert(newCurrent >= maxDist);
121712 +    assert(newCurrent - maxDist >= 1);
121713 +    /* Ensure that lowLimit and dictLimit didn't underflow. */
121714 +    assert(window->lowLimit <= newCurrent);
121715 +    assert(window->dictLimit <= newCurrent);
121717 +    DEBUGLOG(4, "Correction of 0x%x bytes to lowLimit=0x%x", correction,
121718 +             window->lowLimit);
121719 +    return correction;
121723 + * ZSTD_window_enforceMaxDist():
121724 + * Updates lowLimit so that:
121725 + *    (srcEnd - base) - lowLimit == maxDist + loadedDictEnd
121727 + * It ensures index is valid as long as index >= lowLimit.
121728 + * This must be called before a block compression call.
121730 + * loadedDictEnd is only defined if a dictionary is in use for current compression.
121731 + * As the name implies, loadedDictEnd represents the index at end of dictionary.
121732 + * The value lies within context's referential, it can be directly compared to blockEndIdx.
121734 + * If loadedDictEndPtr is NULL, no dictionary is in use, and we use loadedDictEnd == 0.
121735 + * If loadedDictEndPtr is not NULL, we set it to zero after updating lowLimit.
121736 + * This is because dictionaries are allowed to be referenced fully
121737 + * as long as the last byte of the dictionary is in the window.
121738 + * Once input has progressed beyond window size, dictionary cannot be referenced anymore.
121740 + * In normal dict mode, the dictionary lies between lowLimit and dictLimit.
121741 + * In dictMatchState mode, lowLimit and dictLimit are the same,
121742 + * and the dictionary is below them.
121743 + * forceWindow and dictMatchState are therefore incompatible.
121744 + */
121745 +MEM_STATIC void
121746 +ZSTD_window_enforceMaxDist(ZSTD_window_t* window,
121747 +                     const void* blockEnd,
121748 +                           U32   maxDist,
121749 +                           U32*  loadedDictEndPtr,
121750 +                     const ZSTD_matchState_t** dictMatchStatePtr)
121752 +    U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
121753 +    U32 const loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0;
121754 +    DEBUGLOG(5, "ZSTD_window_enforceMaxDist: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
121755 +                (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
121757 +    /* - When there is no dictionary : loadedDictEnd == 0.
121758 +         In which case, the test (blockEndIdx > maxDist) is merely to avoid
121759 +         overflowing next operation `newLowLimit = blockEndIdx - maxDist`.
121760 +       - When there is a standard dictionary :
121761 +         Index referential is copied from the dictionary,
121762 +         which means it starts from 0.
121763 +         In which case, loadedDictEnd == dictSize,
121764 +         and it makes sense to compare `blockEndIdx > maxDist + dictSize`
121765 +         since `blockEndIdx` also starts from zero.
121766 +       - When there is an attached dictionary :
121767 +         loadedDictEnd is expressed within the referential of the context,
121768 +         so it can be directly compared against blockEndIdx.
121769 +    */
121770 +    if (blockEndIdx > maxDist + loadedDictEnd) {
121771 +        U32 const newLowLimit = blockEndIdx - maxDist;
121772 +        if (window->lowLimit < newLowLimit) window->lowLimit = newLowLimit;
121773 +        if (window->dictLimit < window->lowLimit) {
121774 +            DEBUGLOG(5, "Update dictLimit to match lowLimit, from %u to %u",
121775 +                        (unsigned)window->dictLimit, (unsigned)window->lowLimit);
121776 +            window->dictLimit = window->lowLimit;
121777 +        }
121778 +        /* On reaching window size, dictionaries are invalidated */
121779 +        if (loadedDictEndPtr) *loadedDictEndPtr = 0;
121780 +        if (dictMatchStatePtr) *dictMatchStatePtr = NULL;
121781 +    }
121784 +/* Similar to ZSTD_window_enforceMaxDist(),
121785 + * but only invalidates dictionary
121786 + * when input progresses beyond window size.
121787 + * assumption : loadedDictEndPtr and dictMatchStatePtr are valid (non NULL)
121788 + *              loadedDictEnd uses same referential as window->base
121789 + *              maxDist is the window size */
121790 +MEM_STATIC void
121791 +ZSTD_checkDictValidity(const ZSTD_window_t* window,
121792 +                       const void* blockEnd,
121793 +                             U32   maxDist,
121794 +                             U32*  loadedDictEndPtr,
121795 +                       const ZSTD_matchState_t** dictMatchStatePtr)
121797 +    assert(loadedDictEndPtr != NULL);
121798 +    assert(dictMatchStatePtr != NULL);
121799 +    {   U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
121800 +        U32 const loadedDictEnd = *loadedDictEndPtr;
121801 +        DEBUGLOG(5, "ZSTD_checkDictValidity: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
121802 +                    (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
121803 +        assert(blockEndIdx >= loadedDictEnd);
121805 +        if (blockEndIdx > loadedDictEnd + maxDist) {
121806 +            /* On reaching window size, dictionaries are invalidated.
121807 +             * For simplification, if window size is reached anywhere within next block,
121808 +             * the dictionary is invalidated for the full block.
121809 +             */
121810 +            DEBUGLOG(6, "invalidating dictionary for current block (distance > windowSize)");
121811 +            *loadedDictEndPtr = 0;
121812 +            *dictMatchStatePtr = NULL;
121813 +        } else {
121814 +            if (*loadedDictEndPtr != 0) {
121815 +                DEBUGLOG(6, "dictionary considered valid for current block");
121816 +    }   }   }
121819 +MEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) {
121820 +    ZSTD_memset(window, 0, sizeof(*window));
121821 +    window->base = (BYTE const*)"";
121822 +    window->dictBase = (BYTE const*)"";
121823 +    window->dictLimit = 1;    /* start from 1, so that 1st position is valid */
121824 +    window->lowLimit = 1;     /* it ensures first and later CCtx usages compress the same */
121825 +    window->nextSrc = window->base + 1;   /* see issue #1241 */
121829 + * ZSTD_window_update():
121830 + * Updates the window by appending [src, src + srcSize) to the window.
121831 + * If it is not contiguous, the current prefix becomes the extDict, and we
121832 + * forget about the extDict. Handles overlap of the prefix and extDict.
121833 + * Returns non-zero if the segment is contiguous.
121834 + */
121835 +MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
121836 +                                  void const* src, size_t srcSize)
121838 +    BYTE const* const ip = (BYTE const*)src;
121839 +    U32 contiguous = 1;
121840 +    DEBUGLOG(5, "ZSTD_window_update");
121841 +    if (srcSize == 0)
121842 +        return contiguous;
121843 +    assert(window->base != NULL);
121844 +    assert(window->dictBase != NULL);
121845 +    /* Check if blocks follow each other */
121846 +    if (src != window->nextSrc) {
121847 +        /* not contiguous */
121848 +        size_t const distanceFromBase = (size_t)(window->nextSrc - window->base);
121849 +        DEBUGLOG(5, "Non contiguous blocks, new segment starts at %u", window->dictLimit);
121850 +        window->lowLimit = window->dictLimit;
121851 +        assert(distanceFromBase == (size_t)(U32)distanceFromBase);  /* should never overflow */
121852 +        window->dictLimit = (U32)distanceFromBase;
121853 +        window->dictBase = window->base;
121854 +        window->base = ip - distanceFromBase;
121855 +        /* ms->nextToUpdate = window->dictLimit; */
121856 +        if (window->dictLimit - window->lowLimit < HASH_READ_SIZE) window->lowLimit = window->dictLimit;   /* too small extDict */
121857 +        contiguous = 0;
121858 +    }
121859 +    window->nextSrc = ip + srcSize;
121860 +    /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
121861 +    if ( (ip+srcSize > window->dictBase + window->lowLimit)
121862 +       & (ip < window->dictBase + window->dictLimit)) {
121863 +        ptrdiff_t const highInputIdx = (ip + srcSize) - window->dictBase;
121864 +        U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx;
121865 +        window->lowLimit = lowLimitMax;
121866 +        DEBUGLOG(5, "Overlapping extDict and input : new lowLimit = %u", window->lowLimit);
121867 +    }
121868 +    return contiguous;
121872 + * Returns the lowest allowed match index. It may either be in the ext-dict or the prefix.
121873 + */
121874 +MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog)
121876 +    U32    const maxDistance = 1U << windowLog;
121877 +    U32    const lowestValid = ms->window.lowLimit;
121878 +    U32    const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
121879 +    U32    const isDictionary = (ms->loadedDictEnd != 0);
121880 +    /* When using a dictionary the entire dictionary is valid if a single byte of the dictionary
121881 +     * is within the window. We invalidate the dictionary (and set loadedDictEnd to 0) when it isn't
121882 +     * valid for the entire block. So this check is sufficient to find the lowest valid match index.
121883 +     */
121884 +    U32    const matchLowest = isDictionary ? lowestValid : withinWindow;
121885 +    return matchLowest;
121889 + * Returns the lowest allowed match index in the prefix.
121890 + */
121891 +MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog)
121893 +    U32    const maxDistance = 1U << windowLog;
121894 +    U32    const lowestValid = ms->window.dictLimit;
121895 +    U32    const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
121896 +    U32    const isDictionary = (ms->loadedDictEnd != 0);
121897 +    /* When computing the lowest prefix index we need to take the dictionary into account to handle
121898 +     * the edge case where the dictionary and the source are contiguous in memory.
121899 +     */
121900 +    U32    const matchLowest = isDictionary ? lowestValid : withinWindow;
121901 +    return matchLowest;
121906 +/* debug functions */
121907 +#if (DEBUGLEVEL>=2)
121909 +MEM_STATIC double ZSTD_fWeight(U32 rawStat)
121911 +    U32 const fp_accuracy = 8;
121912 +    U32 const fp_multiplier = (1 << fp_accuracy);
121913 +    U32 const newStat = rawStat + 1;
121914 +    U32 const hb = ZSTD_highbit32(newStat);
121915 +    U32 const BWeight = hb * fp_multiplier;
121916 +    U32 const FWeight = (newStat << fp_accuracy) >> hb;
121917 +    U32 const weight = BWeight + FWeight;
121918 +    assert(hb + fp_accuracy < 31);
121919 +    return (double)weight / fp_multiplier;
121922 +/* display a table content,
121923 + * listing each element, its frequency, and its predicted bit cost */
121924 +MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max)
121926 +    unsigned u, sum;
121927 +    for (u=0, sum=0; u<=max; u++) sum += table[u];
121928 +    DEBUGLOG(2, "total nb elts: %u", sum);
121929 +    for (u=0; u<=max; u++) {
121930 +        DEBUGLOG(2, "%2u: %5u  (%.2f)",
121931 +                u, table[u], ZSTD_fWeight(sum) - ZSTD_fWeight(table[u]) );
121932 +    }
121935 +#endif
121939 +/* ===============================================================
121940 + * Shared internal declarations
121941 + * These prototypes may be called from sources not in lib/compress
121942 + * =============================================================== */
121944 +/* ZSTD_loadCEntropy() :
121945 + * dict : must point at beginning of a valid zstd dictionary.
121946 + * return : size of dictionary header (size of magic number + dict ID + entropy tables)
121947 + * assumptions : magic number supposed already checked
121948 + *               and dictSize >= 8 */
121949 +size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
121950 +                         const void* const dict, size_t dictSize);
121952 +void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs);
121954 +/* ==============================================================
121955 + * Private declarations
121956 + * These prototypes shall only be called from within lib/compress
121957 + * ============================================================== */
121959 +/* ZSTD_getCParamsFromCCtxParams() :
121960 + * cParams are built depending on compressionLevel, src size hints,
121961 + * LDM and manually set compression parameters.
121962 + * Note: srcSizeHint == 0 means 0!
121963 + */
121964 +ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
121965 +        const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
121967 +/*! ZSTD_initCStream_internal() :
121968 + *  Private use only. Init streaming operation.
121969 + *  expects params to be valid.
121970 + *  must receive dict, or cdict, or none, but not both.
121971 + *  @return : 0, or an error code */
121972 +size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
121973 +                     const void* dict, size_t dictSize,
121974 +                     const ZSTD_CDict* cdict,
121975 +                     const ZSTD_CCtx_params* params, unsigned long long pledgedSrcSize);
121977 +void ZSTD_resetSeqStore(seqStore_t* ssPtr);
121979 +/*! ZSTD_getCParamsFromCDict() :
121980 + *  as the name implies */
121981 +ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict);
121983 +/* ZSTD_compressBegin_advanced_internal() :
121984 + * Private use only. To be called from zstdmt_compress.c. */
121985 +size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
121986 +                                    const void* dict, size_t dictSize,
121987 +                                    ZSTD_dictContentType_e dictContentType,
121988 +                                    ZSTD_dictTableLoadMethod_e dtlm,
121989 +                                    const ZSTD_CDict* cdict,
121990 +                                    const ZSTD_CCtx_params* params,
121991 +                                    unsigned long long pledgedSrcSize);
121993 +/* ZSTD_compress_advanced_internal() :
121994 + * Private use only. To be called from zstdmt_compress.c. */
121995 +size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx,
121996 +                                       void* dst, size_t dstCapacity,
121997 +                                 const void* src, size_t srcSize,
121998 +                                 const void* dict,size_t dictSize,
121999 +                                 const ZSTD_CCtx_params* params);
122002 +/* ZSTD_writeLastEmptyBlock() :
122003 + * output an empty Block with end-of-frame mark to complete a frame
122004 + * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
122005 + *           or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
122006 + */
122007 +size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity);
122010 +/* ZSTD_referenceExternalSequences() :
122011 + * Must be called before starting a compression operation.
122012 + * seqs must parse a prefix of the source.
122013 + * This cannot be used when long range matching is enabled.
122014 + * Zstd will use these sequences, and pass the literals to a secondary block
122015 + * compressor.
122016 + * @return : An error code on failure.
122017 + * NOTE: seqs are not verified! Invalid sequences can cause out-of-bounds memory
122018 + * access and data corruption.
122019 + */
122020 +size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq);
122022 +/** ZSTD_cycleLog() :
122023 + *  condition for correct operation : hashLog > 1 */
122024 +U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat);
122026 +/** ZSTD_CCtx_trace() :
122027 + *  Trace the end of a compression call.
122028 + */
122029 +void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize);
122031 +#endif /* ZSTD_COMPRESS_H */
122032 diff --git a/lib/zstd/compress/zstd_compress_literals.c b/lib/zstd/compress/zstd_compress_literals.c
122033 new file mode 100644
122034 index 000000000000..655bcda4d1f1
122035 --- /dev/null
122036 +++ b/lib/zstd/compress/zstd_compress_literals.c
122037 @@ -0,0 +1,158 @@
122039 + * Copyright (c) Yann Collet, Facebook, Inc.
122040 + * All rights reserved.
122042 + * This source code is licensed under both the BSD-style license (found in the
122043 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
122044 + * in the COPYING file in the root directory of this source tree).
122045 + * You may select, at your option, one of the above-listed licenses.
122046 + */
122048 + /*-*************************************
122049 + *  Dependencies
122050 + ***************************************/
122051 +#include "zstd_compress_literals.h"
122053 +size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
122055 +    BYTE* const ostart = (BYTE*)dst;
122056 +    U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);
122058 +    RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall, "");
122060 +    switch(flSize)
122061 +    {
122062 +        case 1: /* 2 - 1 - 5 */
122063 +            ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3));
122064 +            break;
122065 +        case 2: /* 2 - 2 - 12 */
122066 +            MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4)));
122067 +            break;
122068 +        case 3: /* 2 - 2 - 20 */
122069 +            MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4)));
122070 +            break;
122071 +        default:   /* not necessary : flSize is {1,2,3} */
122072 +            assert(0);
122073 +    }
122075 +    ZSTD_memcpy(ostart + flSize, src, srcSize);
122076 +    DEBUGLOG(5, "Raw literals: %u -> %u", (U32)srcSize, (U32)(srcSize + flSize));
122077 +    return srcSize + flSize;
122080 +size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
122082 +    BYTE* const ostart = (BYTE*)dst;
122083 +    U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);
122085 +    (void)dstCapacity;  /* dstCapacity already guaranteed to be >=4, hence large enough */
122087 +    switch(flSize)
122088 +    {
122089 +        case 1: /* 2 - 1 - 5 */
122090 +            ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3));
122091 +            break;
122092 +        case 2: /* 2 - 2 - 12 */
122093 +            MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4)));
122094 +            break;
122095 +        case 3: /* 2 - 2 - 20 */
122096 +            MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4)));
122097 +            break;
122098 +        default:   /* not necessary : flSize is {1,2,3} */
122099 +            assert(0);
122100 +    }
122102 +    ostart[flSize] = *(const BYTE*)src;
122103 +    DEBUGLOG(5, "RLE literals: %u -> %u", (U32)srcSize, (U32)flSize + 1);
122104 +    return flSize+1;
122107 +size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
122108 +                              ZSTD_hufCTables_t* nextHuf,
122109 +                              ZSTD_strategy strategy, int disableLiteralCompression,
122110 +                              void* dst, size_t dstCapacity,
122111 +                        const void* src, size_t srcSize,
122112 +                              void* entropyWorkspace, size_t entropyWorkspaceSize,
122113 +                        const int bmi2)
122115 +    size_t const minGain = ZSTD_minGain(srcSize, strategy);
122116 +    size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
122117 +    BYTE*  const ostart = (BYTE*)dst;
122118 +    U32 singleStream = srcSize < 256;
122119 +    symbolEncodingType_e hType = set_compressed;
122120 +    size_t cLitSize;
122122 +    DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i srcSize=%u)",
122123 +                disableLiteralCompression, (U32)srcSize);
122125 +    /* Prepare nextEntropy assuming reusing the existing table */
122126 +    ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
122128 +    if (disableLiteralCompression)
122129 +        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
122131 +    /* small ? don't even attempt compression (speed opt) */
122132 +#   define COMPRESS_LITERALS_SIZE_MIN 63
122133 +    {   size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
122134 +        if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
122135 +    }
122137 +    RETURN_ERROR_IF(dstCapacity < lhSize+1, dstSize_tooSmall, "not enough space for compression");
122138 +    {   HUF_repeat repeat = prevHuf->repeatMode;
122139 +        int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
122140 +        if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
122141 +        cLitSize = singleStream ?
122142 +            HUF_compress1X_repeat(
122143 +                ostart+lhSize, dstCapacity-lhSize, src, srcSize,
122144 +                HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,
122145 +                (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2) :
122146 +            HUF_compress4X_repeat(
122147 +                ostart+lhSize, dstCapacity-lhSize, src, srcSize,
122148 +                HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,
122149 +                (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2);
122150 +        if (repeat != HUF_repeat_none) {
122151 +            /* reused the existing table */
122152 +            DEBUGLOG(5, "Reusing previous huffman table");
122153 +            hType = set_repeat;
122154 +        }
122155 +    }
122157 +    if ((cLitSize==0) | (cLitSize >= srcSize - minGain) | ERR_isError(cLitSize)) {
122158 +        ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
122159 +        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
122160 +    }
122161 +    if (cLitSize==1) {
122162 +        ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
122163 +        return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
122164 +    }
122166 +    if (hType == set_compressed) {
122167 +        /* using a newly constructed table */
122168 +        nextHuf->repeatMode = HUF_repeat_check;
122169 +    }
122171 +    /* Build header */
122172 +    switch(lhSize)
122173 +    {
122174 +    case 3: /* 2 - 2 - 10 - 10 */
122175 +        {   U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
122176 +            MEM_writeLE24(ostart, lhc);
122177 +            break;
122178 +        }
122179 +    case 4: /* 2 - 2 - 14 - 14 */
122180 +        {   U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
122181 +            MEM_writeLE32(ostart, lhc);
122182 +            break;
122183 +        }
122184 +    case 5: /* 2 - 2 - 18 - 18 */
122185 +        {   U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
122186 +            MEM_writeLE32(ostart, lhc);
122187 +            ostart[4] = (BYTE)(cLitSize >> 10);
122188 +            break;
122189 +        }
122190 +    default:  /* not possible : lhSize is {3,4,5} */
122191 +        assert(0);
122192 +    }
122193 +    DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)srcSize, (U32)(lhSize+cLitSize));
122194 +    return lhSize+cLitSize;
122196 diff --git a/lib/zstd/compress/zstd_compress_literals.h b/lib/zstd/compress/zstd_compress_literals.h
122197 new file mode 100644
122198 index 000000000000..9904c0cd30a0
122199 --- /dev/null
122200 +++ b/lib/zstd/compress/zstd_compress_literals.h
122201 @@ -0,0 +1,29 @@
122203 + * Copyright (c) Yann Collet, Facebook, Inc.
122204 + * All rights reserved.
122206 + * This source code is licensed under both the BSD-style license (found in the
122207 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
122208 + * in the COPYING file in the root directory of this source tree).
122209 + * You may select, at your option, one of the above-listed licenses.
122210 + */
122212 +#ifndef ZSTD_COMPRESS_LITERALS_H
122213 +#define ZSTD_COMPRESS_LITERALS_H
122215 +#include "zstd_compress_internal.h" /* ZSTD_hufCTables_t, ZSTD_minGain() */
122218 +size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize);
122220 +size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize);
122222 +size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
122223 +                              ZSTD_hufCTables_t* nextHuf,
122224 +                              ZSTD_strategy strategy, int disableLiteralCompression,
122225 +                              void* dst, size_t dstCapacity,
122226 +                        const void* src, size_t srcSize,
122227 +                              void* entropyWorkspace, size_t entropyWorkspaceSize,
122228 +                        const int bmi2);
122230 +#endif /* ZSTD_COMPRESS_LITERALS_H */
122231 diff --git a/lib/zstd/compress/zstd_compress_sequences.c b/lib/zstd/compress/zstd_compress_sequences.c
122232 new file mode 100644
122233 index 000000000000..08a5b89019dd
122234 --- /dev/null
122235 +++ b/lib/zstd/compress/zstd_compress_sequences.c
122236 @@ -0,0 +1,439 @@
122238 + * Copyright (c) Yann Collet, Facebook, Inc.
122239 + * All rights reserved.
122241 + * This source code is licensed under both the BSD-style license (found in the
122242 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
122243 + * in the COPYING file in the root directory of this source tree).
122244 + * You may select, at your option, one of the above-listed licenses.
122245 + */
122247 + /*-*************************************
122248 + *  Dependencies
122249 + ***************************************/
122250 +#include "zstd_compress_sequences.h"
122253 + * -log2(x / 256) lookup table for x in [0, 256).
122254 + * If x == 0: Return 0
122255 + * Else: Return floor(-log2(x / 256) * 256)
122256 + */
122257 +static unsigned const kInverseProbabilityLog256[256] = {
122258 +    0,    2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162,
122259 +    1130, 1100, 1073, 1047, 1024, 1001, 980,  960,  941,  923,  906,  889,
122260 +    874,  859,  844,  830,  817,  804,  791,  779,  768,  756,  745,  734,
122261 +    724,  714,  704,  694,  685,  676,  667,  658,  650,  642,  633,  626,
122262 +    618,  610,  603,  595,  588,  581,  574,  567,  561,  554,  548,  542,
122263 +    535,  529,  523,  517,  512,  506,  500,  495,  489,  484,  478,  473,
122264 +    468,  463,  458,  453,  448,  443,  438,  434,  429,  424,  420,  415,
122265 +    411,  407,  402,  398,  394,  390,  386,  382,  377,  373,  370,  366,
122266 +    362,  358,  354,  350,  347,  343,  339,  336,  332,  329,  325,  322,
122267 +    318,  315,  311,  308,  305,  302,  298,  295,  292,  289,  286,  282,
122268 +    279,  276,  273,  270,  267,  264,  261,  258,  256,  253,  250,  247,
122269 +    244,  241,  239,  236,  233,  230,  228,  225,  222,  220,  217,  215,
122270 +    212,  209,  207,  204,  202,  199,  197,  194,  192,  190,  187,  185,
122271 +    182,  180,  178,  175,  173,  171,  168,  166,  164,  162,  159,  157,
122272 +    155,  153,  151,  149,  146,  144,  142,  140,  138,  136,  134,  132,
122273 +    130,  128,  126,  123,  121,  119,  117,  115,  114,  112,  110,  108,
122274 +    106,  104,  102,  100,  98,   96,   94,   93,   91,   89,   87,   85,
122275 +    83,   82,   80,   78,   76,   74,   73,   71,   69,   67,   66,   64,
122276 +    62,   61,   59,   57,   55,   54,   52,   50,   49,   47,   46,   44,
122277 +    42,   41,   39,   37,   36,   34,   33,   31,   30,   28,   26,   25,
122278 +    23,   22,   20,   19,   17,   16,   14,   13,   11,   10,   8,    7,
122279 +    5,    4,    2,    1,
122282 +static unsigned ZSTD_getFSEMaxSymbolValue(FSE_CTable const* ctable) {
122283 +  void const* ptr = ctable;
122284 +  U16 const* u16ptr = (U16 const*)ptr;
122285 +  U32 const maxSymbolValue = MEM_read16(u16ptr + 1);
122286 +  return maxSymbolValue;
122290 + * Returns true if we should use ncount=-1 else we should
122291 + * use ncount=1 for low probability symbols instead.
122292 + */
122293 +static unsigned ZSTD_useLowProbCount(size_t const nbSeq)
122295 +    /* Heuristic: This should cover most blocks <= 16K and
122296 +     * start to fade out after 16K to about 32K depending on
122297 +     * comprssibility.
122298 +     */
122299 +    return nbSeq >= 2048;
122303 + * Returns the cost in bytes of encoding the normalized count header.
122304 + * Returns an error if any of the helper functions return an error.
122305 + */
122306 +static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max,
122307 +                              size_t const nbSeq, unsigned const FSELog)
122309 +    BYTE wksp[FSE_NCOUNTBOUND];
122310 +    S16 norm[MaxSeq + 1];
122311 +    const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
122312 +    FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max, ZSTD_useLowProbCount(nbSeq)), "");
122313 +    return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog);
122317 + * Returns the cost in bits of encoding the distribution described by count
122318 + * using the entropy bound.
122319 + */
122320 +static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t const total)
122322 +    unsigned cost = 0;
122323 +    unsigned s;
122324 +    for (s = 0; s <= max; ++s) {
122325 +        unsigned norm = (unsigned)((256 * count[s]) / total);
122326 +        if (count[s] != 0 && norm == 0)
122327 +            norm = 1;
122328 +        assert(count[s] < total);
122329 +        cost += count[s] * kInverseProbabilityLog256[norm];
122330 +    }
122331 +    return cost >> 8;
122335 + * Returns the cost in bits of encoding the distribution in count using ctable.
122336 + * Returns an error if ctable cannot represent all the symbols in count.
122337 + */
122338 +size_t ZSTD_fseBitCost(
122339 +    FSE_CTable const* ctable,
122340 +    unsigned const* count,
122341 +    unsigned const max)
122343 +    unsigned const kAccuracyLog = 8;
122344 +    size_t cost = 0;
122345 +    unsigned s;
122346 +    FSE_CState_t cstate;
122347 +    FSE_initCState(&cstate, ctable);
122348 +    if (ZSTD_getFSEMaxSymbolValue(ctable) < max) {
122349 +        DEBUGLOG(5, "Repeat FSE_CTable has maxSymbolValue %u < %u",
122350 +                    ZSTD_getFSEMaxSymbolValue(ctable), max);
122351 +        return ERROR(GENERIC);
122352 +    }
122353 +    for (s = 0; s <= max; ++s) {
122354 +        unsigned const tableLog = cstate.stateLog;
122355 +        unsigned const badCost = (tableLog + 1) << kAccuracyLog;
122356 +        unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog);
122357 +        if (count[s] == 0)
122358 +            continue;
122359 +        if (bitCost >= badCost) {
122360 +            DEBUGLOG(5, "Repeat FSE_CTable has Prob[%u] == 0", s);
122361 +            return ERROR(GENERIC);
122362 +        }
122363 +        cost += (size_t)count[s] * bitCost;
122364 +    }
122365 +    return cost >> kAccuracyLog;
122369 + * Returns the cost in bits of encoding the distribution in count using the
122370 + * table described by norm. The max symbol support by norm is assumed >= max.
122371 + * norm must be valid for every symbol with non-zero probability in count.
122372 + */
122373 +size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
122374 +                             unsigned const* count, unsigned const max)
122376 +    unsigned const shift = 8 - accuracyLog;
122377 +    size_t cost = 0;
122378 +    unsigned s;
122379 +    assert(accuracyLog <= 8);
122380 +    for (s = 0; s <= max; ++s) {
122381 +        unsigned const normAcc = (norm[s] != -1) ? (unsigned)norm[s] : 1;
122382 +        unsigned const norm256 = normAcc << shift;
122383 +        assert(norm256 > 0);
122384 +        assert(norm256 < 256);
122385 +        cost += count[s] * kInverseProbabilityLog256[norm256];
122386 +    }
122387 +    return cost >> 8;
122390 +symbolEncodingType_e
122391 +ZSTD_selectEncodingType(
122392 +        FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
122393 +        size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
122394 +        FSE_CTable const* prevCTable,
122395 +        short const* defaultNorm, U32 defaultNormLog,
122396 +        ZSTD_defaultPolicy_e const isDefaultAllowed,
122397 +        ZSTD_strategy const strategy)
122399 +    ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0);
122400 +    if (mostFrequent == nbSeq) {
122401 +        *repeatMode = FSE_repeat_none;
122402 +        if (isDefaultAllowed && nbSeq <= 2) {
122403 +            /* Prefer set_basic over set_rle when there are 2 or less symbols,
122404 +             * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.
122405 +             * If basic encoding isn't possible, always choose RLE.
122406 +             */
122407 +            DEBUGLOG(5, "Selected set_basic");
122408 +            return set_basic;
122409 +        }
122410 +        DEBUGLOG(5, "Selected set_rle");
122411 +        return set_rle;
122412 +    }
122413 +    if (strategy < ZSTD_lazy) {
122414 +        if (isDefaultAllowed) {
122415 +            size_t const staticFse_nbSeq_max = 1000;
122416 +            size_t const mult = 10 - strategy;
122417 +            size_t const baseLog = 3;
122418 +            size_t const dynamicFse_nbSeq_min = (((size_t)1 << defaultNormLog) * mult) >> baseLog;  /* 28-36 for offset, 56-72 for lengths */
122419 +            assert(defaultNormLog >= 5 && defaultNormLog <= 6);  /* xx_DEFAULTNORMLOG */
122420 +            assert(mult <= 9 && mult >= 7);
122421 +            if ( (*repeatMode == FSE_repeat_valid)
122422 +              && (nbSeq < staticFse_nbSeq_max) ) {
122423 +                DEBUGLOG(5, "Selected set_repeat");
122424 +                return set_repeat;
122425 +            }
122426 +            if ( (nbSeq < dynamicFse_nbSeq_min)
122427 +              || (mostFrequent < (nbSeq >> (defaultNormLog-1))) ) {
122428 +                DEBUGLOG(5, "Selected set_basic");
122429 +                /* The format allows default tables to be repeated, but it isn't useful.
122430 +                 * When using simple heuristics to select encoding type, we don't want
122431 +                 * to confuse these tables with dictionaries. When running more careful
122432 +                 * analysis, we don't need to waste time checking both repeating tables
122433 +                 * and default tables.
122434 +                 */
122435 +                *repeatMode = FSE_repeat_none;
122436 +                return set_basic;
122437 +            }
122438 +        }
122439 +    } else {
122440 +        size_t const basicCost = isDefaultAllowed ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, count, max) : ERROR(GENERIC);
122441 +        size_t const repeatCost = *repeatMode != FSE_repeat_none ? ZSTD_fseBitCost(prevCTable, count, max) : ERROR(GENERIC);
122442 +        size_t const NCountCost = ZSTD_NCountCost(count, max, nbSeq, FSELog);
122443 +        size_t const compressedCost = (NCountCost << 3) + ZSTD_entropyCost(count, max, nbSeq);
122445 +        if (isDefaultAllowed) {
122446 +            assert(!ZSTD_isError(basicCost));
122447 +            assert(!(*repeatMode == FSE_repeat_valid && ZSTD_isError(repeatCost)));
122448 +        }
122449 +        assert(!ZSTD_isError(NCountCost));
122450 +        assert(compressedCost < ERROR(maxCode));
122451 +        DEBUGLOG(5, "Estimated bit costs: basic=%u\trepeat=%u\tcompressed=%u",
122452 +                    (unsigned)basicCost, (unsigned)repeatCost, (unsigned)compressedCost);
122453 +        if (basicCost <= repeatCost && basicCost <= compressedCost) {
122454 +            DEBUGLOG(5, "Selected set_basic");
122455 +            assert(isDefaultAllowed);
122456 +            *repeatMode = FSE_repeat_none;
122457 +            return set_basic;
122458 +        }
122459 +        if (repeatCost <= compressedCost) {
122460 +            DEBUGLOG(5, "Selected set_repeat");
122461 +            assert(!ZSTD_isError(repeatCost));
122462 +            return set_repeat;
122463 +        }
122464 +        assert(compressedCost < basicCost && compressedCost < repeatCost);
122465 +    }
122466 +    DEBUGLOG(5, "Selected set_compressed");
122467 +    *repeatMode = FSE_repeat_check;
122468 +    return set_compressed;
122471 +typedef struct {
122472 +    S16 norm[MaxSeq + 1];
122473 +    U32 wksp[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(MaxSeq, MaxFSELog)];
122474 +} ZSTD_BuildCTableWksp;
122476 +size_t
122477 +ZSTD_buildCTable(void* dst, size_t dstCapacity,
122478 +                FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
122479 +                unsigned* count, U32 max,
122480 +                const BYTE* codeTable, size_t nbSeq,
122481 +                const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
122482 +                const FSE_CTable* prevCTable, size_t prevCTableSize,
122483 +                void* entropyWorkspace, size_t entropyWorkspaceSize)
122485 +    BYTE* op = (BYTE*)dst;
122486 +    const BYTE* const oend = op + dstCapacity;
122487 +    DEBUGLOG(6, "ZSTD_buildCTable (dstCapacity=%u)", (unsigned)dstCapacity);
122489 +    switch (type) {
122490 +    case set_rle:
122491 +        FORWARD_IF_ERROR(FSE_buildCTable_rle(nextCTable, (BYTE)max), "");
122492 +        RETURN_ERROR_IF(dstCapacity==0, dstSize_tooSmall, "not enough space");
122493 +        *op = codeTable[0];
122494 +        return 1;
122495 +    case set_repeat:
122496 +        ZSTD_memcpy(nextCTable, prevCTable, prevCTableSize);
122497 +        return 0;
122498 +    case set_basic:
122499 +        FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, entropyWorkspace, entropyWorkspaceSize), "");  /* note : could be pre-calculated */
122500 +        return 0;
122501 +    case set_compressed: {
122502 +        ZSTD_BuildCTableWksp* wksp = (ZSTD_BuildCTableWksp*)entropyWorkspace;
122503 +        size_t nbSeq_1 = nbSeq;
122504 +        const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
122505 +        if (count[codeTable[nbSeq-1]] > 1) {
122506 +            count[codeTable[nbSeq-1]]--;
122507 +            nbSeq_1--;
122508 +        }
122509 +        assert(nbSeq_1 > 1);
122510 +        assert(entropyWorkspaceSize >= sizeof(ZSTD_BuildCTableWksp));
122511 +        (void)entropyWorkspaceSize;
122512 +        FORWARD_IF_ERROR(FSE_normalizeCount(wksp->norm, tableLog, count, nbSeq_1, max, ZSTD_useLowProbCount(nbSeq_1)), "");
122513 +        {   size_t const NCountSize = FSE_writeNCount(op, oend - op, wksp->norm, max, tableLog);   /* overflow protected */
122514 +            FORWARD_IF_ERROR(NCountSize, "FSE_writeNCount failed");
122515 +            FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, wksp->norm, max, tableLog, wksp->wksp, sizeof(wksp->wksp)), "");
122516 +            return NCountSize;
122517 +        }
122518 +    }
122519 +    default: assert(0); RETURN_ERROR(GENERIC, "impossible to reach");
122520 +    }
122523 +FORCE_INLINE_TEMPLATE size_t
122524 +ZSTD_encodeSequences_body(
122525 +            void* dst, size_t dstCapacity,
122526 +            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
122527 +            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
122528 +            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
122529 +            seqDef const* sequences, size_t nbSeq, int longOffsets)
122531 +    BIT_CStream_t blockStream;
122532 +    FSE_CState_t  stateMatchLength;
122533 +    FSE_CState_t  stateOffsetBits;
122534 +    FSE_CState_t  stateLitLength;
122536 +    RETURN_ERROR_IF(
122537 +        ERR_isError(BIT_initCStream(&blockStream, dst, dstCapacity)),
122538 +        dstSize_tooSmall, "not enough space remaining");
122539 +    DEBUGLOG(6, "available space for bitstream : %i  (dstCapacity=%u)",
122540 +                (int)(blockStream.endPtr - blockStream.startPtr),
122541 +                (unsigned)dstCapacity);
122543 +    /* first symbols */
122544 +    FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]);
122545 +    FSE_initCState2(&stateOffsetBits,  CTable_OffsetBits,  ofCodeTable[nbSeq-1]);
122546 +    FSE_initCState2(&stateLitLength,   CTable_LitLength,   llCodeTable[nbSeq-1]);
122547 +    BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
122548 +    if (MEM_32bits()) BIT_flushBits(&blockStream);
122549 +    BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]);
122550 +    if (MEM_32bits()) BIT_flushBits(&blockStream);
122551 +    if (longOffsets) {
122552 +        U32 const ofBits = ofCodeTable[nbSeq-1];
122553 +        unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
122554 +        if (extraBits) {
122555 +            BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits);
122556 +            BIT_flushBits(&blockStream);
122557 +        }
122558 +        BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits,
122559 +                    ofBits - extraBits);
122560 +    } else {
122561 +        BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]);
122562 +    }
122563 +    BIT_flushBits(&blockStream);
122565 +    {   size_t n;
122566 +        for (n=nbSeq-2 ; n<nbSeq ; n--) {      /* intentional underflow */
122567 +            BYTE const llCode = llCodeTable[n];
122568 +            BYTE const ofCode = ofCodeTable[n];
122569 +            BYTE const mlCode = mlCodeTable[n];
122570 +            U32  const llBits = LL_bits[llCode];
122571 +            U32  const ofBits = ofCode;
122572 +            U32  const mlBits = ML_bits[mlCode];
122573 +            DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u",
122574 +                        (unsigned)sequences[n].litLength,
122575 +                        (unsigned)sequences[n].matchLength + MINMATCH,
122576 +                        (unsigned)sequences[n].offset);
122577 +                                                                            /* 32b*/  /* 64b*/
122578 +                                                                            /* (7)*/  /* (7)*/
122579 +            FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode);       /* 15 */  /* 15 */
122580 +            FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode);      /* 24 */  /* 24 */
122581 +            if (MEM_32bits()) BIT_flushBits(&blockStream);                  /* (7)*/
122582 +            FSE_encodeSymbol(&blockStream, &stateLitLength, llCode);        /* 16 */  /* 33 */
122583 +            if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog)))
122584 +                BIT_flushBits(&blockStream);                                /* (7)*/
122585 +            BIT_addBits(&blockStream, sequences[n].litLength, llBits);
122586 +            if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
122587 +            BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
122588 +            if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream);
122589 +            if (longOffsets) {
122590 +                unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
122591 +                if (extraBits) {
122592 +                    BIT_addBits(&blockStream, sequences[n].offset, extraBits);
122593 +                    BIT_flushBits(&blockStream);                            /* (7)*/
122594 +                }
122595 +                BIT_addBits(&blockStream, sequences[n].offset >> extraBits,
122596 +                            ofBits - extraBits);                            /* 31 */
122597 +            } else {
122598 +                BIT_addBits(&blockStream, sequences[n].offset, ofBits);     /* 31 */
122599 +            }
122600 +            BIT_flushBits(&blockStream);                                    /* (7)*/
122601 +            DEBUGLOG(7, "remaining space : %i", (int)(blockStream.endPtr - blockStream.ptr));
122602 +    }   }
122604 +    DEBUGLOG(6, "ZSTD_encodeSequences: flushing ML state with %u bits", stateMatchLength.stateLog);
122605 +    FSE_flushCState(&blockStream, &stateMatchLength);
122606 +    DEBUGLOG(6, "ZSTD_encodeSequences: flushing Off state with %u bits", stateOffsetBits.stateLog);
122607 +    FSE_flushCState(&blockStream, &stateOffsetBits);
122608 +    DEBUGLOG(6, "ZSTD_encodeSequences: flushing LL state with %u bits", stateLitLength.stateLog);
122609 +    FSE_flushCState(&blockStream, &stateLitLength);
122611 +    {   size_t const streamSize = BIT_closeCStream(&blockStream);
122612 +        RETURN_ERROR_IF(streamSize==0, dstSize_tooSmall, "not enough space");
122613 +        return streamSize;
122614 +    }
122617 +static size_t
122618 +ZSTD_encodeSequences_default(
122619 +            void* dst, size_t dstCapacity,
122620 +            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
122621 +            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
122622 +            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
122623 +            seqDef const* sequences, size_t nbSeq, int longOffsets)
122625 +    return ZSTD_encodeSequences_body(dst, dstCapacity,
122626 +                                    CTable_MatchLength, mlCodeTable,
122627 +                                    CTable_OffsetBits, ofCodeTable,
122628 +                                    CTable_LitLength, llCodeTable,
122629 +                                    sequences, nbSeq, longOffsets);
122633 +#if DYNAMIC_BMI2
122635 +static TARGET_ATTRIBUTE("bmi2") size_t
122636 +ZSTD_encodeSequences_bmi2(
122637 +            void* dst, size_t dstCapacity,
122638 +            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
122639 +            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
122640 +            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
122641 +            seqDef const* sequences, size_t nbSeq, int longOffsets)
122643 +    return ZSTD_encodeSequences_body(dst, dstCapacity,
122644 +                                    CTable_MatchLength, mlCodeTable,
122645 +                                    CTable_OffsetBits, ofCodeTable,
122646 +                                    CTable_LitLength, llCodeTable,
122647 +                                    sequences, nbSeq, longOffsets);
122650 +#endif
122652 +size_t ZSTD_encodeSequences(
122653 +            void* dst, size_t dstCapacity,
122654 +            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
122655 +            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
122656 +            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
122657 +            seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2)
122659 +    DEBUGLOG(5, "ZSTD_encodeSequences: dstCapacity = %u", (unsigned)dstCapacity);
122660 +#if DYNAMIC_BMI2
122661 +    if (bmi2) {
122662 +        return ZSTD_encodeSequences_bmi2(dst, dstCapacity,
122663 +                                         CTable_MatchLength, mlCodeTable,
122664 +                                         CTable_OffsetBits, ofCodeTable,
122665 +                                         CTable_LitLength, llCodeTable,
122666 +                                         sequences, nbSeq, longOffsets);
122667 +    }
122668 +#endif
122669 +    (void)bmi2;
122670 +    return ZSTD_encodeSequences_default(dst, dstCapacity,
122671 +                                        CTable_MatchLength, mlCodeTable,
122672 +                                        CTable_OffsetBits, ofCodeTable,
122673 +                                        CTable_LitLength, llCodeTable,
122674 +                                        sequences, nbSeq, longOffsets);
122676 diff --git a/lib/zstd/compress/zstd_compress_sequences.h b/lib/zstd/compress/zstd_compress_sequences.h
122677 new file mode 100644
122678 index 000000000000..7991364c2f71
122679 --- /dev/null
122680 +++ b/lib/zstd/compress/zstd_compress_sequences.h
122681 @@ -0,0 +1,54 @@
122683 + * Copyright (c) Yann Collet, Facebook, Inc.
122684 + * All rights reserved.
122686 + * This source code is licensed under both the BSD-style license (found in the
122687 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
122688 + * in the COPYING file in the root directory of this source tree).
122689 + * You may select, at your option, one of the above-listed licenses.
122690 + */
122692 +#ifndef ZSTD_COMPRESS_SEQUENCES_H
122693 +#define ZSTD_COMPRESS_SEQUENCES_H
122695 +#include "../common/fse.h" /* FSE_repeat, FSE_CTable */
122696 +#include "../common/zstd_internal.h" /* symbolEncodingType_e, ZSTD_strategy */
122698 +typedef enum {
122699 +    ZSTD_defaultDisallowed = 0,
122700 +    ZSTD_defaultAllowed = 1
122701 +} ZSTD_defaultPolicy_e;
122703 +symbolEncodingType_e
122704 +ZSTD_selectEncodingType(
122705 +        FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
122706 +        size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
122707 +        FSE_CTable const* prevCTable,
122708 +        short const* defaultNorm, U32 defaultNormLog,
122709 +        ZSTD_defaultPolicy_e const isDefaultAllowed,
122710 +        ZSTD_strategy const strategy);
122712 +size_t
122713 +ZSTD_buildCTable(void* dst, size_t dstCapacity,
122714 +                FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
122715 +                unsigned* count, U32 max,
122716 +                const BYTE* codeTable, size_t nbSeq,
122717 +                const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
122718 +                const FSE_CTable* prevCTable, size_t prevCTableSize,
122719 +                void* entropyWorkspace, size_t entropyWorkspaceSize);
122721 +size_t ZSTD_encodeSequences(
122722 +            void* dst, size_t dstCapacity,
122723 +            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
122724 +            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
122725 +            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
122726 +            seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2);
122728 +size_t ZSTD_fseBitCost(
122729 +    FSE_CTable const* ctable,
122730 +    unsigned const* count,
122731 +    unsigned const max);
122733 +size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
122734 +                             unsigned const* count, unsigned const max);
122735 +#endif /* ZSTD_COMPRESS_SEQUENCES_H */
122736 diff --git a/lib/zstd/compress/zstd_compress_superblock.c b/lib/zstd/compress/zstd_compress_superblock.c
122737 new file mode 100644
122738 index 000000000000..767f73f5bf3d
122739 --- /dev/null
122740 +++ b/lib/zstd/compress/zstd_compress_superblock.c
122741 @@ -0,0 +1,850 @@
122743 + * Copyright (c) Yann Collet, Facebook, Inc.
122744 + * All rights reserved.
122746 + * This source code is licensed under both the BSD-style license (found in the
122747 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
122748 + * in the COPYING file in the root directory of this source tree).
122749 + * You may select, at your option, one of the above-listed licenses.
122750 + */
122752 + /*-*************************************
122753 + *  Dependencies
122754 + ***************************************/
122755 +#include "zstd_compress_superblock.h"
122757 +#include "../common/zstd_internal.h"  /* ZSTD_getSequenceLength */
122758 +#include "hist.h"                     /* HIST_countFast_wksp */
122759 +#include "zstd_compress_internal.h"
122760 +#include "zstd_compress_sequences.h"
122761 +#include "zstd_compress_literals.h"
122763 +/*-*************************************
122764 +*  Superblock entropy buffer structs
122765 +***************************************/
122766 +/** ZSTD_hufCTablesMetadata_t :
122767 + *  Stores Literals Block Type for a super-block in hType, and
122768 + *  huffman tree description in hufDesBuffer.
122769 + *  hufDesSize refers to the size of huffman tree description in bytes.
122770 + *  This metadata is populated in ZSTD_buildSuperBlockEntropy_literal() */
122771 +typedef struct {
122772 +    symbolEncodingType_e hType;
122773 +    BYTE hufDesBuffer[ZSTD_MAX_HUF_HEADER_SIZE];
122774 +    size_t hufDesSize;
122775 +} ZSTD_hufCTablesMetadata_t;
122777 +/** ZSTD_fseCTablesMetadata_t :
122778 + *  Stores symbol compression modes for a super-block in {ll, ol, ml}Type, and
122779 + *  fse tables in fseTablesBuffer.
122780 + *  fseTablesSize refers to the size of fse tables in bytes.
122781 + *  This metadata is populated in ZSTD_buildSuperBlockEntropy_sequences() */
122782 +typedef struct {
122783 +    symbolEncodingType_e llType;
122784 +    symbolEncodingType_e ofType;
122785 +    symbolEncodingType_e mlType;
122786 +    BYTE fseTablesBuffer[ZSTD_MAX_FSE_HEADERS_SIZE];
122787 +    size_t fseTablesSize;
122788 +    size_t lastCountSize; /* This is to account for bug in 1.3.4. More detail in ZSTD_compressSubBlock_sequences() */
122789 +} ZSTD_fseCTablesMetadata_t;
122791 +typedef struct {
122792 +    ZSTD_hufCTablesMetadata_t hufMetadata;
122793 +    ZSTD_fseCTablesMetadata_t fseMetadata;
122794 +} ZSTD_entropyCTablesMetadata_t;
122797 +/** ZSTD_buildSuperBlockEntropy_literal() :
122798 + *  Builds entropy for the super-block literals.
122799 + *  Stores literals block type (raw, rle, compressed, repeat) and
122800 + *  huffman description table to hufMetadata.
122801 + *  @return : size of huffman description table or error code */
122802 +static size_t ZSTD_buildSuperBlockEntropy_literal(void* const src, size_t srcSize,
122803 +                                            const ZSTD_hufCTables_t* prevHuf,
122804 +                                                  ZSTD_hufCTables_t* nextHuf,
122805 +                                                  ZSTD_hufCTablesMetadata_t* hufMetadata,
122806 +                                                  const int disableLiteralsCompression,
122807 +                                                  void* workspace, size_t wkspSize)
122809 +    BYTE* const wkspStart = (BYTE*)workspace;
122810 +    BYTE* const wkspEnd = wkspStart + wkspSize;
122811 +    BYTE* const countWkspStart = wkspStart;
122812 +    unsigned* const countWksp = (unsigned*)workspace;
122813 +    const size_t countWkspSize = (HUF_SYMBOLVALUE_MAX + 1) * sizeof(unsigned);
122814 +    BYTE* const nodeWksp = countWkspStart + countWkspSize;
122815 +    const size_t nodeWkspSize = wkspEnd-nodeWksp;
122816 +    unsigned maxSymbolValue = 255;
122817 +    unsigned huffLog = HUF_TABLELOG_DEFAULT;
122818 +    HUF_repeat repeat = prevHuf->repeatMode;
122820 +    DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy_literal (srcSize=%zu)", srcSize);
122822 +    /* Prepare nextEntropy assuming reusing the existing table */
122823 +    ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
122825 +    if (disableLiteralsCompression) {
122826 +        DEBUGLOG(5, "set_basic - disabled");
122827 +        hufMetadata->hType = set_basic;
122828 +        return 0;
122829 +    }
122831 +    /* small ? don't even attempt compression (speed opt) */
122832 +#   define COMPRESS_LITERALS_SIZE_MIN 63
122833 +    {   size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
122834 +        if (srcSize <= minLitSize) {
122835 +            DEBUGLOG(5, "set_basic - too small");
122836 +            hufMetadata->hType = set_basic;
122837 +            return 0;
122838 +        }
122839 +    }
122841 +    /* Scan input and build symbol stats */
122842 +    {   size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)src, srcSize, workspace, wkspSize);
122843 +        FORWARD_IF_ERROR(largest, "HIST_count_wksp failed");
122844 +        if (largest == srcSize) {
122845 +            DEBUGLOG(5, "set_rle");
122846 +            hufMetadata->hType = set_rle;
122847 +            return 0;
122848 +        }
122849 +        if (largest <= (srcSize >> 7)+4) {
122850 +            DEBUGLOG(5, "set_basic - no gain");
122851 +            hufMetadata->hType = set_basic;
122852 +            return 0;
122853 +        }
122854 +    }
122856 +    /* Validate the previous Huffman table */
122857 +    if (repeat == HUF_repeat_check && !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) {
122858 +        repeat = HUF_repeat_none;
122859 +    }
122861 +    /* Build Huffman Tree */
122862 +    ZSTD_memset(nextHuf->CTable, 0, sizeof(nextHuf->CTable));
122863 +    huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
122864 +    {   size_t const maxBits = HUF_buildCTable_wksp((HUF_CElt*)nextHuf->CTable, countWksp,
122865 +                                                    maxSymbolValue, huffLog,
122866 +                                                    nodeWksp, nodeWkspSize);
122867 +        FORWARD_IF_ERROR(maxBits, "HUF_buildCTable_wksp");
122868 +        huffLog = (U32)maxBits;
122869 +        {   /* Build and write the CTable */
122870 +            size_t const newCSize = HUF_estimateCompressedSize(
122871 +                    (HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue);
122872 +            size_t const hSize = HUF_writeCTable_wksp(
122873 +                    hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer),
122874 +                    (HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog,
122875 +                    nodeWksp, nodeWkspSize);
122876 +            /* Check against repeating the previous CTable */
122877 +            if (repeat != HUF_repeat_none) {
122878 +                size_t const oldCSize = HUF_estimateCompressedSize(
122879 +                        (HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue);
122880 +                if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) {
122881 +                    DEBUGLOG(5, "set_repeat - smaller");
122882 +                    ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
122883 +                    hufMetadata->hType = set_repeat;
122884 +                    return 0;
122885 +                }
122886 +            }
122887 +            if (newCSize + hSize >= srcSize) {
122888 +                DEBUGLOG(5, "set_basic - no gains");
122889 +                ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
122890 +                hufMetadata->hType = set_basic;
122891 +                return 0;
122892 +            }
122893 +            DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize);
122894 +            hufMetadata->hType = set_compressed;
122895 +            nextHuf->repeatMode = HUF_repeat_check;
122896 +            return hSize;
122897 +        }
122898 +    }
122901 +/** ZSTD_buildSuperBlockEntropy_sequences() :
122902 + *  Builds entropy for the super-block sequences.
122903 + *  Stores symbol compression modes and fse table to fseMetadata.
122904 + *  @return : size of fse tables or error code */
122905 +static size_t ZSTD_buildSuperBlockEntropy_sequences(seqStore_t* seqStorePtr,
122906 +                                              const ZSTD_fseCTables_t* prevEntropy,
122907 +                                                    ZSTD_fseCTables_t* nextEntropy,
122908 +                                              const ZSTD_CCtx_params* cctxParams,
122909 +                                                    ZSTD_fseCTablesMetadata_t* fseMetadata,
122910 +                                                    void* workspace, size_t wkspSize)
122912 +    BYTE* const wkspStart = (BYTE*)workspace;
122913 +    BYTE* const wkspEnd = wkspStart + wkspSize;
122914 +    BYTE* const countWkspStart = wkspStart;
122915 +    unsigned* const countWksp = (unsigned*)workspace;
122916 +    const size_t countWkspSize = (MaxSeq + 1) * sizeof(unsigned);
122917 +    BYTE* const cTableWksp = countWkspStart + countWkspSize;
122918 +    const size_t cTableWkspSize = wkspEnd-cTableWksp;
122919 +    ZSTD_strategy const strategy = cctxParams->cParams.strategy;
122920 +    FSE_CTable* CTable_LitLength = nextEntropy->litlengthCTable;
122921 +    FSE_CTable* CTable_OffsetBits = nextEntropy->offcodeCTable;
122922 +    FSE_CTable* CTable_MatchLength = nextEntropy->matchlengthCTable;
122923 +    const BYTE* const ofCodeTable = seqStorePtr->ofCode;
122924 +    const BYTE* const llCodeTable = seqStorePtr->llCode;
122925 +    const BYTE* const mlCodeTable = seqStorePtr->mlCode;
122926 +    size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
122927 +    BYTE* const ostart = fseMetadata->fseTablesBuffer;
122928 +    BYTE* const oend = ostart + sizeof(fseMetadata->fseTablesBuffer);
122929 +    BYTE* op = ostart;
122931 +    assert(cTableWkspSize >= (1 << MaxFSELog) * sizeof(FSE_FUNCTION_TYPE));
122932 +    DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy_sequences (nbSeq=%zu)", nbSeq);
122933 +    ZSTD_memset(workspace, 0, wkspSize);
122935 +    fseMetadata->lastCountSize = 0;
122936 +    /* convert length/distances into codes */
122937 +    ZSTD_seqToCodes(seqStorePtr);
122938 +    /* build CTable for Literal Lengths */
122939 +    {   U32 LLtype;
122940 +        unsigned max = MaxLL;
122941 +        size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, llCodeTable, nbSeq, workspace, wkspSize);  /* can't fail */
122942 +        DEBUGLOG(5, "Building LL table");
122943 +        nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode;
122944 +        LLtype = ZSTD_selectEncodingType(&nextEntropy->litlength_repeatMode,
122945 +                                        countWksp, max, mostFrequent, nbSeq,
122946 +                                        LLFSELog, prevEntropy->litlengthCTable,
122947 +                                        LL_defaultNorm, LL_defaultNormLog,
122948 +                                        ZSTD_defaultAllowed, strategy);
122949 +        assert(set_basic < set_compressed && set_rle < set_compressed);
122950 +        assert(!(LLtype < set_compressed && nextEntropy->litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
122951 +        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
122952 +                                                    countWksp, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
122953 +                                                    prevEntropy->litlengthCTable, sizeof(prevEntropy->litlengthCTable),
122954 +                                                    cTableWksp, cTableWkspSize);
122955 +            FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for LitLens failed");
122956 +            if (LLtype == set_compressed)
122957 +                fseMetadata->lastCountSize = countSize;
122958 +            op += countSize;
122959 +            fseMetadata->llType = (symbolEncodingType_e) LLtype;
122960 +    }   }
122961 +    /* build CTable for Offsets */
122962 +    {   U32 Offtype;
122963 +        unsigned max = MaxOff;
122964 +        size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, ofCodeTable, nbSeq, workspace, wkspSize);  /* can't fail */
122965 +        /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
122966 +        ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
122967 +        DEBUGLOG(5, "Building OF table");
122968 +        nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode;
122969 +        Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode,
122970 +                                        countWksp, max, mostFrequent, nbSeq,
122971 +                                        OffFSELog, prevEntropy->offcodeCTable,
122972 +                                        OF_defaultNorm, OF_defaultNormLog,
122973 +                                        defaultPolicy, strategy);
122974 +        assert(!(Offtype < set_compressed && nextEntropy->offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
122975 +        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
122976 +                                                    countWksp, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
122977 +                                                    prevEntropy->offcodeCTable, sizeof(prevEntropy->offcodeCTable),
122978 +                                                    cTableWksp, cTableWkspSize);
122979 +            FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for Offsets failed");
122980 +            if (Offtype == set_compressed)
122981 +                fseMetadata->lastCountSize = countSize;
122982 +            op += countSize;
122983 +            fseMetadata->ofType = (symbolEncodingType_e) Offtype;
122984 +    }   }
122985 +    /* build CTable for MatchLengths */
122986 +    {   U32 MLtype;
122987 +        unsigned max = MaxML;
122988 +        size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, mlCodeTable, nbSeq, workspace, wkspSize);   /* can't fail */
122989 +        DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
122990 +        nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode;
122991 +        MLtype = ZSTD_selectEncodingType(&nextEntropy->matchlength_repeatMode,
122992 +                                        countWksp, max, mostFrequent, nbSeq,
122993 +                                        MLFSELog, prevEntropy->matchlengthCTable,
122994 +                                        ML_defaultNorm, ML_defaultNormLog,
122995 +                                        ZSTD_defaultAllowed, strategy);
122996 +        assert(!(MLtype < set_compressed && nextEntropy->matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
122997 +        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
122998 +                                                    countWksp, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
122999 +                                                    prevEntropy->matchlengthCTable, sizeof(prevEntropy->matchlengthCTable),
123000 +                                                    cTableWksp, cTableWkspSize);
123001 +            FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for MatchLengths failed");
123002 +            if (MLtype == set_compressed)
123003 +                fseMetadata->lastCountSize = countSize;
123004 +            op += countSize;
123005 +            fseMetadata->mlType = (symbolEncodingType_e) MLtype;
123006 +    }   }
123007 +    assert((size_t) (op-ostart) <= sizeof(fseMetadata->fseTablesBuffer));
123008 +    return op-ostart;
123012 +/** ZSTD_buildSuperBlockEntropy() :
123013 + *  Builds entropy for the super-block.
123014 + *  @return : 0 on success or error code */
123015 +static size_t
123016 +ZSTD_buildSuperBlockEntropy(seqStore_t* seqStorePtr,
123017 +                      const ZSTD_entropyCTables_t* prevEntropy,
123018 +                            ZSTD_entropyCTables_t* nextEntropy,
123019 +                      const ZSTD_CCtx_params* cctxParams,
123020 +                            ZSTD_entropyCTablesMetadata_t* entropyMetadata,
123021 +                            void* workspace, size_t wkspSize)
123023 +    size_t const litSize = seqStorePtr->lit - seqStorePtr->litStart;
123024 +    DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy");
123025 +    entropyMetadata->hufMetadata.hufDesSize =
123026 +        ZSTD_buildSuperBlockEntropy_literal(seqStorePtr->litStart, litSize,
123027 +                                            &prevEntropy->huf, &nextEntropy->huf,
123028 +                                            &entropyMetadata->hufMetadata,
123029 +                                            ZSTD_disableLiteralsCompression(cctxParams),
123030 +                                            workspace, wkspSize);
123031 +    FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize, "ZSTD_buildSuperBlockEntropy_literal failed");
123032 +    entropyMetadata->fseMetadata.fseTablesSize =
123033 +        ZSTD_buildSuperBlockEntropy_sequences(seqStorePtr,
123034 +                                              &prevEntropy->fse, &nextEntropy->fse,
123035 +                                              cctxParams,
123036 +                                              &entropyMetadata->fseMetadata,
123037 +                                              workspace, wkspSize);
123038 +    FORWARD_IF_ERROR(entropyMetadata->fseMetadata.fseTablesSize, "ZSTD_buildSuperBlockEntropy_sequences failed");
123039 +    return 0;
123042 +/** ZSTD_compressSubBlock_literal() :
123043 + *  Compresses literals section for a sub-block.
123044 + *  When we have to write the Huffman table we will sometimes choose a header
123045 + *  size larger than necessary. This is because we have to pick the header size
123046 + *  before we know the table size + compressed size, so we have a bound on the
123047 + *  table size. If we guessed incorrectly, we fall back to uncompressed literals.
123049 + *  We write the header when writeEntropy=1 and set entropyWritten=1 when we succeeded
123050 + *  in writing the header, otherwise it is set to 0.
123052 + *  hufMetadata->hType has literals block type info.
123053 + *      If it is set_basic, all sub-blocks literals section will be Raw_Literals_Block.
123054 + *      If it is set_rle, all sub-blocks literals section will be RLE_Literals_Block.
123055 + *      If it is set_compressed, first sub-block's literals section will be Compressed_Literals_Block
123056 + *      If it is set_compressed, first sub-block's literals section will be Treeless_Literals_Block
123057 + *      and the following sub-blocks' literals sections will be Treeless_Literals_Block.
123058 + *  @return : compressed size of literals section of a sub-block
123059 + *            Or 0 if it unable to compress.
123060 + *            Or error code */
123061 +static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
123062 +                                    const ZSTD_hufCTablesMetadata_t* hufMetadata,
123063 +                                    const BYTE* literals, size_t litSize,
123064 +                                    void* dst, size_t dstSize,
123065 +                                    const int bmi2, int writeEntropy, int* entropyWritten)
123067 +    size_t const header = writeEntropy ? 200 : 0;
123068 +    size_t const lhSize = 3 + (litSize >= (1 KB - header)) + (litSize >= (16 KB - header));
123069 +    BYTE* const ostart = (BYTE*)dst;
123070 +    BYTE* const oend = ostart + dstSize;
123071 +    BYTE* op = ostart + lhSize;
123072 +    U32 const singleStream = lhSize == 3;
123073 +    symbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat;
123074 +    size_t cLitSize = 0;
123076 +    (void)bmi2; /* TODO bmi2... */
123078 +    DEBUGLOG(5, "ZSTD_compressSubBlock_literal (litSize=%zu, lhSize=%zu, writeEntropy=%d)", litSize, lhSize, writeEntropy);
123080 +    *entropyWritten = 0;
123081 +    if (litSize == 0 || hufMetadata->hType == set_basic) {
123082 +      DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal");
123083 +      return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
123084 +    } else if (hufMetadata->hType == set_rle) {
123085 +      DEBUGLOG(5, "ZSTD_compressSubBlock_literal using rle literal");
123086 +      return ZSTD_compressRleLiteralsBlock(dst, dstSize, literals, litSize);
123087 +    }
123089 +    assert(litSize > 0);
123090 +    assert(hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat);
123092 +    if (writeEntropy && hufMetadata->hType == set_compressed) {
123093 +        ZSTD_memcpy(op, hufMetadata->hufDesBuffer, hufMetadata->hufDesSize);
123094 +        op += hufMetadata->hufDesSize;
123095 +        cLitSize += hufMetadata->hufDesSize;
123096 +        DEBUGLOG(5, "ZSTD_compressSubBlock_literal (hSize=%zu)", hufMetadata->hufDesSize);
123097 +    }
123099 +    /* TODO bmi2 */
123100 +    {   const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, oend-op, literals, litSize, hufTable)
123101 +                                          : HUF_compress4X_usingCTable(op, oend-op, literals, litSize, hufTable);
123102 +        op += cSize;
123103 +        cLitSize += cSize;
123104 +        if (cSize == 0 || ERR_isError(cSize)) {
123105 +            DEBUGLOG(5, "Failed to write entropy tables %s", ZSTD_getErrorName(cSize));
123106 +            return 0;
123107 +        }
123108 +        /* If we expand and we aren't writing a header then emit uncompressed */
123109 +        if (!writeEntropy && cLitSize >= litSize) {
123110 +            DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal because uncompressible");
123111 +            return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
123112 +        }
123113 +        /* If we are writing headers then allow expansion that doesn't change our header size. */
123114 +        if (lhSize < (size_t)(3 + (cLitSize >= 1 KB) + (cLitSize >= 16 KB))) {
123115 +            assert(cLitSize > litSize);
123116 +            DEBUGLOG(5, "Literals expanded beyond allowed header size");
123117 +            return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
123118 +        }
123119 +        DEBUGLOG(5, "ZSTD_compressSubBlock_literal (cSize=%zu)", cSize);
123120 +    }
123122 +    /* Build header */
123123 +    switch(lhSize)
123124 +    {
123125 +    case 3: /* 2 - 2 - 10 - 10 */
123126 +        {   U32 const lhc = hType + ((!singleStream) << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<14);
123127 +            MEM_writeLE24(ostart, lhc);
123128 +            break;
123129 +        }
123130 +    case 4: /* 2 - 2 - 14 - 14 */
123131 +        {   U32 const lhc = hType + (2 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<18);
123132 +            MEM_writeLE32(ostart, lhc);
123133 +            break;
123134 +        }
123135 +    case 5: /* 2 - 2 - 18 - 18 */
123136 +        {   U32 const lhc = hType + (3 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<22);
123137 +            MEM_writeLE32(ostart, lhc);
123138 +            ostart[4] = (BYTE)(cLitSize >> 10);
123139 +            break;
123140 +        }
123141 +    default:  /* not possible : lhSize is {3,4,5} */
123142 +        assert(0);
123143 +    }
123144 +    *entropyWritten = 1;
123145 +    DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)litSize, (U32)(op-ostart));
123146 +    return op-ostart;
123149 +static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, const seqDef* sequences, size_t nbSeq, size_t litSize, int lastSequence) {
123150 +    const seqDef* const sstart = sequences;
123151 +    const seqDef* const send = sequences + nbSeq;
123152 +    const seqDef* sp = sstart;
123153 +    size_t matchLengthSum = 0;
123154 +    size_t litLengthSum = 0;
123155 +    while (send-sp > 0) {
123156 +        ZSTD_sequenceLength const seqLen = ZSTD_getSequenceLength(seqStore, sp);
123157 +        litLengthSum += seqLen.litLength;
123158 +        matchLengthSum += seqLen.matchLength;
123159 +        sp++;
123160 +    }
123161 +    assert(litLengthSum <= litSize);
123162 +    if (!lastSequence) {
123163 +        assert(litLengthSum == litSize);
123164 +    }
123165 +    return matchLengthSum + litSize;
123168 +/** ZSTD_compressSubBlock_sequences() :
123169 + *  Compresses sequences section for a sub-block.
123170 + *  fseMetadata->llType, fseMetadata->ofType, and fseMetadata->mlType have
123171 + *  symbol compression modes for the super-block.
123172 + *  The first successfully compressed block will have these in its header.
123173 + *  We set entropyWritten=1 when we succeed in compressing the sequences.
123174 + *  The following sub-blocks will always have repeat mode.
123175 + *  @return : compressed size of sequences section of a sub-block
123176 + *            Or 0 if it is unable to compress
123177 + *            Or error code. */
123178 +static size_t ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables,
123179 +                                              const ZSTD_fseCTablesMetadata_t* fseMetadata,
123180 +                                              const seqDef* sequences, size_t nbSeq,
123181 +                                              const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
123182 +                                              const ZSTD_CCtx_params* cctxParams,
123183 +                                              void* dst, size_t dstCapacity,
123184 +                                              const int bmi2, int writeEntropy, int* entropyWritten)
123186 +    const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
123187 +    BYTE* const ostart = (BYTE*)dst;
123188 +    BYTE* const oend = ostart + dstCapacity;
123189 +    BYTE* op = ostart;
123190 +    BYTE* seqHead;
123192 +    DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (nbSeq=%zu, writeEntropy=%d, longOffsets=%d)", nbSeq, writeEntropy, longOffsets);
123194 +    *entropyWritten = 0;
123195 +    /* Sequences Header */
123196 +    RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
123197 +                    dstSize_tooSmall, "");
123198 +    if (nbSeq < 0x7F)
123199 +        *op++ = (BYTE)nbSeq;
123200 +    else if (nbSeq < LONGNBSEQ)
123201 +        op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
123202 +    else
123203 +        op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
123204 +    if (nbSeq==0) {
123205 +        return op - ostart;
123206 +    }
123208 +    /* seqHead : flags for FSE encoding type */
123209 +    seqHead = op++;
123211 +    DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (seqHeadSize=%u)", (unsigned)(op-ostart));
123213 +    if (writeEntropy) {
123214 +        const U32 LLtype = fseMetadata->llType;
123215 +        const U32 Offtype = fseMetadata->ofType;
123216 +        const U32 MLtype = fseMetadata->mlType;
123217 +        DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (fseTablesSize=%zu)", fseMetadata->fseTablesSize);
123218 +        *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
123219 +        ZSTD_memcpy(op, fseMetadata->fseTablesBuffer, fseMetadata->fseTablesSize);
123220 +        op += fseMetadata->fseTablesSize;
123221 +    } else {
123222 +        const U32 repeat = set_repeat;
123223 +        *seqHead = (BYTE)((repeat<<6) + (repeat<<4) + (repeat<<2));
123224 +    }
123226 +    {   size_t const bitstreamSize = ZSTD_encodeSequences(
123227 +                                        op, oend - op,
123228 +                                        fseTables->matchlengthCTable, mlCode,
123229 +                                        fseTables->offcodeCTable, ofCode,
123230 +                                        fseTables->litlengthCTable, llCode,
123231 +                                        sequences, nbSeq,
123232 +                                        longOffsets, bmi2);
123233 +        FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed");
123234 +        op += bitstreamSize;
123235 +        /* zstd versions <= 1.3.4 mistakenly report corruption when
123236 +         * FSE_readNCount() receives a buffer < 4 bytes.
123237 +         * Fixed by https://github.com/facebook/zstd/pull/1146.
123238 +         * This can happen when the last set_compressed table present is 2
123239 +         * bytes and the bitstream is only one byte.
123240 +         * In this exceedingly rare case, we will simply emit an uncompressed
123241 +         * block, since it isn't worth optimizing.
123242 +         */
123243 +#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
123244 +        if (writeEntropy && fseMetadata->lastCountSize && fseMetadata->lastCountSize + bitstreamSize < 4) {
123245 +            /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
123246 +            assert(fseMetadata->lastCountSize + bitstreamSize == 3);
123247 +            DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
123248 +                        "emitting an uncompressed block.");
123249 +            return 0;
123250 +        }
123251 +#endif
123252 +        DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (bitstreamSize=%zu)", bitstreamSize);
123253 +    }
123255 +    /* zstd versions <= 1.4.0 mistakenly report error when
123256 +     * sequences section body size is less than 3 bytes.
123257 +     * Fixed by https://github.com/facebook/zstd/pull/1664.
123258 +     * This can happen when the previous sequences section block is compressed
123259 +     * with rle mode and the current block's sequences section is compressed
123260 +     * with repeat mode where sequences section body size can be 1 byte.
123261 +     */
123262 +#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
123263 +    if (op-seqHead < 4) {
123264 +        DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.4.0 by emitting "
123265 +                    "an uncompressed block when sequences are < 4 bytes");
123266 +        return 0;
123267 +    }
123268 +#endif
123270 +    *entropyWritten = 1;
123271 +    return op - ostart;
123274 +/** ZSTD_compressSubBlock() :
123275 + *  Compresses a single sub-block.
123276 + *  @return : compressed size of the sub-block
123277 + *            Or 0 if it failed to compress. */
123278 +static size_t ZSTD_compressSubBlock(const ZSTD_entropyCTables_t* entropy,
123279 +                                    const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
123280 +                                    const seqDef* sequences, size_t nbSeq,
123281 +                                    const BYTE* literals, size_t litSize,
123282 +                                    const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
123283 +                                    const ZSTD_CCtx_params* cctxParams,
123284 +                                    void* dst, size_t dstCapacity,
123285 +                                    const int bmi2,
123286 +                                    int writeLitEntropy, int writeSeqEntropy,
123287 +                                    int* litEntropyWritten, int* seqEntropyWritten,
123288 +                                    U32 lastBlock)
123290 +    BYTE* const ostart = (BYTE*)dst;
123291 +    BYTE* const oend = ostart + dstCapacity;
123292 +    BYTE* op = ostart + ZSTD_blockHeaderSize;
123293 +    DEBUGLOG(5, "ZSTD_compressSubBlock (litSize=%zu, nbSeq=%zu, writeLitEntropy=%d, writeSeqEntropy=%d, lastBlock=%d)",
123294 +                litSize, nbSeq, writeLitEntropy, writeSeqEntropy, lastBlock);
123295 +    {   size_t cLitSize = ZSTD_compressSubBlock_literal((const HUF_CElt*)entropy->huf.CTable,
123296 +                                                        &entropyMetadata->hufMetadata, literals, litSize,
123297 +                                                        op, oend-op, bmi2, writeLitEntropy, litEntropyWritten);
123298 +        FORWARD_IF_ERROR(cLitSize, "ZSTD_compressSubBlock_literal failed");
123299 +        if (cLitSize == 0) return 0;
123300 +        op += cLitSize;
123301 +    }
123302 +    {   size_t cSeqSize = ZSTD_compressSubBlock_sequences(&entropy->fse,
123303 +                                                  &entropyMetadata->fseMetadata,
123304 +                                                  sequences, nbSeq,
123305 +                                                  llCode, mlCode, ofCode,
123306 +                                                  cctxParams,
123307 +                                                  op, oend-op,
123308 +                                                  bmi2, writeSeqEntropy, seqEntropyWritten);
123309 +        FORWARD_IF_ERROR(cSeqSize, "ZSTD_compressSubBlock_sequences failed");
123310 +        if (cSeqSize == 0) return 0;
123311 +        op += cSeqSize;
123312 +    }
123313 +    /* Write block header */
123314 +    {   size_t cSize = (op-ostart)-ZSTD_blockHeaderSize;
123315 +        U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
123316 +        MEM_writeLE24(ostart, cBlockHeader24);
123317 +    }
123318 +    return op-ostart;
123321 +static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t litSize,
123322 +                                                const ZSTD_hufCTables_t* huf,
123323 +                                                const ZSTD_hufCTablesMetadata_t* hufMetadata,
123324 +                                                void* workspace, size_t wkspSize,
123325 +                                                int writeEntropy)
123327 +    unsigned* const countWksp = (unsigned*)workspace;
123328 +    unsigned maxSymbolValue = 255;
123329 +    size_t literalSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */
123331 +    if (hufMetadata->hType == set_basic) return litSize;
123332 +    else if (hufMetadata->hType == set_rle) return 1;
123333 +    else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) {
123334 +        size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize);
123335 +        if (ZSTD_isError(largest)) return litSize;
123336 +        {   size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue);
123337 +            if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize;
123338 +            return cLitSizeEstimate + literalSectionHeaderSize;
123339 +    }   }
123340 +    assert(0); /* impossible */
123341 +    return 0;
123344 +static size_t ZSTD_estimateSubBlockSize_symbolType(symbolEncodingType_e type,
123345 +                        const BYTE* codeTable, unsigned maxCode,
123346 +                        size_t nbSeq, const FSE_CTable* fseCTable,
123347 +                        const U32* additionalBits,
123348 +                        short const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
123349 +                        void* workspace, size_t wkspSize)
123351 +    unsigned* const countWksp = (unsigned*)workspace;
123352 +    const BYTE* ctp = codeTable;
123353 +    const BYTE* const ctStart = ctp;
123354 +    const BYTE* const ctEnd = ctStart + nbSeq;
123355 +    size_t cSymbolTypeSizeEstimateInBits = 0;
123356 +    unsigned max = maxCode;
123358 +    HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize);  /* can't fail */
123359 +    if (type == set_basic) {
123360 +        /* We selected this encoding type, so it must be valid. */
123361 +        assert(max <= defaultMax);
123362 +        cSymbolTypeSizeEstimateInBits = max <= defaultMax
123363 +                ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max)
123364 +                : ERROR(GENERIC);
123365 +    } else if (type == set_rle) {
123366 +        cSymbolTypeSizeEstimateInBits = 0;
123367 +    } else if (type == set_compressed || type == set_repeat) {
123368 +        cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max);
123369 +    }
123370 +    if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) return nbSeq * 10;
123371 +    while (ctp < ctEnd) {
123372 +        if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp];
123373 +        else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */
123374 +        ctp++;
123375 +    }
123376 +    return cSymbolTypeSizeEstimateInBits / 8;
123379 +static size_t ZSTD_estimateSubBlockSize_sequences(const BYTE* ofCodeTable,
123380 +                                                  const BYTE* llCodeTable,
123381 +                                                  const BYTE* mlCodeTable,
123382 +                                                  size_t nbSeq,
123383 +                                                  const ZSTD_fseCTables_t* fseTables,
123384 +                                                  const ZSTD_fseCTablesMetadata_t* fseMetadata,
123385 +                                                  void* workspace, size_t wkspSize,
123386 +                                                  int writeEntropy)
123388 +    size_t sequencesSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */
123389 +    size_t cSeqSizeEstimate = 0;
123390 +    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, MaxOff,
123391 +                                         nbSeq, fseTables->offcodeCTable, NULL,
123392 +                                         OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
123393 +                                         workspace, wkspSize);
123394 +    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->llType, llCodeTable, MaxLL,
123395 +                                         nbSeq, fseTables->litlengthCTable, LL_bits,
123396 +                                         LL_defaultNorm, LL_defaultNormLog, MaxLL,
123397 +                                         workspace, wkspSize);
123398 +    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, MaxML,
123399 +                                         nbSeq, fseTables->matchlengthCTable, ML_bits,
123400 +                                         ML_defaultNorm, ML_defaultNormLog, MaxML,
123401 +                                         workspace, wkspSize);
123402 +    if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize;
123403 +    return cSeqSizeEstimate + sequencesSectionHeaderSize;
123406 +static size_t ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize,
123407 +                                        const BYTE* ofCodeTable,
123408 +                                        const BYTE* llCodeTable,
123409 +                                        const BYTE* mlCodeTable,
123410 +                                        size_t nbSeq,
123411 +                                        const ZSTD_entropyCTables_t* entropy,
123412 +                                        const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
123413 +                                        void* workspace, size_t wkspSize,
123414 +                                        int writeLitEntropy, int writeSeqEntropy) {
123415 +    size_t cSizeEstimate = 0;
123416 +    cSizeEstimate += ZSTD_estimateSubBlockSize_literal(literals, litSize,
123417 +                                                         &entropy->huf, &entropyMetadata->hufMetadata,
123418 +                                                         workspace, wkspSize, writeLitEntropy);
123419 +    cSizeEstimate += ZSTD_estimateSubBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,
123420 +                                                         nbSeq, &entropy->fse, &entropyMetadata->fseMetadata,
123421 +                                                         workspace, wkspSize, writeSeqEntropy);
123422 +    return cSizeEstimate + ZSTD_blockHeaderSize;
123425 +static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t const* fseMetadata)
123427 +    if (fseMetadata->llType == set_compressed || fseMetadata->llType == set_rle)
123428 +        return 1;
123429 +    if (fseMetadata->mlType == set_compressed || fseMetadata->mlType == set_rle)
123430 +        return 1;
123431 +    if (fseMetadata->ofType == set_compressed || fseMetadata->ofType == set_rle)
123432 +        return 1;
123433 +    return 0;
123436 +/** ZSTD_compressSubBlock_multi() :
123437 + *  Breaks super-block into multiple sub-blocks and compresses them.
123438 + *  Entropy will be written to the first block.
123439 + *  The following blocks will use repeat mode to compress.
123440 + *  All sub-blocks are compressed blocks (no raw or rle blocks).
123441 + *  @return : compressed size of the super block (which is multiple ZSTD blocks)
123442 + *            Or 0 if it failed to compress. */
123443 +static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
123444 +                            const ZSTD_compressedBlockState_t* prevCBlock,
123445 +                            ZSTD_compressedBlockState_t* nextCBlock,
123446 +                            const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
123447 +                            const ZSTD_CCtx_params* cctxParams,
123448 +                                  void* dst, size_t dstCapacity,
123449 +                            const void* src, size_t srcSize,
123450 +                            const int bmi2, U32 lastBlock,
123451 +                            void* workspace, size_t wkspSize)
123453 +    const seqDef* const sstart = seqStorePtr->sequencesStart;
123454 +    const seqDef* const send = seqStorePtr->sequences;
123455 +    const seqDef* sp = sstart;
123456 +    const BYTE* const lstart = seqStorePtr->litStart;
123457 +    const BYTE* const lend = seqStorePtr->lit;
123458 +    const BYTE* lp = lstart;
123459 +    BYTE const* ip = (BYTE const*)src;
123460 +    BYTE const* const iend = ip + srcSize;
123461 +    BYTE* const ostart = (BYTE*)dst;
123462 +    BYTE* const oend = ostart + dstCapacity;
123463 +    BYTE* op = ostart;
123464 +    const BYTE* llCodePtr = seqStorePtr->llCode;
123465 +    const BYTE* mlCodePtr = seqStorePtr->mlCode;
123466 +    const BYTE* ofCodePtr = seqStorePtr->ofCode;
123467 +    size_t targetCBlockSize = cctxParams->targetCBlockSize;
123468 +    size_t litSize, seqCount;
123469 +    int writeLitEntropy = entropyMetadata->hufMetadata.hType == set_compressed;
123470 +    int writeSeqEntropy = 1;
123471 +    int lastSequence = 0;
123473 +    DEBUGLOG(5, "ZSTD_compressSubBlock_multi (litSize=%u, nbSeq=%u)",
123474 +                (unsigned)(lend-lp), (unsigned)(send-sstart));
123476 +    litSize = 0;
123477 +    seqCount = 0;
123478 +    do {
123479 +        size_t cBlockSizeEstimate = 0;
123480 +        if (sstart == send) {
123481 +            lastSequence = 1;
123482 +        } else {
123483 +            const seqDef* const sequence = sp + seqCount;
123484 +            lastSequence = sequence == send - 1;
123485 +            litSize += ZSTD_getSequenceLength(seqStorePtr, sequence).litLength;
123486 +            seqCount++;
123487 +        }
123488 +        if (lastSequence) {
123489 +            assert(lp <= lend);
123490 +            assert(litSize <= (size_t)(lend - lp));
123491 +            litSize = (size_t)(lend - lp);
123492 +        }
123493 +        /* I think there is an optimization opportunity here.
123494 +         * Calling ZSTD_estimateSubBlockSize for every sequence can be wasteful
123495 +         * since it recalculates estimate from scratch.
123496 +         * For example, it would recount literal distribution and symbol codes everytime.
123497 +         */
123498 +        cBlockSizeEstimate = ZSTD_estimateSubBlockSize(lp, litSize, ofCodePtr, llCodePtr, mlCodePtr, seqCount,
123499 +                                                       &nextCBlock->entropy, entropyMetadata,
123500 +                                                       workspace, wkspSize, writeLitEntropy, writeSeqEntropy);
123501 +        if (cBlockSizeEstimate > targetCBlockSize || lastSequence) {
123502 +            int litEntropyWritten = 0;
123503 +            int seqEntropyWritten = 0;
123504 +            const size_t decompressedSize = ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, lastSequence);
123505 +            const size_t cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata,
123506 +                                                       sp, seqCount,
123507 +                                                       lp, litSize,
123508 +                                                       llCodePtr, mlCodePtr, ofCodePtr,
123509 +                                                       cctxParams,
123510 +                                                       op, oend-op,
123511 +                                                       bmi2, writeLitEntropy, writeSeqEntropy,
123512 +                                                       &litEntropyWritten, &seqEntropyWritten,
123513 +                                                       lastBlock && lastSequence);
123514 +            FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed");
123515 +            if (cSize > 0 && cSize < decompressedSize) {
123516 +                DEBUGLOG(5, "Committed the sub-block");
123517 +                assert(ip + decompressedSize <= iend);
123518 +                ip += decompressedSize;
123519 +                sp += seqCount;
123520 +                lp += litSize;
123521 +                op += cSize;
123522 +                llCodePtr += seqCount;
123523 +                mlCodePtr += seqCount;
123524 +                ofCodePtr += seqCount;
123525 +                litSize = 0;
123526 +                seqCount = 0;
123527 +                /* Entropy only needs to be written once */
123528 +                if (litEntropyWritten) {
123529 +                    writeLitEntropy = 0;
123530 +                }
123531 +                if (seqEntropyWritten) {
123532 +                    writeSeqEntropy = 0;
123533 +                }
123534 +            }
123535 +        }
123536 +    } while (!lastSequence);
123537 +    if (writeLitEntropy) {
123538 +        DEBUGLOG(5, "ZSTD_compressSubBlock_multi has literal entropy tables unwritten");
123539 +        ZSTD_memcpy(&nextCBlock->entropy.huf, &prevCBlock->entropy.huf, sizeof(prevCBlock->entropy.huf));
123540 +    }
123541 +    if (writeSeqEntropy && ZSTD_needSequenceEntropyTables(&entropyMetadata->fseMetadata)) {
123542 +        /* If we haven't written our entropy tables, then we've violated our contract and
123543 +         * must emit an uncompressed block.
123544 +         */
123545 +        DEBUGLOG(5, "ZSTD_compressSubBlock_multi has sequence entropy tables unwritten");
123546 +        return 0;
123547 +    }
123548 +    if (ip < iend) {
123549 +        size_t const cSize = ZSTD_noCompressBlock(op, oend - op, ip, iend - ip, lastBlock);
123550 +        DEBUGLOG(5, "ZSTD_compressSubBlock_multi last sub-block uncompressed, %zu bytes", (size_t)(iend - ip));
123551 +        FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
123552 +        assert(cSize != 0);
123553 +        op += cSize;
123554 +        /* We have to regenerate the repcodes because we've skipped some sequences */
123555 +        if (sp < send) {
123556 +            seqDef const* seq;
123557 +            repcodes_t rep;
123558 +            ZSTD_memcpy(&rep, prevCBlock->rep, sizeof(rep));
123559 +            for (seq = sstart; seq < sp; ++seq) {
123560 +                rep = ZSTD_updateRep(rep.rep, seq->offset - 1, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0);
123561 +            }
123562 +            ZSTD_memcpy(nextCBlock->rep, &rep, sizeof(rep));
123563 +        }
123564 +    }
123565 +    DEBUGLOG(5, "ZSTD_compressSubBlock_multi compressed");
123566 +    return op-ostart;
123569 +size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
123570 +                               void* dst, size_t dstCapacity,
123571 +                               void const* src, size_t srcSize,
123572 +                               unsigned lastBlock) {
123573 +    ZSTD_entropyCTablesMetadata_t entropyMetadata;
123575 +    FORWARD_IF_ERROR(ZSTD_buildSuperBlockEntropy(&zc->seqStore,
123576 +          &zc->blockState.prevCBlock->entropy,
123577 +          &zc->blockState.nextCBlock->entropy,
123578 +          &zc->appliedParams,
123579 +          &entropyMetadata,
123580 +          zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), "");
123582 +    return ZSTD_compressSubBlock_multi(&zc->seqStore,
123583 +            zc->blockState.prevCBlock,
123584 +            zc->blockState.nextCBlock,
123585 +            &entropyMetadata,
123586 +            &zc->appliedParams,
123587 +            dst, dstCapacity,
123588 +            src, srcSize,
123589 +            zc->bmi2, lastBlock,
123590 +            zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */);
123592 diff --git a/lib/zstd/compress/zstd_compress_superblock.h b/lib/zstd/compress/zstd_compress_superblock.h
123593 new file mode 100644
123594 index 000000000000..224ece79546e
123595 --- /dev/null
123596 +++ b/lib/zstd/compress/zstd_compress_superblock.h
123597 @@ -0,0 +1,32 @@
123599 + * Copyright (c) Yann Collet, Facebook, Inc.
123600 + * All rights reserved.
123602 + * This source code is licensed under both the BSD-style license (found in the
123603 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
123604 + * in the COPYING file in the root directory of this source tree).
123605 + * You may select, at your option, one of the above-listed licenses.
123606 + */
123608 +#ifndef ZSTD_COMPRESS_ADVANCED_H
123609 +#define ZSTD_COMPRESS_ADVANCED_H
123611 +/*-*************************************
123612 +*  Dependencies
123613 +***************************************/
123615 +#include <linux/zstd.h> /* ZSTD_CCtx */
123617 +/*-*************************************
123618 +*  Target Compressed Block Size
123619 +***************************************/
123621 +/* ZSTD_compressSuperBlock() :
123622 + * Used to compress a super block when targetCBlockSize is being used.
123623 + * The given block will be compressed into multiple sub blocks that are around targetCBlockSize. */
123624 +size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
123625 +                               void* dst, size_t dstCapacity,
123626 +                               void const* src, size_t srcSize,
123627 +                               unsigned lastBlock);
123629 +#endif /* ZSTD_COMPRESS_ADVANCED_H */
123630 diff --git a/lib/zstd/compress/zstd_cwksp.h b/lib/zstd/compress/zstd_cwksp.h
123631 new file mode 100644
123632 index 000000000000..c231cc500ef5
123633 --- /dev/null
123634 +++ b/lib/zstd/compress/zstd_cwksp.h
123635 @@ -0,0 +1,482 @@
123637 + * Copyright (c) Yann Collet, Facebook, Inc.
123638 + * All rights reserved.
123640 + * This source code is licensed under both the BSD-style license (found in the
123641 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
123642 + * in the COPYING file in the root directory of this source tree).
123643 + * You may select, at your option, one of the above-listed licenses.
123644 + */
123646 +#ifndef ZSTD_CWKSP_H
123647 +#define ZSTD_CWKSP_H
123649 +/*-*************************************
123650 +*  Dependencies
123651 +***************************************/
123652 +#include "../common/zstd_internal.h"
123655 +/*-*************************************
123656 +*  Constants
123657 +***************************************/
123659 +/* Since the workspace is effectively its own little malloc implementation /
123660 + * arena, when we run under ASAN, we should similarly insert redzones between
123661 + * each internal element of the workspace, so ASAN will catch overruns that
123662 + * reach outside an object but that stay inside the workspace.
123664 + * This defines the size of that redzone.
123665 + */
123666 +#ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE
123667 +#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
123668 +#endif
123670 +/*-*************************************
123671 +*  Structures
123672 +***************************************/
123673 +typedef enum {
123674 +    ZSTD_cwksp_alloc_objects,
123675 +    ZSTD_cwksp_alloc_buffers,
123676 +    ZSTD_cwksp_alloc_aligned
123677 +} ZSTD_cwksp_alloc_phase_e;
123680 + * Used to describe whether the workspace is statically allocated (and will not
123681 + * necessarily ever be freed), or if it's dynamically allocated and we can
123682 + * expect a well-formed caller to free this.
123683 + */
123684 +typedef enum {
123685 +    ZSTD_cwksp_dynamic_alloc,
123686 +    ZSTD_cwksp_static_alloc
123687 +} ZSTD_cwksp_static_alloc_e;
123690 + * Zstd fits all its internal datastructures into a single continuous buffer,
123691 + * so that it only needs to perform a single OS allocation (or so that a buffer
123692 + * can be provided to it and it can perform no allocations at all). This buffer
123693 + * is called the workspace.
123695 + * Several optimizations complicate that process of allocating memory ranges
123696 + * from this workspace for each internal datastructure:
123698 + * - These different internal datastructures have different setup requirements:
123700 + *   - The static objects need to be cleared once and can then be trivially
123701 + *     reused for each compression.
123703 + *   - Various buffers don't need to be initialized at all--they are always
123704 + *     written into before they're read.
123706 + *   - The matchstate tables have a unique requirement that they don't need
123707 + *     their memory to be totally cleared, but they do need the memory to have
123708 + *     some bound, i.e., a guarantee that all values in the memory they've been
123709 + *     allocated is less than some maximum value (which is the starting value
123710 + *     for the indices that they will then use for compression). When this
123711 + *     guarantee is provided to them, they can use the memory without any setup
123712 + *     work. When it can't, they have to clear the area.
123714 + * - These buffers also have different alignment requirements.
123716 + * - We would like to reuse the objects in the workspace for multiple
123717 + *   compressions without having to perform any expensive reallocation or
123718 + *   reinitialization work.
123720 + * - We would like to be able to efficiently reuse the workspace across
123721 + *   multiple compressions **even when the compression parameters change** and
123722 + *   we need to resize some of the objects (where possible).
123724 + * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp
123725 + * abstraction was created. It works as follows:
123727 + * Workspace Layout:
123729 + * [                        ... workspace ...                         ]
123730 + * [objects][tables ... ->] free space [<- ... aligned][<- ... buffers]
123732 + * The various objects that live in the workspace are divided into the
123733 + * following categories, and are allocated separately:
123735 + * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
123736 + *   so that literally everything fits in a single buffer. Note: if present,
123737 + *   this must be the first object in the workspace, since ZSTD_customFree{CCtx,
123738 + *   CDict}() rely on a pointer comparison to see whether one or two frees are
123739 + *   required.
123741 + * - Fixed size objects: these are fixed-size, fixed-count objects that are
123742 + *   nonetheless "dynamically" allocated in the workspace so that we can
123743 + *   control how they're initialized separately from the broader ZSTD_CCtx.
123744 + *   Examples:
123745 + *   - Entropy Workspace
123746 + *   - 2 x ZSTD_compressedBlockState_t
123747 + *   - CDict dictionary contents
123749 + * - Tables: these are any of several different datastructures (hash tables,
123750 + *   chain tables, binary trees) that all respect a common format: they are
123751 + *   uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
123752 + *   Their sizes depend on the cparams.
123754 + * - Aligned: these buffers are used for various purposes that require 4 byte
123755 + *   alignment, but don't require any initialization before they're used.
123757 + * - Buffers: these buffers are used for various purposes that don't require
123758 + *   any alignment or initialization before they're used. This means they can
123759 + *   be moved around at no cost for a new compression.
123761 + * Allocating Memory:
123763 + * The various types of objects must be allocated in order, so they can be
123764 + * correctly packed into the workspace buffer. That order is:
123766 + * 1. Objects
123767 + * 2. Buffers
123768 + * 3. Aligned
123769 + * 4. Tables
123771 + * Attempts to reserve objects of different types out of order will fail.
123772 + */
123773 +typedef struct {
123774 +    void* workspace;
123775 +    void* workspaceEnd;
123777 +    void* objectEnd;
123778 +    void* tableEnd;
123779 +    void* tableValidEnd;
123780 +    void* allocStart;
123782 +    BYTE allocFailed;
123783 +    int workspaceOversizedDuration;
123784 +    ZSTD_cwksp_alloc_phase_e phase;
123785 +    ZSTD_cwksp_static_alloc_e isStatic;
123786 +} ZSTD_cwksp;
123788 +/*-*************************************
123789 +*  Functions
123790 +***************************************/
123792 +MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);
123794 +MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
123795 +    (void)ws;
123796 +    assert(ws->workspace <= ws->objectEnd);
123797 +    assert(ws->objectEnd <= ws->tableEnd);
123798 +    assert(ws->objectEnd <= ws->tableValidEnd);
123799 +    assert(ws->tableEnd <= ws->allocStart);
123800 +    assert(ws->tableValidEnd <= ws->allocStart);
123801 +    assert(ws->allocStart <= ws->workspaceEnd);
123805 + * Align must be a power of 2.
123806 + */
123807 +MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
123808 +    size_t const mask = align - 1;
123809 +    assert((align & mask) == 0);
123810 +    return (size + mask) & ~mask;
123814 + * Use this to determine how much space in the workspace we will consume to
123815 + * allocate this object. (Normally it should be exactly the size of the object,
123816 + * but under special conditions, like ASAN, where we pad each object, it might
123817 + * be larger.)
123819 + * Since tables aren't currently redzoned, you don't need to call through this
123820 + * to figure out how much space you need for the matchState tables. Everything
123821 + * else is though.
123822 + */
123823 +MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
123824 +    if (size == 0)
123825 +        return 0;
123826 +    return size;
123829 +MEM_STATIC void ZSTD_cwksp_internal_advance_phase(
123830 +        ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) {
123831 +    assert(phase >= ws->phase);
123832 +    if (phase > ws->phase) {
123833 +        if (ws->phase < ZSTD_cwksp_alloc_buffers &&
123834 +                phase >= ZSTD_cwksp_alloc_buffers) {
123835 +            ws->tableValidEnd = ws->objectEnd;
123836 +        }
123837 +        if (ws->phase < ZSTD_cwksp_alloc_aligned &&
123838 +                phase >= ZSTD_cwksp_alloc_aligned) {
123839 +            /* If unaligned allocations down from a too-large top have left us
123840 +             * unaligned, we need to realign our alloc ptr. Technically, this
123841 +             * can consume space that is unaccounted for in the neededSpace
123842 +             * calculation. However, I believe this can only happen when the
123843 +             * workspace is too large, and specifically when it is too large
123844 +             * by a larger margin than the space that will be consumed. */
123845 +            /* TODO: cleaner, compiler warning friendly way to do this??? */
123846 +            ws->allocStart = (BYTE*)ws->allocStart - ((size_t)ws->allocStart & (sizeof(U32)-1));
123847 +            if (ws->allocStart < ws->tableValidEnd) {
123848 +                ws->tableValidEnd = ws->allocStart;
123849 +            }
123850 +        }
123851 +        ws->phase = phase;
123852 +    }
123856 + * Returns whether this object/buffer/etc was allocated in this workspace.
123857 + */
123858 +MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) {
123859 +    return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
123863 + * Internal function. Do not use directly.
123864 + */
123865 +MEM_STATIC void* ZSTD_cwksp_reserve_internal(
123866 +        ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) {
123867 +    void* alloc;
123868 +    void* bottom = ws->tableEnd;
123869 +    ZSTD_cwksp_internal_advance_phase(ws, phase);
123870 +    alloc = (BYTE *)ws->allocStart - bytes;
123872 +    if (bytes == 0)
123873 +        return NULL;
123876 +    DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
123877 +        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
123878 +    ZSTD_cwksp_assert_internal_consistency(ws);
123879 +    assert(alloc >= bottom);
123880 +    if (alloc < bottom) {
123881 +        DEBUGLOG(4, "cwksp: alloc failed!");
123882 +        ws->allocFailed = 1;
123883 +        return NULL;
123884 +    }
123885 +    if (alloc < ws->tableValidEnd) {
123886 +        ws->tableValidEnd = alloc;
123887 +    }
123888 +    ws->allocStart = alloc;
123891 +    return alloc;
123895 + * Reserves and returns unaligned memory.
123896 + */
123897 +MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) {
123898 +    return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
123902 + * Reserves and returns memory sized on and aligned on sizeof(unsigned).
123903 + */
123904 +MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
123905 +    assert((bytes & (sizeof(U32)-1)) == 0);
123906 +    return ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, sizeof(U32)), ZSTD_cwksp_alloc_aligned);
123910 + * Aligned on sizeof(unsigned). These buffers have the special property that
123911 + * their values remain constrained, allowing us to re-use them without
123912 + * memset()-ing them.
123913 + */
123914 +MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
123915 +    const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
123916 +    void* alloc = ws->tableEnd;
123917 +    void* end = (BYTE *)alloc + bytes;
123918 +    void* top = ws->allocStart;
123920 +    DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
123921 +        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
123922 +    assert((bytes & (sizeof(U32)-1)) == 0);
123923 +    ZSTD_cwksp_internal_advance_phase(ws, phase);
123924 +    ZSTD_cwksp_assert_internal_consistency(ws);
123925 +    assert(end <= top);
123926 +    if (end > top) {
123927 +        DEBUGLOG(4, "cwksp: table alloc failed!");
123928 +        ws->allocFailed = 1;
123929 +        return NULL;
123930 +    }
123931 +    ws->tableEnd = end;
123934 +    return alloc;
123938 + * Aligned on sizeof(void*).
123939 + */
123940 +MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
123941 +    size_t roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
123942 +    void* alloc = ws->objectEnd;
123943 +    void* end = (BYTE*)alloc + roundedBytes;
123946 +    DEBUGLOG(5,
123947 +        "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
123948 +        alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
123949 +    assert(((size_t)alloc & (sizeof(void*)-1)) == 0);
123950 +    assert((bytes & (sizeof(void*)-1)) == 0);
123951 +    ZSTD_cwksp_assert_internal_consistency(ws);
123952 +    /* we must be in the first phase, no advance is possible */
123953 +    if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
123954 +        DEBUGLOG(4, "cwksp: object alloc failed!");
123955 +        ws->allocFailed = 1;
123956 +        return NULL;
123957 +    }
123958 +    ws->objectEnd = end;
123959 +    ws->tableEnd = end;
123960 +    ws->tableValidEnd = end;
123963 +    return alloc;
123966 +MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) {
123967 +    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");
123970 +    assert(ws->tableValidEnd >= ws->objectEnd);
123971 +    assert(ws->tableValidEnd <= ws->allocStart);
123972 +    ws->tableValidEnd = ws->objectEnd;
123973 +    ZSTD_cwksp_assert_internal_consistency(ws);
123976 +MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {
123977 +    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean");
123978 +    assert(ws->tableValidEnd >= ws->objectEnd);
123979 +    assert(ws->tableValidEnd <= ws->allocStart);
123980 +    if (ws->tableValidEnd < ws->tableEnd) {
123981 +        ws->tableValidEnd = ws->tableEnd;
123982 +    }
123983 +    ZSTD_cwksp_assert_internal_consistency(ws);
123987 + * Zero the part of the allocated tables not already marked clean.
123988 + */
123989 +MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
123990 +    DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables");
123991 +    assert(ws->tableValidEnd >= ws->objectEnd);
123992 +    assert(ws->tableValidEnd <= ws->allocStart);
123993 +    if (ws->tableValidEnd < ws->tableEnd) {
123994 +        ZSTD_memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
123995 +    }
123996 +    ZSTD_cwksp_mark_tables_clean(ws);
124000 + * Invalidates table allocations.
124001 + * All other allocations remain valid.
124002 + */
124003 +MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
124004 +    DEBUGLOG(4, "cwksp: clearing tables!");
124007 +    ws->tableEnd = ws->objectEnd;
124008 +    ZSTD_cwksp_assert_internal_consistency(ws);
124012 + * Invalidates all buffer, aligned, and table allocations.
124013 + * Object allocations remain valid.
124014 + */
124015 +MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
124016 +    DEBUGLOG(4, "cwksp: clearing!");
124020 +    ws->tableEnd = ws->objectEnd;
124021 +    ws->allocStart = ws->workspaceEnd;
124022 +    ws->allocFailed = 0;
124023 +    if (ws->phase > ZSTD_cwksp_alloc_buffers) {
124024 +        ws->phase = ZSTD_cwksp_alloc_buffers;
124025 +    }
124026 +    ZSTD_cwksp_assert_internal_consistency(ws);
124030 + * The provided workspace takes ownership of the buffer [start, start+size).
124031 + * Any existing values in the workspace are ignored (the previously managed
124032 + * buffer, if present, must be separately freed).
124033 + */
124034 +MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
124035 +    DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
124036 +    assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
124037 +    ws->workspace = start;
124038 +    ws->workspaceEnd = (BYTE*)start + size;
124039 +    ws->objectEnd = ws->workspace;
124040 +    ws->tableValidEnd = ws->objectEnd;
124041 +    ws->phase = ZSTD_cwksp_alloc_objects;
124042 +    ws->isStatic = isStatic;
124043 +    ZSTD_cwksp_clear(ws);
124044 +    ws->workspaceOversizedDuration = 0;
124045 +    ZSTD_cwksp_assert_internal_consistency(ws);
124048 +MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
124049 +    void* workspace = ZSTD_customMalloc(size, customMem);
124050 +    DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
124051 +    RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
124052 +    ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
124053 +    return 0;
124056 +MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
124057 +    void *ptr = ws->workspace;
124058 +    DEBUGLOG(4, "cwksp: freeing workspace");
124059 +    ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp));
124060 +    ZSTD_customFree(ptr, customMem);
124064 + * Moves the management of a workspace from one cwksp to another. The src cwksp
124065 + * is left in an invalid state (src must be re-init()'ed before it's used again).
124066 + */
124067 +MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
124068 +    *dst = *src;
124069 +    ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
124072 +MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
124073 +    return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
124076 +MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
124077 +    return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
124078 +         + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
124081 +MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
124082 +    return ws->allocFailed;
124085 +/*-*************************************
124086 +*  Functions Checking Free Space
124087 +***************************************/
124089 +MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
124090 +    return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
124093 +MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
124094 +    return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;
124097 +MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
124098 +    return ZSTD_cwksp_check_available(
124099 +        ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);
124102 +MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
124103 +    return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)
124104 +        && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
124107 +MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
124108 +        ZSTD_cwksp* ws, size_t additionalNeededSpace) {
124109 +    if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {
124110 +        ws->workspaceOversizedDuration++;
124111 +    } else {
124112 +        ws->workspaceOversizedDuration = 0;
124113 +    }
124117 +#endif /* ZSTD_CWKSP_H */
124118 diff --git a/lib/zstd/compress/zstd_double_fast.c b/lib/zstd/compress/zstd_double_fast.c
124119 new file mode 100644
124120 index 000000000000..b99172e9d2e4
124121 --- /dev/null
124122 +++ b/lib/zstd/compress/zstd_double_fast.c
124123 @@ -0,0 +1,521 @@
124125 + * Copyright (c) Yann Collet, Facebook, Inc.
124126 + * All rights reserved.
124128 + * This source code is licensed under both the BSD-style license (found in the
124129 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
124130 + * in the COPYING file in the root directory of this source tree).
124131 + * You may select, at your option, one of the above-listed licenses.
124132 + */
124134 +#include "zstd_compress_internal.h"
124135 +#include "zstd_double_fast.h"
124138 +void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
124139 +                              void const* end, ZSTD_dictTableLoadMethod_e dtlm)
124141 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
124142 +    U32* const hashLarge = ms->hashTable;
124143 +    U32  const hBitsL = cParams->hashLog;
124144 +    U32  const mls = cParams->minMatch;
124145 +    U32* const hashSmall = ms->chainTable;
124146 +    U32  const hBitsS = cParams->chainLog;
124147 +    const BYTE* const base = ms->window.base;
124148 +    const BYTE* ip = base + ms->nextToUpdate;
124149 +    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
124150 +    const U32 fastHashFillStep = 3;
124152 +    /* Always insert every fastHashFillStep position into the hash tables.
124153 +     * Insert the other positions into the large hash table if their entry
124154 +     * is empty.
124155 +     */
124156 +    for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
124157 +        U32 const curr = (U32)(ip - base);
124158 +        U32 i;
124159 +        for (i = 0; i < fastHashFillStep; ++i) {
124160 +            size_t const smHash = ZSTD_hashPtr(ip + i, hBitsS, mls);
124161 +            size_t const lgHash = ZSTD_hashPtr(ip + i, hBitsL, 8);
124162 +            if (i == 0)
124163 +                hashSmall[smHash] = curr + i;
124164 +            if (i == 0 || hashLarge[lgHash] == 0)
124165 +                hashLarge[lgHash] = curr + i;
124166 +            /* Only load extra positions for ZSTD_dtlm_full */
124167 +            if (dtlm == ZSTD_dtlm_fast)
124168 +                break;
124169 +    }   }
124173 +FORCE_INLINE_TEMPLATE
124174 +size_t ZSTD_compressBlock_doubleFast_generic(
124175 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
124176 +        void const* src, size_t srcSize,
124177 +        U32 const mls /* template */, ZSTD_dictMode_e const dictMode)
124179 +    ZSTD_compressionParameters const* cParams = &ms->cParams;
124180 +    U32* const hashLong = ms->hashTable;
124181 +    const U32 hBitsL = cParams->hashLog;
124182 +    U32* const hashSmall = ms->chainTable;
124183 +    const U32 hBitsS = cParams->chainLog;
124184 +    const BYTE* const base = ms->window.base;
124185 +    const BYTE* const istart = (const BYTE*)src;
124186 +    const BYTE* ip = istart;
124187 +    const BYTE* anchor = istart;
124188 +    const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
124189 +    /* presumes that, if there is a dictionary, it must be using Attach mode */
124190 +    const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
124191 +    const BYTE* const prefixLowest = base + prefixLowestIndex;
124192 +    const BYTE* const iend = istart + srcSize;
124193 +    const BYTE* const ilimit = iend - HASH_READ_SIZE;
124194 +    U32 offset_1=rep[0], offset_2=rep[1];
124195 +    U32 offsetSaved = 0;
124197 +    const ZSTD_matchState_t* const dms = ms->dictMatchState;
124198 +    const ZSTD_compressionParameters* const dictCParams =
124199 +                                     dictMode == ZSTD_dictMatchState ?
124200 +                                     &dms->cParams : NULL;
124201 +    const U32* const dictHashLong  = dictMode == ZSTD_dictMatchState ?
124202 +                                     dms->hashTable : NULL;
124203 +    const U32* const dictHashSmall = dictMode == ZSTD_dictMatchState ?
124204 +                                     dms->chainTable : NULL;
124205 +    const U32 dictStartIndex       = dictMode == ZSTD_dictMatchState ?
124206 +                                     dms->window.dictLimit : 0;
124207 +    const BYTE* const dictBase     = dictMode == ZSTD_dictMatchState ?
124208 +                                     dms->window.base : NULL;
124209 +    const BYTE* const dictStart    = dictMode == ZSTD_dictMatchState ?
124210 +                                     dictBase + dictStartIndex : NULL;
124211 +    const BYTE* const dictEnd      = dictMode == ZSTD_dictMatchState ?
124212 +                                     dms->window.nextSrc : NULL;
124213 +    const U32 dictIndexDelta       = dictMode == ZSTD_dictMatchState ?
124214 +                                     prefixLowestIndex - (U32)(dictEnd - dictBase) :
124215 +                                     0;
124216 +    const U32 dictHBitsL           = dictMode == ZSTD_dictMatchState ?
124217 +                                     dictCParams->hashLog : hBitsL;
124218 +    const U32 dictHBitsS           = dictMode == ZSTD_dictMatchState ?
124219 +                                     dictCParams->chainLog : hBitsS;
124220 +    const U32 dictAndPrefixLength  = (U32)((ip - prefixLowest) + (dictEnd - dictStart));
124222 +    DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_generic");
124224 +    assert(dictMode == ZSTD_noDict || dictMode == ZSTD_dictMatchState);
124226 +    /* if a dictionary is attached, it must be within window range */
124227 +    if (dictMode == ZSTD_dictMatchState) {
124228 +        assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex);
124229 +    }
124231 +    /* init */
124232 +    ip += (dictAndPrefixLength == 0);
124233 +    if (dictMode == ZSTD_noDict) {
124234 +        U32 const curr = (U32)(ip - base);
124235 +        U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
124236 +        U32 const maxRep = curr - windowLow;
124237 +        if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
124238 +        if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
124239 +    }
124240 +    if (dictMode == ZSTD_dictMatchState) {
124241 +        /* dictMatchState repCode checks don't currently handle repCode == 0
124242 +         * disabling. */
124243 +        assert(offset_1 <= dictAndPrefixLength);
124244 +        assert(offset_2 <= dictAndPrefixLength);
124245 +    }
124247 +    /* Main Search Loop */
124248 +    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
124249 +        size_t mLength;
124250 +        U32 offset;
124251 +        size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
124252 +        size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
124253 +        size_t const dictHL = ZSTD_hashPtr(ip, dictHBitsL, 8);
124254 +        size_t const dictHS = ZSTD_hashPtr(ip, dictHBitsS, mls);
124255 +        U32 const curr = (U32)(ip-base);
124256 +        U32 const matchIndexL = hashLong[h2];
124257 +        U32 matchIndexS = hashSmall[h];
124258 +        const BYTE* matchLong = base + matchIndexL;
124259 +        const BYTE* match = base + matchIndexS;
124260 +        const U32 repIndex = curr + 1 - offset_1;
124261 +        const BYTE* repMatch = (dictMode == ZSTD_dictMatchState
124262 +                            && repIndex < prefixLowestIndex) ?
124263 +                               dictBase + (repIndex - dictIndexDelta) :
124264 +                               base + repIndex;
124265 +        hashLong[h2] = hashSmall[h] = curr;   /* update hash tables */
124267 +        /* check dictMatchState repcode */
124268 +        if (dictMode == ZSTD_dictMatchState
124269 +            && ((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
124270 +            && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
124271 +            const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
124272 +            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
124273 +            ip++;
124274 +            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
124275 +            goto _match_stored;
124276 +        }
124278 +        /* check noDict repcode */
124279 +        if ( dictMode == ZSTD_noDict
124280 +          && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
124281 +            mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
124282 +            ip++;
124283 +            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
124284 +            goto _match_stored;
124285 +        }
124287 +        if (matchIndexL > prefixLowestIndex) {
124288 +            /* check prefix long match */
124289 +            if (MEM_read64(matchLong) == MEM_read64(ip)) {
124290 +                mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
124291 +                offset = (U32)(ip-matchLong);
124292 +                while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
124293 +                goto _match_found;
124294 +            }
124295 +        } else if (dictMode == ZSTD_dictMatchState) {
124296 +            /* check dictMatchState long match */
124297 +            U32 const dictMatchIndexL = dictHashLong[dictHL];
124298 +            const BYTE* dictMatchL = dictBase + dictMatchIndexL;
124299 +            assert(dictMatchL < dictEnd);
124301 +            if (dictMatchL > dictStart && MEM_read64(dictMatchL) == MEM_read64(ip)) {
124302 +                mLength = ZSTD_count_2segments(ip+8, dictMatchL+8, iend, dictEnd, prefixLowest) + 8;
124303 +                offset = (U32)(curr - dictMatchIndexL - dictIndexDelta);
124304 +                while (((ip>anchor) & (dictMatchL>dictStart)) && (ip[-1] == dictMatchL[-1])) { ip--; dictMatchL--; mLength++; } /* catch up */
124305 +                goto _match_found;
124306 +        }   }
124308 +        if (matchIndexS > prefixLowestIndex) {
124309 +            /* check prefix short match */
124310 +            if (MEM_read32(match) == MEM_read32(ip)) {
124311 +                goto _search_next_long;
124312 +            }
124313 +        } else if (dictMode == ZSTD_dictMatchState) {
124314 +            /* check dictMatchState short match */
124315 +            U32 const dictMatchIndexS = dictHashSmall[dictHS];
124316 +            match = dictBase + dictMatchIndexS;
124317 +            matchIndexS = dictMatchIndexS + dictIndexDelta;
124319 +            if (match > dictStart && MEM_read32(match) == MEM_read32(ip)) {
124320 +                goto _search_next_long;
124321 +        }   }
124323 +        ip += ((ip-anchor) >> kSearchStrength) + 1;
124324 +#if defined(__aarch64__)
124325 +        PREFETCH_L1(ip+256);
124326 +#endif
124327 +        continue;
124329 +_search_next_long:
124331 +        {   size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
124332 +            size_t const dictHLNext = ZSTD_hashPtr(ip+1, dictHBitsL, 8);
124333 +            U32 const matchIndexL3 = hashLong[hl3];
124334 +            const BYTE* matchL3 = base + matchIndexL3;
124335 +            hashLong[hl3] = curr + 1;
124337 +            /* check prefix long +1 match */
124338 +            if (matchIndexL3 > prefixLowestIndex) {
124339 +                if (MEM_read64(matchL3) == MEM_read64(ip+1)) {
124340 +                    mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
124341 +                    ip++;
124342 +                    offset = (U32)(ip-matchL3);
124343 +                    while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
124344 +                    goto _match_found;
124345 +                }
124346 +            } else if (dictMode == ZSTD_dictMatchState) {
124347 +                /* check dict long +1 match */
124348 +                U32 const dictMatchIndexL3 = dictHashLong[dictHLNext];
124349 +                const BYTE* dictMatchL3 = dictBase + dictMatchIndexL3;
124350 +                assert(dictMatchL3 < dictEnd);
124351 +                if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip+1)) {
124352 +                    mLength = ZSTD_count_2segments(ip+1+8, dictMatchL3+8, iend, dictEnd, prefixLowest) + 8;
124353 +                    ip++;
124354 +                    offset = (U32)(curr + 1 - dictMatchIndexL3 - dictIndexDelta);
124355 +                    while (((ip>anchor) & (dictMatchL3>dictStart)) && (ip[-1] == dictMatchL3[-1])) { ip--; dictMatchL3--; mLength++; } /* catch up */
124356 +                    goto _match_found;
124357 +        }   }   }
124359 +        /* if no long +1 match, explore the short match we found */
124360 +        if (dictMode == ZSTD_dictMatchState && matchIndexS < prefixLowestIndex) {
124361 +            mLength = ZSTD_count_2segments(ip+4, match+4, iend, dictEnd, prefixLowest) + 4;
124362 +            offset = (U32)(curr - matchIndexS);
124363 +            while (((ip>anchor) & (match>dictStart)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
124364 +        } else {
124365 +            mLength = ZSTD_count(ip+4, match+4, iend) + 4;
124366 +            offset = (U32)(ip - match);
124367 +            while (((ip>anchor) & (match>prefixLowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
124368 +        }
124370 +        /* fall-through */
124372 +_match_found:
124373 +        offset_2 = offset_1;
124374 +        offset_1 = offset;
124376 +        ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
124378 +_match_stored:
124379 +        /* match found */
124380 +        ip += mLength;
124381 +        anchor = ip;
124383 +        if (ip <= ilimit) {
124384 +            /* Complementary insertion */
124385 +            /* done after iLimit test, as candidates could be > iend-8 */
124386 +            {   U32 const indexToInsert = curr+2;
124387 +                hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
124388 +                hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
124389 +                hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
124390 +                hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
124391 +            }
124393 +            /* check immediate repcode */
124394 +            if (dictMode == ZSTD_dictMatchState) {
124395 +                while (ip <= ilimit) {
124396 +                    U32 const current2 = (U32)(ip-base);
124397 +                    U32 const repIndex2 = current2 - offset_2;
124398 +                    const BYTE* repMatch2 = dictMode == ZSTD_dictMatchState
124399 +                        && repIndex2 < prefixLowestIndex ?
124400 +                            dictBase + repIndex2 - dictIndexDelta :
124401 +                            base + repIndex2;
124402 +                    if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
124403 +                       && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
124404 +                        const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend;
124405 +                        size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4;
124406 +                        U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
124407 +                        ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
124408 +                        hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
124409 +                        hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
124410 +                        ip += repLength2;
124411 +                        anchor = ip;
124412 +                        continue;
124413 +                    }
124414 +                    break;
124415 +            }   }
124417 +            if (dictMode == ZSTD_noDict) {
124418 +                while ( (ip <= ilimit)
124419 +                     && ( (offset_2>0)
124420 +                        & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
124421 +                    /* store sequence */
124422 +                    size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
124423 +                    U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff;  /* swap offset_2 <=> offset_1 */
124424 +                    hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
124425 +                    hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
124426 +                    ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, rLength-MINMATCH);
124427 +                    ip += rLength;
124428 +                    anchor = ip;
124429 +                    continue;   /* faster when present ... (?) */
124430 +        }   }   }
124431 +    }   /* while (ip < ilimit) */
124433 +    /* save reps for next block */
124434 +    rep[0] = offset_1 ? offset_1 : offsetSaved;
124435 +    rep[1] = offset_2 ? offset_2 : offsetSaved;
124437 +    /* Return the last literals size */
124438 +    return (size_t)(iend - anchor);
124442 +size_t ZSTD_compressBlock_doubleFast(
124443 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
124444 +        void const* src, size_t srcSize)
124446 +    const U32 mls = ms->cParams.minMatch;
124447 +    switch(mls)
124448 +    {
124449 +    default: /* includes case 3 */
124450 +    case 4 :
124451 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_noDict);
124452 +    case 5 :
124453 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_noDict);
124454 +    case 6 :
124455 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_noDict);
124456 +    case 7 :
124457 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_noDict);
124458 +    }
124462 +size_t ZSTD_compressBlock_doubleFast_dictMatchState(
124463 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
124464 +        void const* src, size_t srcSize)
124466 +    const U32 mls = ms->cParams.minMatch;
124467 +    switch(mls)
124468 +    {
124469 +    default: /* includes case 3 */
124470 +    case 4 :
124471 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_dictMatchState);
124472 +    case 5 :
124473 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_dictMatchState);
124474 +    case 6 :
124475 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_dictMatchState);
124476 +    case 7 :
124477 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_dictMatchState);
124478 +    }
124482 +static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
124483 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
124484 +        void const* src, size_t srcSize,
124485 +        U32 const mls /* template */)
124487 +    ZSTD_compressionParameters const* cParams = &ms->cParams;
124488 +    U32* const hashLong = ms->hashTable;
124489 +    U32  const hBitsL = cParams->hashLog;
124490 +    U32* const hashSmall = ms->chainTable;
124491 +    U32  const hBitsS = cParams->chainLog;
124492 +    const BYTE* const istart = (const BYTE*)src;
124493 +    const BYTE* ip = istart;
124494 +    const BYTE* anchor = istart;
124495 +    const BYTE* const iend = istart + srcSize;
124496 +    const BYTE* const ilimit = iend - 8;
124497 +    const BYTE* const base = ms->window.base;
124498 +    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
124499 +    const U32   lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
124500 +    const U32   dictStartIndex = lowLimit;
124501 +    const U32   dictLimit = ms->window.dictLimit;
124502 +    const U32   prefixStartIndex = (dictLimit > lowLimit) ? dictLimit : lowLimit;
124503 +    const BYTE* const prefixStart = base + prefixStartIndex;
124504 +    const BYTE* const dictBase = ms->window.dictBase;
124505 +    const BYTE* const dictStart = dictBase + dictStartIndex;
124506 +    const BYTE* const dictEnd = dictBase + prefixStartIndex;
124507 +    U32 offset_1=rep[0], offset_2=rep[1];
124509 +    DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_extDict_generic (srcSize=%zu)", srcSize);
124511 +    /* if extDict is invalidated due to maxDistance, switch to "regular" variant */
124512 +    if (prefixStartIndex == dictStartIndex)
124513 +        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, mls, ZSTD_noDict);
124515 +    /* Search Loop */
124516 +    while (ip < ilimit) {  /* < instead of <=, because (ip+1) */
124517 +        const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
124518 +        const U32 matchIndex = hashSmall[hSmall];
124519 +        const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
124520 +        const BYTE* match = matchBase + matchIndex;
124522 +        const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
124523 +        const U32 matchLongIndex = hashLong[hLong];
124524 +        const BYTE* const matchLongBase = matchLongIndex < prefixStartIndex ? dictBase : base;
124525 +        const BYTE* matchLong = matchLongBase + matchLongIndex;
124527 +        const U32 curr = (U32)(ip-base);
124528 +        const U32 repIndex = curr + 1 - offset_1;   /* offset_1 expected <= curr +1 */
124529 +        const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
124530 +        const BYTE* const repMatch = repBase + repIndex;
124531 +        size_t mLength;
124532 +        hashSmall[hSmall] = hashLong[hLong] = curr;   /* update hash table */
124534 +        if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */
124535 +            & (repIndex > dictStartIndex))
124536 +          && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
124537 +            const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
124538 +            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
124539 +            ip++;
124540 +            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
124541 +        } else {
124542 +            if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
124543 +                const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend;
124544 +                const BYTE* const lowMatchPtr = matchLongIndex < prefixStartIndex ? dictStart : prefixStart;
124545 +                U32 offset;
124546 +                mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, prefixStart) + 8;
124547 +                offset = curr - matchLongIndex;
124548 +                while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; }   /* catch up */
124549 +                offset_2 = offset_1;
124550 +                offset_1 = offset;
124551 +                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
124553 +            } else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) {
124554 +                size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
124555 +                U32 const matchIndex3 = hashLong[h3];
124556 +                const BYTE* const match3Base = matchIndex3 < prefixStartIndex ? dictBase : base;
124557 +                const BYTE* match3 = match3Base + matchIndex3;
124558 +                U32 offset;
124559 +                hashLong[h3] = curr + 1;
124560 +                if ( (matchIndex3 > dictStartIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
124561 +                    const BYTE* const matchEnd = matchIndex3 < prefixStartIndex ? dictEnd : iend;
124562 +                    const BYTE* const lowMatchPtr = matchIndex3 < prefixStartIndex ? dictStart : prefixStart;
124563 +                    mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, prefixStart) + 8;
124564 +                    ip++;
124565 +                    offset = curr+1 - matchIndex3;
124566 +                    while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
124567 +                } else {
124568 +                    const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
124569 +                    const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
124570 +                    mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
124571 +                    offset = curr - matchIndex;
124572 +                    while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
124573 +                }
124574 +                offset_2 = offset_1;
124575 +                offset_1 = offset;
124576 +                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
124578 +            } else {
124579 +                ip += ((ip-anchor) >> kSearchStrength) + 1;
124580 +                continue;
124581 +        }   }
124583 +        /* move to next sequence start */
124584 +        ip += mLength;
124585 +        anchor = ip;
124587 +        if (ip <= ilimit) {
124588 +            /* Complementary insertion */
124589 +            /* done after iLimit test, as candidates could be > iend-8 */
124590 +            {   U32 const indexToInsert = curr+2;
124591 +                hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
124592 +                hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
124593 +                hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
124594 +                hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
124595 +            }
124597 +            /* check immediate repcode */
124598 +            while (ip <= ilimit) {
124599 +                U32 const current2 = (U32)(ip-base);
124600 +                U32 const repIndex2 = current2 - offset_2;
124601 +                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
124602 +                if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3)   /* intentional overflow : ensure repIndex2 doesn't overlap dict + prefix */
124603 +                    & (repIndex2 > dictStartIndex))
124604 +                  && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
124605 +                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
124606 +                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
124607 +                    U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
124608 +                    ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
124609 +                    hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
124610 +                    hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
124611 +                    ip += repLength2;
124612 +                    anchor = ip;
124613 +                    continue;
124614 +                }
124615 +                break;
124616 +    }   }   }
124618 +    /* save reps for next block */
124619 +    rep[0] = offset_1;
124620 +    rep[1] = offset_2;
124622 +    /* Return the last literals size */
124623 +    return (size_t)(iend - anchor);
124627 +size_t ZSTD_compressBlock_doubleFast_extDict(
124628 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
124629 +        void const* src, size_t srcSize)
124631 +    U32 const mls = ms->cParams.minMatch;
124632 +    switch(mls)
124633 +    {
124634 +    default: /* includes case 3 */
124635 +    case 4 :
124636 +        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 4);
124637 +    case 5 :
124638 +        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 5);
124639 +    case 6 :
124640 +        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 6);
124641 +    case 7 :
124642 +        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 7);
124643 +    }
124645 diff --git a/lib/zstd/compress/zstd_double_fast.h b/lib/zstd/compress/zstd_double_fast.h
124646 new file mode 100644
124647 index 000000000000..6822bde65a1d
124648 --- /dev/null
124649 +++ b/lib/zstd/compress/zstd_double_fast.h
124650 @@ -0,0 +1,32 @@
124652 + * Copyright (c) Yann Collet, Facebook, Inc.
124653 + * All rights reserved.
124655 + * This source code is licensed under both the BSD-style license (found in the
124656 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
124657 + * in the COPYING file in the root directory of this source tree).
124658 + * You may select, at your option, one of the above-listed licenses.
124659 + */
124661 +#ifndef ZSTD_DOUBLE_FAST_H
124662 +#define ZSTD_DOUBLE_FAST_H
124665 +#include "../common/mem.h"      /* U32 */
124666 +#include "zstd_compress_internal.h"     /* ZSTD_CCtx, size_t */
124668 +void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
124669 +                              void const* end, ZSTD_dictTableLoadMethod_e dtlm);
124670 +size_t ZSTD_compressBlock_doubleFast(
124671 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
124672 +        void const* src, size_t srcSize);
124673 +size_t ZSTD_compressBlock_doubleFast_dictMatchState(
124674 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
124675 +        void const* src, size_t srcSize);
124676 +size_t ZSTD_compressBlock_doubleFast_extDict(
124677 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
124678 +        void const* src, size_t srcSize);
124682 +#endif /* ZSTD_DOUBLE_FAST_H */
124683 diff --git a/lib/zstd/compress/zstd_fast.c b/lib/zstd/compress/zstd_fast.c
124684 new file mode 100644
124685 index 000000000000..96b7d48e2868
124686 --- /dev/null
124687 +++ b/lib/zstd/compress/zstd_fast.c
124688 @@ -0,0 +1,496 @@
124690 + * Copyright (c) Yann Collet, Facebook, Inc.
124691 + * All rights reserved.
124693 + * This source code is licensed under both the BSD-style license (found in the
124694 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
124695 + * in the COPYING file in the root directory of this source tree).
124696 + * You may select, at your option, one of the above-listed licenses.
124697 + */
124699 +#include "zstd_compress_internal.h"  /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */
124700 +#include "zstd_fast.h"
124703 +void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
124704 +                        const void* const end,
124705 +                        ZSTD_dictTableLoadMethod_e dtlm)
124707 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
124708 +    U32* const hashTable = ms->hashTable;
124709 +    U32  const hBits = cParams->hashLog;
124710 +    U32  const mls = cParams->minMatch;
124711 +    const BYTE* const base = ms->window.base;
124712 +    const BYTE* ip = base + ms->nextToUpdate;
124713 +    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
124714 +    const U32 fastHashFillStep = 3;
124716 +    /* Always insert every fastHashFillStep position into the hash table.
124717 +     * Insert the other positions if their hash entry is empty.
124718 +     */
124719 +    for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
124720 +        U32 const curr = (U32)(ip - base);
124721 +        size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls);
124722 +        hashTable[hash0] = curr;
124723 +        if (dtlm == ZSTD_dtlm_fast) continue;
124724 +        /* Only load extra positions for ZSTD_dtlm_full */
124725 +        {   U32 p;
124726 +            for (p = 1; p < fastHashFillStep; ++p) {
124727 +                size_t const hash = ZSTD_hashPtr(ip + p, hBits, mls);
124728 +                if (hashTable[hash] == 0) {  /* not yet filled */
124729 +                    hashTable[hash] = curr + p;
124730 +    }   }   }   }
124734 +FORCE_INLINE_TEMPLATE size_t
124735 +ZSTD_compressBlock_fast_generic(
124736 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
124737 +        void const* src, size_t srcSize,
124738 +        U32 const mls)
124740 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
124741 +    U32* const hashTable = ms->hashTable;
124742 +    U32 const hlog = cParams->hashLog;
124743 +    /* support stepSize of 0 */
124744 +    size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1;
124745 +    const BYTE* const base = ms->window.base;
124746 +    const BYTE* const istart = (const BYTE*)src;
124747 +    /* We check ip0 (ip + 0) and ip1 (ip + 1) each loop */
124748 +    const BYTE* ip0 = istart;
124749 +    const BYTE* ip1;
124750 +    const BYTE* anchor = istart;
124751 +    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
124752 +    const U32   prefixStartIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
124753 +    const BYTE* const prefixStart = base + prefixStartIndex;
124754 +    const BYTE* const iend = istart + srcSize;
124755 +    const BYTE* const ilimit = iend - HASH_READ_SIZE;
124756 +    U32 offset_1=rep[0], offset_2=rep[1];
124757 +    U32 offsetSaved = 0;
124759 +    /* init */
124760 +    DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
124761 +    ip0 += (ip0 == prefixStart);
124762 +    ip1 = ip0 + 1;
124763 +    {   U32 const curr = (U32)(ip0 - base);
124764 +        U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
124765 +        U32 const maxRep = curr - windowLow;
124766 +        if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
124767 +        if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
124768 +    }
124770 +    /* Main Search Loop */
124771 +#ifdef __INTEL_COMPILER
124772 +    /* From intel 'The vector pragma indicates that the loop should be
124773 +     * vectorized if it is legal to do so'. Can be used together with
124774 +     * #pragma ivdep (but have opted to exclude that because intel
124775 +     * warns against using it).*/
124776 +    #pragma vector always
124777 +#endif
124778 +    while (ip1 < ilimit) {   /* < instead of <=, because check at ip0+2 */
124779 +        size_t mLength;
124780 +        BYTE const* ip2 = ip0 + 2;
124781 +        size_t const h0 = ZSTD_hashPtr(ip0, hlog, mls);
124782 +        U32 const val0 = MEM_read32(ip0);
124783 +        size_t const h1 = ZSTD_hashPtr(ip1, hlog, mls);
124784 +        U32 const val1 = MEM_read32(ip1);
124785 +        U32 const current0 = (U32)(ip0-base);
124786 +        U32 const current1 = (U32)(ip1-base);
124787 +        U32 const matchIndex0 = hashTable[h0];
124788 +        U32 const matchIndex1 = hashTable[h1];
124789 +        BYTE const* repMatch = ip2 - offset_1;
124790 +        const BYTE* match0 = base + matchIndex0;
124791 +        const BYTE* match1 = base + matchIndex1;
124792 +        U32 offcode;
124794 +#if defined(__aarch64__)
124795 +        PREFETCH_L1(ip0+256);
124796 +#endif
124798 +        hashTable[h0] = current0;   /* update hash table */
124799 +        hashTable[h1] = current1;   /* update hash table */
124801 +        assert(ip0 + 1 == ip1);
124803 +        if ((offset_1 > 0) & (MEM_read32(repMatch) == MEM_read32(ip2))) {
124804 +            mLength = (ip2[-1] == repMatch[-1]) ? 1 : 0;
124805 +            ip0 = ip2 - mLength;
124806 +            match0 = repMatch - mLength;
124807 +            mLength += 4;
124808 +            offcode = 0;
124809 +            goto _match;
124810 +        }
124811 +        if ((matchIndex0 > prefixStartIndex) && MEM_read32(match0) == val0) {
124812 +            /* found a regular match */
124813 +            goto _offset;
124814 +        }
124815 +        if ((matchIndex1 > prefixStartIndex) && MEM_read32(match1) == val1) {
124816 +            /* found a regular match after one literal */
124817 +            ip0 = ip1;
124818 +            match0 = match1;
124819 +            goto _offset;
124820 +        }
124821 +        {   size_t const step = ((size_t)(ip0-anchor) >> (kSearchStrength - 1)) + stepSize;
124822 +            assert(step >= 2);
124823 +            ip0 += step;
124824 +            ip1 += step;
124825 +            continue;
124826 +        }
124827 +_offset: /* Requires: ip0, match0 */
124828 +        /* Compute the offset code */
124829 +        offset_2 = offset_1;
124830 +        offset_1 = (U32)(ip0-match0);
124831 +        offcode = offset_1 + ZSTD_REP_MOVE;
124832 +        mLength = 4;
124833 +        /* Count the backwards match length */
124834 +        while (((ip0>anchor) & (match0>prefixStart))
124835 +             && (ip0[-1] == match0[-1])) { ip0--; match0--; mLength++; } /* catch up */
124837 +_match: /* Requires: ip0, match0, offcode */
124838 +        /* Count the forward length */
124839 +        mLength += ZSTD_count(ip0+mLength, match0+mLength, iend);
124840 +        ZSTD_storeSeq(seqStore, (size_t)(ip0-anchor), anchor, iend, offcode, mLength-MINMATCH);
124841 +        /* match found */
124842 +        ip0 += mLength;
124843 +        anchor = ip0;
124845 +        if (ip0 <= ilimit) {
124846 +            /* Fill Table */
124847 +            assert(base+current0+2 > istart);  /* check base overflow */
124848 +            hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2;  /* here because current+2 could be > iend-8 */
124849 +            hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
124851 +            if (offset_2 > 0) { /* offset_2==0 means offset_2 is invalidated */
124852 +                while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) ) {
124853 +                    /* store sequence */
124854 +                    size_t const rLength = ZSTD_count(ip0+4, ip0+4-offset_2, iend) + 4;
124855 +                    { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */
124856 +                    hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
124857 +                    ip0 += rLength;
124858 +                    ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, 0 /*offCode*/, rLength-MINMATCH);
124859 +                    anchor = ip0;
124860 +                    continue;   /* faster when present (confirmed on gcc-8) ... (?) */
124861 +        }   }   }
124862 +        ip1 = ip0 + 1;
124863 +    }
124865 +    /* save reps for next block */
124866 +    rep[0] = offset_1 ? offset_1 : offsetSaved;
124867 +    rep[1] = offset_2 ? offset_2 : offsetSaved;
124869 +    /* Return the last literals size */
124870 +    return (size_t)(iend - anchor);
124874 +size_t ZSTD_compressBlock_fast(
124875 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
124876 +        void const* src, size_t srcSize)
124878 +    U32 const mls = ms->cParams.minMatch;
124879 +    assert(ms->dictMatchState == NULL);
124880 +    switch(mls)
124881 +    {
124882 +    default: /* includes case 3 */
124883 +    case 4 :
124884 +        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4);
124885 +    case 5 :
124886 +        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5);
124887 +    case 6 :
124888 +        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6);
124889 +    case 7 :
124890 +        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7);
124891 +    }
124894 +FORCE_INLINE_TEMPLATE
124895 +size_t ZSTD_compressBlock_fast_dictMatchState_generic(
124896 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
124897 +        void const* src, size_t srcSize, U32 const mls)
124899 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
124900 +    U32* const hashTable = ms->hashTable;
124901 +    U32 const hlog = cParams->hashLog;
124902 +    /* support stepSize of 0 */
124903 +    U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
124904 +    const BYTE* const base = ms->window.base;
124905 +    const BYTE* const istart = (const BYTE*)src;
124906 +    const BYTE* ip = istart;
124907 +    const BYTE* anchor = istart;
124908 +    const U32   prefixStartIndex = ms->window.dictLimit;
124909 +    const BYTE* const prefixStart = base + prefixStartIndex;
124910 +    const BYTE* const iend = istart + srcSize;
124911 +    const BYTE* const ilimit = iend - HASH_READ_SIZE;
124912 +    U32 offset_1=rep[0], offset_2=rep[1];
124913 +    U32 offsetSaved = 0;
124915 +    const ZSTD_matchState_t* const dms = ms->dictMatchState;
124916 +    const ZSTD_compressionParameters* const dictCParams = &dms->cParams ;
124917 +    const U32* const dictHashTable = dms->hashTable;
124918 +    const U32 dictStartIndex       = dms->window.dictLimit;
124919 +    const BYTE* const dictBase     = dms->window.base;
124920 +    const BYTE* const dictStart    = dictBase + dictStartIndex;
124921 +    const BYTE* const dictEnd      = dms->window.nextSrc;
124922 +    const U32 dictIndexDelta       = prefixStartIndex - (U32)(dictEnd - dictBase);
124923 +    const U32 dictAndPrefixLength  = (U32)(ip - prefixStart + dictEnd - dictStart);
124924 +    const U32 dictHLog             = dictCParams->hashLog;
124926 +    /* if a dictionary is still attached, it necessarily means that
124927 +     * it is within window size. So we just check it. */
124928 +    const U32 maxDistance = 1U << cParams->windowLog;
124929 +    const U32 endIndex = (U32)((size_t)(ip - base) + srcSize);
124930 +    assert(endIndex - prefixStartIndex <= maxDistance);
124931 +    (void)maxDistance; (void)endIndex;   /* these variables are not used when assert() is disabled */
124933 +    /* ensure there will be no underflow
124934 +     * when translating a dict index into a local index */
124935 +    assert(prefixStartIndex >= (U32)(dictEnd - dictBase));
124937 +    /* init */
124938 +    DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic");
124939 +    ip += (dictAndPrefixLength == 0);
124940 +    /* dictMatchState repCode checks don't currently handle repCode == 0
124941 +     * disabling. */
124942 +    assert(offset_1 <= dictAndPrefixLength);
124943 +    assert(offset_2 <= dictAndPrefixLength);
124945 +    /* Main Search Loop */
124946 +    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
124947 +        size_t mLength;
124948 +        size_t const h = ZSTD_hashPtr(ip, hlog, mls);
124949 +        U32 const curr = (U32)(ip-base);
124950 +        U32 const matchIndex = hashTable[h];
124951 +        const BYTE* match = base + matchIndex;
124952 +        const U32 repIndex = curr + 1 - offset_1;
124953 +        const BYTE* repMatch = (repIndex < prefixStartIndex) ?
124954 +                               dictBase + (repIndex - dictIndexDelta) :
124955 +                               base + repIndex;
124956 +        hashTable[h] = curr;   /* update hash table */
124958 +        if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
124959 +          && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
124960 +            const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
124961 +            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
124962 +            ip++;
124963 +            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
124964 +        } else if ( (matchIndex <= prefixStartIndex) ) {
124965 +            size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
124966 +            U32 const dictMatchIndex = dictHashTable[dictHash];
124967 +            const BYTE* dictMatch = dictBase + dictMatchIndex;
124968 +            if (dictMatchIndex <= dictStartIndex ||
124969 +                MEM_read32(dictMatch) != MEM_read32(ip)) {
124970 +                assert(stepSize >= 1);
124971 +                ip += ((ip-anchor) >> kSearchStrength) + stepSize;
124972 +                continue;
124973 +            } else {
124974 +                /* found a dict match */
124975 +                U32 const offset = (U32)(curr-dictMatchIndex-dictIndexDelta);
124976 +                mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4;
124977 +                while (((ip>anchor) & (dictMatch>dictStart))
124978 +                     && (ip[-1] == dictMatch[-1])) {
124979 +                    ip--; dictMatch--; mLength++;
124980 +                } /* catch up */
124981 +                offset_2 = offset_1;
124982 +                offset_1 = offset;
124983 +                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
124984 +            }
124985 +        } else if (MEM_read32(match) != MEM_read32(ip)) {
124986 +            /* it's not a match, and we're not going to check the dictionary */
124987 +            assert(stepSize >= 1);
124988 +            ip += ((ip-anchor) >> kSearchStrength) + stepSize;
124989 +            continue;
124990 +        } else {
124991 +            /* found a regular match */
124992 +            U32 const offset = (U32)(ip-match);
124993 +            mLength = ZSTD_count(ip+4, match+4, iend) + 4;
124994 +            while (((ip>anchor) & (match>prefixStart))
124995 +                 && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
124996 +            offset_2 = offset_1;
124997 +            offset_1 = offset;
124998 +            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
124999 +        }
125001 +        /* match found */
125002 +        ip += mLength;
125003 +        anchor = ip;
125005 +        if (ip <= ilimit) {
125006 +            /* Fill Table */
125007 +            assert(base+curr+2 > istart);  /* check base overflow */
125008 +            hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2;  /* here because curr+2 could be > iend-8 */
125009 +            hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
125011 +            /* check immediate repcode */
125012 +            while (ip <= ilimit) {
125013 +                U32 const current2 = (U32)(ip-base);
125014 +                U32 const repIndex2 = current2 - offset_2;
125015 +                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
125016 +                        dictBase - dictIndexDelta + repIndex2 :
125017 +                        base + repIndex2;
125018 +                if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
125019 +                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
125020 +                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
125021 +                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
125022 +                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
125023 +                    ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
125024 +                    hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
125025 +                    ip += repLength2;
125026 +                    anchor = ip;
125027 +                    continue;
125028 +                }
125029 +                break;
125030 +            }
125031 +        }
125032 +    }
125034 +    /* save reps for next block */
125035 +    rep[0] = offset_1 ? offset_1 : offsetSaved;
125036 +    rep[1] = offset_2 ? offset_2 : offsetSaved;
125038 +    /* Return the last literals size */
125039 +    return (size_t)(iend - anchor);
125042 +size_t ZSTD_compressBlock_fast_dictMatchState(
125043 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
125044 +        void const* src, size_t srcSize)
125046 +    U32 const mls = ms->cParams.minMatch;
125047 +    assert(ms->dictMatchState != NULL);
125048 +    switch(mls)
125049 +    {
125050 +    default: /* includes case 3 */
125051 +    case 4 :
125052 +        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 4);
125053 +    case 5 :
125054 +        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 5);
125055 +    case 6 :
125056 +        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 6);
125057 +    case 7 :
125058 +        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 7);
125059 +    }
125063 +static size_t ZSTD_compressBlock_fast_extDict_generic(
125064 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
125065 +        void const* src, size_t srcSize, U32 const mls)
125067 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
125068 +    U32* const hashTable = ms->hashTable;
125069 +    U32 const hlog = cParams->hashLog;
125070 +    /* support stepSize of 0 */
125071 +    U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
125072 +    const BYTE* const base = ms->window.base;
125073 +    const BYTE* const dictBase = ms->window.dictBase;
125074 +    const BYTE* const istart = (const BYTE*)src;
125075 +    const BYTE* ip = istart;
125076 +    const BYTE* anchor = istart;
125077 +    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
125078 +    const U32   lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
125079 +    const U32   dictStartIndex = lowLimit;
125080 +    const BYTE* const dictStart = dictBase + dictStartIndex;
125081 +    const U32   dictLimit = ms->window.dictLimit;
125082 +    const U32   prefixStartIndex = dictLimit < lowLimit ? lowLimit : dictLimit;
125083 +    const BYTE* const prefixStart = base + prefixStartIndex;
125084 +    const BYTE* const dictEnd = dictBase + prefixStartIndex;
125085 +    const BYTE* const iend = istart + srcSize;
125086 +    const BYTE* const ilimit = iend - 8;
125087 +    U32 offset_1=rep[0], offset_2=rep[1];
125089 +    DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic (offset_1=%u)", offset_1);
125091 +    /* switch to "regular" variant if extDict is invalidated due to maxDistance */
125092 +    if (prefixStartIndex == dictStartIndex)
125093 +        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, mls);
125095 +    /* Search Loop */
125096 +    while (ip < ilimit) {  /* < instead of <=, because (ip+1) */
125097 +        const size_t h = ZSTD_hashPtr(ip, hlog, mls);
125098 +        const U32    matchIndex = hashTable[h];
125099 +        const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
125100 +        const BYTE*  match = matchBase + matchIndex;
125101 +        const U32    curr = (U32)(ip-base);
125102 +        const U32    repIndex = curr + 1 - offset_1;
125103 +        const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
125104 +        const BYTE* const repMatch = repBase + repIndex;
125105 +        hashTable[h] = curr;   /* update hash table */
125106 +        DEBUGLOG(7, "offset_1 = %u , curr = %u", offset_1, curr);
125107 +        assert(offset_1 <= curr +1);   /* check repIndex */
125109 +        if ( (((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > dictStartIndex))
125110 +           && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
125111 +            const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
125112 +            size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4;
125113 +            ip++;
125114 +            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, rLength-MINMATCH);
125115 +            ip += rLength;
125116 +            anchor = ip;
125117 +        } else {
125118 +            if ( (matchIndex < dictStartIndex) ||
125119 +                 (MEM_read32(match) != MEM_read32(ip)) ) {
125120 +                assert(stepSize >= 1);
125121 +                ip += ((ip-anchor) >> kSearchStrength) + stepSize;
125122 +                continue;
125123 +            }
125124 +            {   const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
125125 +                const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
125126 +                U32 const offset = curr - matchIndex;
125127 +                size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
125128 +                while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
125129 +                offset_2 = offset_1; offset_1 = offset;  /* update offset history */
125130 +                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
125131 +                ip += mLength;
125132 +                anchor = ip;
125133 +        }   }
125135 +        if (ip <= ilimit) {
125136 +            /* Fill Table */
125137 +            hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2;
125138 +            hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
125139 +            /* check immediate repcode */
125140 +            while (ip <= ilimit) {
125141 +                U32 const current2 = (U32)(ip-base);
125142 +                U32 const repIndex2 = current2 - offset_2;
125143 +                const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
125144 +                if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (repIndex2 > dictStartIndex))  /* intentional overflow */
125145 +                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
125146 +                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
125147 +                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
125148 +                    { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; }  /* swap offset_2 <=> offset_1 */
125149 +                    ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, 0 /*offcode*/, repLength2-MINMATCH);
125150 +                    hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
125151 +                    ip += repLength2;
125152 +                    anchor = ip;
125153 +                    continue;
125154 +                }
125155 +                break;
125156 +    }   }   }
125158 +    /* save reps for next block */
125159 +    rep[0] = offset_1;
125160 +    rep[1] = offset_2;
125162 +    /* Return the last literals size */
125163 +    return (size_t)(iend - anchor);
125167 +size_t ZSTD_compressBlock_fast_extDict(
125168 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
125169 +        void const* src, size_t srcSize)
125171 +    U32 const mls = ms->cParams.minMatch;
125172 +    switch(mls)
125173 +    {
125174 +    default: /* includes case 3 */
125175 +    case 4 :
125176 +        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 4);
125177 +    case 5 :
125178 +        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 5);
125179 +    case 6 :
125180 +        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 6);
125181 +    case 7 :
125182 +        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 7);
125183 +    }
125185 diff --git a/lib/zstd/compress/zstd_fast.h b/lib/zstd/compress/zstd_fast.h
125186 new file mode 100644
125187 index 000000000000..fddc2f532d21
125188 --- /dev/null
125189 +++ b/lib/zstd/compress/zstd_fast.h
125190 @@ -0,0 +1,31 @@
125192 + * Copyright (c) Yann Collet, Facebook, Inc.
125193 + * All rights reserved.
125195 + * This source code is licensed under both the BSD-style license (found in the
125196 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
125197 + * in the COPYING file in the root directory of this source tree).
125198 + * You may select, at your option, one of the above-listed licenses.
125199 + */
125201 +#ifndef ZSTD_FAST_H
125202 +#define ZSTD_FAST_H
125205 +#include "../common/mem.h"      /* U32 */
125206 +#include "zstd_compress_internal.h"
125208 +void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
125209 +                        void const* end, ZSTD_dictTableLoadMethod_e dtlm);
125210 +size_t ZSTD_compressBlock_fast(
125211 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
125212 +        void const* src, size_t srcSize);
125213 +size_t ZSTD_compressBlock_fast_dictMatchState(
125214 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
125215 +        void const* src, size_t srcSize);
125216 +size_t ZSTD_compressBlock_fast_extDict(
125217 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
125218 +        void const* src, size_t srcSize);
125221 +#endif /* ZSTD_FAST_H */
125222 diff --git a/lib/zstd/compress/zstd_lazy.c b/lib/zstd/compress/zstd_lazy.c
125223 new file mode 100644
125224 index 000000000000..39aa2569aabc
125225 --- /dev/null
125226 +++ b/lib/zstd/compress/zstd_lazy.c
125227 @@ -0,0 +1,1412 @@
125229 + * Copyright (c) Yann Collet, Facebook, Inc.
125230 + * All rights reserved.
125232 + * This source code is licensed under both the BSD-style license (found in the
125233 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
125234 + * in the COPYING file in the root directory of this source tree).
125235 + * You may select, at your option, one of the above-listed licenses.
125236 + */
125238 +#include "zstd_compress_internal.h"
125239 +#include "zstd_lazy.h"
125242 +/*-*************************************
125243 +*  Binary Tree search
125244 +***************************************/
125246 +static void
125247 +ZSTD_updateDUBT(ZSTD_matchState_t* ms,
125248 +                const BYTE* ip, const BYTE* iend,
125249 +                U32 mls)
125251 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
125252 +    U32* const hashTable = ms->hashTable;
125253 +    U32  const hashLog = cParams->hashLog;
125255 +    U32* const bt = ms->chainTable;
125256 +    U32  const btLog  = cParams->chainLog - 1;
125257 +    U32  const btMask = (1 << btLog) - 1;
125259 +    const BYTE* const base = ms->window.base;
125260 +    U32 const target = (U32)(ip - base);
125261 +    U32 idx = ms->nextToUpdate;
125263 +    if (idx != target)
125264 +        DEBUGLOG(7, "ZSTD_updateDUBT, from %u to %u (dictLimit:%u)",
125265 +                    idx, target, ms->window.dictLimit);
125266 +    assert(ip + 8 <= iend);   /* condition for ZSTD_hashPtr */
125267 +    (void)iend;
125269 +    assert(idx >= ms->window.dictLimit);   /* condition for valid base+idx */
125270 +    for ( ; idx < target ; idx++) {
125271 +        size_t const h  = ZSTD_hashPtr(base + idx, hashLog, mls);   /* assumption : ip + 8 <= iend */
125272 +        U32    const matchIndex = hashTable[h];
125274 +        U32*   const nextCandidatePtr = bt + 2*(idx&btMask);
125275 +        U32*   const sortMarkPtr  = nextCandidatePtr + 1;
125277 +        DEBUGLOG(8, "ZSTD_updateDUBT: insert %u", idx);
125278 +        hashTable[h] = idx;   /* Update Hash Table */
125279 +        *nextCandidatePtr = matchIndex;   /* update BT like a chain */
125280 +        *sortMarkPtr = ZSTD_DUBT_UNSORTED_MARK;
125281 +    }
125282 +    ms->nextToUpdate = target;
125286 +/** ZSTD_insertDUBT1() :
125287 + *  sort one already inserted but unsorted position
125288 + *  assumption : curr >= btlow == (curr - btmask)
125289 + *  doesn't fail */
125290 +static void
125291 +ZSTD_insertDUBT1(ZSTD_matchState_t* ms,
125292 +                 U32 curr, const BYTE* inputEnd,
125293 +                 U32 nbCompares, U32 btLow,
125294 +                 const ZSTD_dictMode_e dictMode)
125296 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
125297 +    U32* const bt = ms->chainTable;
125298 +    U32  const btLog  = cParams->chainLog - 1;
125299 +    U32  const btMask = (1 << btLog) - 1;
125300 +    size_t commonLengthSmaller=0, commonLengthLarger=0;
125301 +    const BYTE* const base = ms->window.base;
125302 +    const BYTE* const dictBase = ms->window.dictBase;
125303 +    const U32 dictLimit = ms->window.dictLimit;
125304 +    const BYTE* const ip = (curr>=dictLimit) ? base + curr : dictBase + curr;
125305 +    const BYTE* const iend = (curr>=dictLimit) ? inputEnd : dictBase + dictLimit;
125306 +    const BYTE* const dictEnd = dictBase + dictLimit;
125307 +    const BYTE* const prefixStart = base + dictLimit;
125308 +    const BYTE* match;
125309 +    U32* smallerPtr = bt + 2*(curr&btMask);
125310 +    U32* largerPtr  = smallerPtr + 1;
125311 +    U32 matchIndex = *smallerPtr;   /* this candidate is unsorted : next sorted candidate is reached through *smallerPtr, while *largerPtr contains previous unsorted candidate (which is already saved and can be overwritten) */
125312 +    U32 dummy32;   /* to be nullified at the end */
125313 +    U32 const windowValid = ms->window.lowLimit;
125314 +    U32 const maxDistance = 1U << cParams->windowLog;
125315 +    U32 const windowLow = (curr - windowValid > maxDistance) ? curr - maxDistance : windowValid;
125318 +    DEBUGLOG(8, "ZSTD_insertDUBT1(%u) (dictLimit=%u, lowLimit=%u)",
125319 +                curr, dictLimit, windowLow);
125320 +    assert(curr >= btLow);
125321 +    assert(ip < iend);   /* condition for ZSTD_count */
125323 +    while (nbCompares-- && (matchIndex > windowLow)) {
125324 +        U32* const nextPtr = bt + 2*(matchIndex & btMask);
125325 +        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
125326 +        assert(matchIndex < curr);
125327 +        /* note : all candidates are now supposed sorted,
125328 +         * but it's still possible to have nextPtr[1] == ZSTD_DUBT_UNSORTED_MARK
125329 +         * when a real index has the same value as ZSTD_DUBT_UNSORTED_MARK */
125331 +        if ( (dictMode != ZSTD_extDict)
125332 +          || (matchIndex+matchLength >= dictLimit)  /* both in current segment*/
125333 +          || (curr < dictLimit) /* both in extDict */) {
125334 +            const BYTE* const mBase = ( (dictMode != ZSTD_extDict)
125335 +                                     || (matchIndex+matchLength >= dictLimit)) ?
125336 +                                        base : dictBase;
125337 +            assert( (matchIndex+matchLength >= dictLimit)   /* might be wrong if extDict is incorrectly set to 0 */
125338 +                 || (curr < dictLimit) );
125339 +            match = mBase + matchIndex;
125340 +            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
125341 +        } else {
125342 +            match = dictBase + matchIndex;
125343 +            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
125344 +            if (matchIndex+matchLength >= dictLimit)
125345 +                match = base + matchIndex;   /* preparation for next read of match[matchLength] */
125346 +        }
125348 +        DEBUGLOG(8, "ZSTD_insertDUBT1: comparing %u with %u : found %u common bytes ",
125349 +                    curr, matchIndex, (U32)matchLength);
125351 +        if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
125352 +            break;   /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */
125353 +        }
125355 +        if (match[matchLength] < ip[matchLength]) {  /* necessarily within buffer */
125356 +            /* match is smaller than current */
125357 +            *smallerPtr = matchIndex;             /* update smaller idx */
125358 +            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
125359 +            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
125360 +            DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is smaller : next => %u",
125361 +                        matchIndex, btLow, nextPtr[1]);
125362 +            smallerPtr = nextPtr+1;               /* new "candidate" => larger than match, which was smaller than target */
125363 +            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous and closer to current */
125364 +        } else {
125365 +            /* match is larger than current */
125366 +            *largerPtr = matchIndex;
125367 +            commonLengthLarger = matchLength;
125368 +            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
125369 +            DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is larger => %u",
125370 +                        matchIndex, btLow, nextPtr[0]);
125371 +            largerPtr = nextPtr;
125372 +            matchIndex = nextPtr[0];
125373 +    }   }
125375 +    *smallerPtr = *largerPtr = 0;
125379 +static size_t
125380 +ZSTD_DUBT_findBetterDictMatch (
125381 +        ZSTD_matchState_t* ms,
125382 +        const BYTE* const ip, const BYTE* const iend,
125383 +        size_t* offsetPtr,
125384 +        size_t bestLength,
125385 +        U32 nbCompares,
125386 +        U32 const mls,
125387 +        const ZSTD_dictMode_e dictMode)
125389 +    const ZSTD_matchState_t * const dms = ms->dictMatchState;
125390 +    const ZSTD_compressionParameters* const dmsCParams = &dms->cParams;
125391 +    const U32 * const dictHashTable = dms->hashTable;
125392 +    U32         const hashLog = dmsCParams->hashLog;
125393 +    size_t      const h  = ZSTD_hashPtr(ip, hashLog, mls);
125394 +    U32               dictMatchIndex = dictHashTable[h];
125396 +    const BYTE* const base = ms->window.base;
125397 +    const BYTE* const prefixStart = base + ms->window.dictLimit;
125398 +    U32         const curr = (U32)(ip-base);
125399 +    const BYTE* const dictBase = dms->window.base;
125400 +    const BYTE* const dictEnd = dms->window.nextSrc;
125401 +    U32         const dictHighLimit = (U32)(dms->window.nextSrc - dms->window.base);
125402 +    U32         const dictLowLimit = dms->window.lowLimit;
125403 +    U32         const dictIndexDelta = ms->window.lowLimit - dictHighLimit;
125405 +    U32*        const dictBt = dms->chainTable;
125406 +    U32         const btLog  = dmsCParams->chainLog - 1;
125407 +    U32         const btMask = (1 << btLog) - 1;
125408 +    U32         const btLow = (btMask >= dictHighLimit - dictLowLimit) ? dictLowLimit : dictHighLimit - btMask;
125410 +    size_t commonLengthSmaller=0, commonLengthLarger=0;
125412 +    (void)dictMode;
125413 +    assert(dictMode == ZSTD_dictMatchState);
125415 +    while (nbCompares-- && (dictMatchIndex > dictLowLimit)) {
125416 +        U32* const nextPtr = dictBt + 2*(dictMatchIndex & btMask);
125417 +        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
125418 +        const BYTE* match = dictBase + dictMatchIndex;
125419 +        matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
125420 +        if (dictMatchIndex+matchLength >= dictHighLimit)
125421 +            match = base + dictMatchIndex + dictIndexDelta;   /* to prepare for next usage of match[matchLength] */
125423 +        if (matchLength > bestLength) {
125424 +            U32 matchIndex = dictMatchIndex + dictIndexDelta;
125425 +            if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) {
125426 +                DEBUGLOG(9, "ZSTD_DUBT_findBetterDictMatch(%u) : found better match length %u -> %u and offsetCode %u -> %u (dictMatchIndex %u, matchIndex %u)",
125427 +                    curr, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, ZSTD_REP_MOVE + curr - matchIndex, dictMatchIndex, matchIndex);
125428 +                bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex;
125429 +            }
125430 +            if (ip+matchLength == iend) {   /* reached end of input : ip[matchLength] is not valid, no way to know if it's larger or smaller than match */
125431 +                break;   /* drop, to guarantee consistency (miss a little bit of compression) */
125432 +            }
125433 +        }
125435 +        if (match[matchLength] < ip[matchLength]) {
125436 +            if (dictMatchIndex <= btLow) { break; }   /* beyond tree size, stop the search */
125437 +            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
125438 +            dictMatchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
125439 +        } else {
125440 +            /* match is larger than current */
125441 +            if (dictMatchIndex <= btLow) { break; }   /* beyond tree size, stop the search */
125442 +            commonLengthLarger = matchLength;
125443 +            dictMatchIndex = nextPtr[0];
125444 +        }
125445 +    }
125447 +    if (bestLength >= MINMATCH) {
125448 +        U32 const mIndex = curr - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
125449 +        DEBUGLOG(8, "ZSTD_DUBT_findBetterDictMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
125450 +                    curr, (U32)bestLength, (U32)*offsetPtr, mIndex);
125451 +    }
125452 +    return bestLength;
125457 +static size_t
125458 +ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
125459 +                        const BYTE* const ip, const BYTE* const iend,
125460 +                        size_t* offsetPtr,
125461 +                        U32 const mls,
125462 +                        const ZSTD_dictMode_e dictMode)
125464 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
125465 +    U32*   const hashTable = ms->hashTable;
125466 +    U32    const hashLog = cParams->hashLog;
125467 +    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
125468 +    U32          matchIndex  = hashTable[h];
125470 +    const BYTE* const base = ms->window.base;
125471 +    U32    const curr = (U32)(ip-base);
125472 +    U32    const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog);
125474 +    U32*   const bt = ms->chainTable;
125475 +    U32    const btLog  = cParams->chainLog - 1;
125476 +    U32    const btMask = (1 << btLog) - 1;
125477 +    U32    const btLow = (btMask >= curr) ? 0 : curr - btMask;
125478 +    U32    const unsortLimit = MAX(btLow, windowLow);
125480 +    U32*         nextCandidate = bt + 2*(matchIndex&btMask);
125481 +    U32*         unsortedMark = bt + 2*(matchIndex&btMask) + 1;
125482 +    U32          nbCompares = 1U << cParams->searchLog;
125483 +    U32          nbCandidates = nbCompares;
125484 +    U32          previousCandidate = 0;
125486 +    DEBUGLOG(7, "ZSTD_DUBT_findBestMatch (%u) ", curr);
125487 +    assert(ip <= iend-8);   /* required for h calculation */
125488 +    assert(dictMode != ZSTD_dedicatedDictSearch);
125490 +    /* reach end of unsorted candidates list */
125491 +    while ( (matchIndex > unsortLimit)
125492 +         && (*unsortedMark == ZSTD_DUBT_UNSORTED_MARK)
125493 +         && (nbCandidates > 1) ) {
125494 +        DEBUGLOG(8, "ZSTD_DUBT_findBestMatch: candidate %u is unsorted",
125495 +                    matchIndex);
125496 +        *unsortedMark = previousCandidate;  /* the unsortedMark becomes a reversed chain, to move up back to original position */
125497 +        previousCandidate = matchIndex;
125498 +        matchIndex = *nextCandidate;
125499 +        nextCandidate = bt + 2*(matchIndex&btMask);
125500 +        unsortedMark = bt + 2*(matchIndex&btMask) + 1;
125501 +        nbCandidates --;
125502 +    }
125504 +    /* nullify last candidate if it's still unsorted
125505 +     * simplification, detrimental to compression ratio, beneficial for speed */
125506 +    if ( (matchIndex > unsortLimit)
125507 +      && (*unsortedMark==ZSTD_DUBT_UNSORTED_MARK) ) {
125508 +        DEBUGLOG(7, "ZSTD_DUBT_findBestMatch: nullify last unsorted candidate %u",
125509 +                    matchIndex);
125510 +        *nextCandidate = *unsortedMark = 0;
125511 +    }
125513 +    /* batch sort stacked candidates */
125514 +    matchIndex = previousCandidate;
125515 +    while (matchIndex) {  /* will end on matchIndex == 0 */
125516 +        U32* const nextCandidateIdxPtr = bt + 2*(matchIndex&btMask) + 1;
125517 +        U32 const nextCandidateIdx = *nextCandidateIdxPtr;
125518 +        ZSTD_insertDUBT1(ms, matchIndex, iend,
125519 +                         nbCandidates, unsortLimit, dictMode);
125520 +        matchIndex = nextCandidateIdx;
125521 +        nbCandidates++;
125522 +    }
125524 +    /* find longest match */
125525 +    {   size_t commonLengthSmaller = 0, commonLengthLarger = 0;
125526 +        const BYTE* const dictBase = ms->window.dictBase;
125527 +        const U32 dictLimit = ms->window.dictLimit;
125528 +        const BYTE* const dictEnd = dictBase + dictLimit;
125529 +        const BYTE* const prefixStart = base + dictLimit;
125530 +        U32* smallerPtr = bt + 2*(curr&btMask);
125531 +        U32* largerPtr  = bt + 2*(curr&btMask) + 1;
125532 +        U32 matchEndIdx = curr + 8 + 1;
125533 +        U32 dummy32;   /* to be nullified at the end */
125534 +        size_t bestLength = 0;
125536 +        matchIndex  = hashTable[h];
125537 +        hashTable[h] = curr;   /* Update Hash Table */
125539 +        while (nbCompares-- && (matchIndex > windowLow)) {
125540 +            U32* const nextPtr = bt + 2*(matchIndex & btMask);
125541 +            size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
125542 +            const BYTE* match;
125544 +            if ((dictMode != ZSTD_extDict) || (matchIndex+matchLength >= dictLimit)) {
125545 +                match = base + matchIndex;
125546 +                matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
125547 +            } else {
125548 +                match = dictBase + matchIndex;
125549 +                matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
125550 +                if (matchIndex+matchLength >= dictLimit)
125551 +                    match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
125552 +            }
125554 +            if (matchLength > bestLength) {
125555 +                if (matchLength > matchEndIdx - matchIndex)
125556 +                    matchEndIdx = matchIndex + (U32)matchLength;
125557 +                if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
125558 +                    bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex;
125559 +                if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
125560 +                    if (dictMode == ZSTD_dictMatchState) {
125561 +                        nbCompares = 0; /* in addition to avoiding checking any
125562 +                                         * further in this loop, make sure we
125563 +                                         * skip checking in the dictionary. */
125564 +                    }
125565 +                    break;   /* drop, to guarantee consistency (miss a little bit of compression) */
125566 +                }
125567 +            }
125569 +            if (match[matchLength] < ip[matchLength]) {
125570 +                /* match is smaller than current */
125571 +                *smallerPtr = matchIndex;             /* update smaller idx */
125572 +                commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
125573 +                if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
125574 +                smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
125575 +                matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
125576 +            } else {
125577 +                /* match is larger than current */
125578 +                *largerPtr = matchIndex;
125579 +                commonLengthLarger = matchLength;
125580 +                if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
125581 +                largerPtr = nextPtr;
125582 +                matchIndex = nextPtr[0];
125583 +        }   }
125585 +        *smallerPtr = *largerPtr = 0;
125587 +        if (dictMode == ZSTD_dictMatchState && nbCompares) {
125588 +            bestLength = ZSTD_DUBT_findBetterDictMatch(
125589 +                    ms, ip, iend,
125590 +                    offsetPtr, bestLength, nbCompares,
125591 +                    mls, dictMode);
125592 +        }
125594 +        assert(matchEndIdx > curr+8); /* ensure nextToUpdate is increased */
125595 +        ms->nextToUpdate = matchEndIdx - 8;   /* skip repetitive patterns */
125596 +        if (bestLength >= MINMATCH) {
125597 +            U32 const mIndex = curr - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
125598 +            DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
125599 +                        curr, (U32)bestLength, (U32)*offsetPtr, mIndex);
125600 +        }
125601 +        return bestLength;
125602 +    }
125606 +/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
125607 +FORCE_INLINE_TEMPLATE size_t
125608 +ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms,
125609 +                const BYTE* const ip, const BYTE* const iLimit,
125610 +                      size_t* offsetPtr,
125611 +                const U32 mls /* template */,
125612 +                const ZSTD_dictMode_e dictMode)
125614 +    DEBUGLOG(7, "ZSTD_BtFindBestMatch");
125615 +    if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
125616 +    ZSTD_updateDUBT(ms, ip, iLimit, mls);
125617 +    return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offsetPtr, mls, dictMode);
125621 +static size_t
125622 +ZSTD_BtFindBestMatch_selectMLS (  ZSTD_matchState_t* ms,
125623 +                            const BYTE* ip, const BYTE* const iLimit,
125624 +                                  size_t* offsetPtr)
125626 +    switch(ms->cParams.minMatch)
125627 +    {
125628 +    default : /* includes case 3 */
125629 +    case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
125630 +    case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict);
125631 +    case 7 :
125632 +    case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);
125633 +    }
125637 +static size_t ZSTD_BtFindBestMatch_dictMatchState_selectMLS (
125638 +                        ZSTD_matchState_t* ms,
125639 +                        const BYTE* ip, const BYTE* const iLimit,
125640 +                        size_t* offsetPtr)
125642 +    switch(ms->cParams.minMatch)
125643 +    {
125644 +    default : /* includes case 3 */
125645 +    case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);
125646 +    case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);
125647 +    case 7 :
125648 +    case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);
125649 +    }
125653 +static size_t ZSTD_BtFindBestMatch_extDict_selectMLS (
125654 +                        ZSTD_matchState_t* ms,
125655 +                        const BYTE* ip, const BYTE* const iLimit,
125656 +                        size_t* offsetPtr)
125658 +    switch(ms->cParams.minMatch)
125659 +    {
125660 +    default : /* includes case 3 */
125661 +    case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);
125662 +    case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);
125663 +    case 7 :
125664 +    case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);
125665 +    }
125670 +/* *********************************
125671 +*  Hash Chain
125672 +***********************************/
125673 +#define NEXT_IN_CHAIN(d, mask)   chainTable[(d) & (mask)]
125675 +/* Update chains up to ip (excluded)
125676 +   Assumption : always within prefix (i.e. not within extDict) */
125677 +FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal(
125678 +                        ZSTD_matchState_t* ms,
125679 +                        const ZSTD_compressionParameters* const cParams,
125680 +                        const BYTE* ip, U32 const mls)
125682 +    U32* const hashTable  = ms->hashTable;
125683 +    const U32 hashLog = cParams->hashLog;
125684 +    U32* const chainTable = ms->chainTable;
125685 +    const U32 chainMask = (1 << cParams->chainLog) - 1;
125686 +    const BYTE* const base = ms->window.base;
125687 +    const U32 target = (U32)(ip - base);
125688 +    U32 idx = ms->nextToUpdate;
125690 +    while(idx < target) { /* catch up */
125691 +        size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls);
125692 +        NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
125693 +        hashTable[h] = idx;
125694 +        idx++;
125695 +    }
125697 +    ms->nextToUpdate = target;
125698 +    return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
125701 +U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) {
125702 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
125703 +    return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch);
125706 +void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip)
125708 +    const BYTE* const base = ms->window.base;
125709 +    U32 const target = (U32)(ip - base);
125710 +    U32* const hashTable = ms->hashTable;
125711 +    U32* const chainTable = ms->chainTable;
125712 +    U32 const chainSize = 1 << ms->cParams.chainLog;
125713 +    U32 idx = ms->nextToUpdate;
125714 +    U32 const minChain = chainSize < target ? target - chainSize : idx;
125715 +    U32 const bucketSize = 1 << ZSTD_LAZY_DDSS_BUCKET_LOG;
125716 +    U32 const cacheSize = bucketSize - 1;
125717 +    U32 const chainAttempts = (1 << ms->cParams.searchLog) - cacheSize;
125718 +    U32 const chainLimit = chainAttempts > 255 ? 255 : chainAttempts;
125720 +    /* We know the hashtable is oversized by a factor of `bucketSize`.
125721 +     * We are going to temporarily pretend `bucketSize == 1`, keeping only a
125722 +     * single entry. We will use the rest of the space to construct a temporary
125723 +     * chaintable.
125724 +     */
125725 +    U32 const hashLog = ms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG;
125726 +    U32* const tmpHashTable = hashTable;
125727 +    U32* const tmpChainTable = hashTable + ((size_t)1 << hashLog);
125728 +    U32 const tmpChainSize = ((1 << ZSTD_LAZY_DDSS_BUCKET_LOG) - 1) << hashLog;
125729 +    U32 const tmpMinChain = tmpChainSize < target ? target - tmpChainSize : idx;
125731 +    U32 hashIdx;
125733 +    assert(ms->cParams.chainLog <= 24);
125734 +    assert(ms->cParams.hashLog >= ms->cParams.chainLog);
125735 +    assert(idx != 0);
125736 +    assert(tmpMinChain <= minChain);
125738 +    /* fill conventional hash table and conventional chain table */
125739 +    for ( ; idx < target; idx++) {
125740 +        U32 const h = (U32)ZSTD_hashPtr(base + idx, hashLog, ms->cParams.minMatch);
125741 +        if (idx >= tmpMinChain) {
125742 +            tmpChainTable[idx - tmpMinChain] = hashTable[h];
125743 +        }
125744 +        tmpHashTable[h] = idx;
125745 +    }
125747 +    /* sort chains into ddss chain table */
125748 +    {
125749 +        U32 chainPos = 0;
125750 +        for (hashIdx = 0; hashIdx < (1U << hashLog); hashIdx++) {
125751 +            U32 count;
125752 +            U32 countBeyondMinChain = 0;
125753 +            U32 i = tmpHashTable[hashIdx];
125754 +            for (count = 0; i >= tmpMinChain && count < cacheSize; count++) {
125755 +                /* skip through the chain to the first position that won't be
125756 +                 * in the hash cache bucket */
125757 +                if (i < minChain) {
125758 +                    countBeyondMinChain++;
125759 +                }
125760 +                i = tmpChainTable[i - tmpMinChain];
125761 +            }
125762 +            if (count == cacheSize) {
125763 +                for (count = 0; count < chainLimit;) {
125764 +                    if (i < minChain) {
125765 +                        if (!i || countBeyondMinChain++ > cacheSize) {
125766 +                            /* only allow pulling `cacheSize` number of entries
125767 +                             * into the cache or chainTable beyond `minChain`,
125768 +                             * to replace the entries pulled out of the
125769 +                             * chainTable into the cache. This lets us reach
125770 +                             * back further without increasing the total number
125771 +                             * of entries in the chainTable, guaranteeing the
125772 +                             * DDSS chain table will fit into the space
125773 +                             * allocated for the regular one. */
125774 +                            break;
125775 +                        }
125776 +                    }
125777 +                    chainTable[chainPos++] = i;
125778 +                    count++;
125779 +                    if (i < tmpMinChain) {
125780 +                        break;
125781 +                    }
125782 +                    i = tmpChainTable[i - tmpMinChain];
125783 +                }
125784 +            } else {
125785 +                count = 0;
125786 +            }
125787 +            if (count) {
125788 +                tmpHashTable[hashIdx] = ((chainPos - count) << 8) + count;
125789 +            } else {
125790 +                tmpHashTable[hashIdx] = 0;
125791 +            }
125792 +        }
125793 +        assert(chainPos <= chainSize); /* I believe this is guaranteed... */
125794 +    }
125796 +    /* move chain pointers into the last entry of each hash bucket */
125797 +    for (hashIdx = (1 << hashLog); hashIdx; ) {
125798 +        U32 const bucketIdx = --hashIdx << ZSTD_LAZY_DDSS_BUCKET_LOG;
125799 +        U32 const chainPackedPointer = tmpHashTable[hashIdx];
125800 +        U32 i;
125801 +        for (i = 0; i < cacheSize; i++) {
125802 +            hashTable[bucketIdx + i] = 0;
125803 +        }
125804 +        hashTable[bucketIdx + bucketSize - 1] = chainPackedPointer;
125805 +    }
125807 +    /* fill the buckets of the hash table */
125808 +    for (idx = ms->nextToUpdate; idx < target; idx++) {
125809 +        U32 const h = (U32)ZSTD_hashPtr(base + idx, hashLog, ms->cParams.minMatch)
125810 +                   << ZSTD_LAZY_DDSS_BUCKET_LOG;
125811 +        U32 i;
125812 +        /* Shift hash cache down 1. */
125813 +        for (i = cacheSize - 1; i; i--)
125814 +            hashTable[h + i] = hashTable[h + i - 1];
125815 +        hashTable[h] = idx;
125816 +    }
125818 +    ms->nextToUpdate = target;
125822 +/* inlining is important to hardwire a hot branch (template emulation) */
125823 +FORCE_INLINE_TEMPLATE
125824 +size_t ZSTD_HcFindBestMatch_generic (
125825 +                        ZSTD_matchState_t* ms,
125826 +                        const BYTE* const ip, const BYTE* const iLimit,
125827 +                        size_t* offsetPtr,
125828 +                        const U32 mls, const ZSTD_dictMode_e dictMode)
125830 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
125831 +    U32* const chainTable = ms->chainTable;
125832 +    const U32 chainSize = (1 << cParams->chainLog);
125833 +    const U32 chainMask = chainSize-1;
125834 +    const BYTE* const base = ms->window.base;
125835 +    const BYTE* const dictBase = ms->window.dictBase;
125836 +    const U32 dictLimit = ms->window.dictLimit;
125837 +    const BYTE* const prefixStart = base + dictLimit;
125838 +    const BYTE* const dictEnd = dictBase + dictLimit;
125839 +    const U32 curr = (U32)(ip-base);
125840 +    const U32 maxDistance = 1U << cParams->windowLog;
125841 +    const U32 lowestValid = ms->window.lowLimit;
125842 +    const U32 withinMaxDistance = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
125843 +    const U32 isDictionary = (ms->loadedDictEnd != 0);
125844 +    const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance;
125845 +    const U32 minChain = curr > chainSize ? curr - chainSize : 0;
125846 +    U32 nbAttempts = 1U << cParams->searchLog;
125847 +    size_t ml=4-1;
125849 +    const ZSTD_matchState_t* const dms = ms->dictMatchState;
125850 +    const U32 ddsHashLog = dictMode == ZSTD_dedicatedDictSearch
125851 +                         ? dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG : 0;
125852 +    const size_t ddsIdx = dictMode == ZSTD_dedicatedDictSearch
125853 +                        ? ZSTD_hashPtr(ip, ddsHashLog, mls) << ZSTD_LAZY_DDSS_BUCKET_LOG : 0;
125855 +    U32 matchIndex;
125857 +    if (dictMode == ZSTD_dedicatedDictSearch) {
125858 +        const U32* entry = &dms->hashTable[ddsIdx];
125859 +        PREFETCH_L1(entry);
125860 +    }
125862 +    /* HC4 match finder */
125863 +    matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls);
125865 +    for ( ; (matchIndex>=lowLimit) & (nbAttempts>0) ; nbAttempts--) {
125866 +        size_t currentMl=0;
125867 +        if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
125868 +            const BYTE* const match = base + matchIndex;
125869 +            assert(matchIndex >= dictLimit);   /* ensures this is true if dictMode != ZSTD_extDict */
125870 +            if (match[ml] == ip[ml])   /* potentially better */
125871 +                currentMl = ZSTD_count(ip, match, iLimit);
125872 +        } else {
125873 +            const BYTE* const match = dictBase + matchIndex;
125874 +            assert(match+4 <= dictEnd);
125875 +            if (MEM_read32(match) == MEM_read32(ip))   /* assumption : matchIndex <= dictLimit-4 (by table construction) */
125876 +                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
125877 +        }
125879 +        /* save best solution */
125880 +        if (currentMl > ml) {
125881 +            ml = currentMl;
125882 +            *offsetPtr = curr - matchIndex + ZSTD_REP_MOVE;
125883 +            if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
125884 +        }
125886 +        if (matchIndex <= minChain) break;
125887 +        matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
125888 +    }
125890 +    if (dictMode == ZSTD_dedicatedDictSearch) {
125891 +        const U32 ddsLowestIndex  = dms->window.dictLimit;
125892 +        const BYTE* const ddsBase = dms->window.base;
125893 +        const BYTE* const ddsEnd  = dms->window.nextSrc;
125894 +        const U32 ddsSize         = (U32)(ddsEnd - ddsBase);
125895 +        const U32 ddsIndexDelta   = dictLimit - ddsSize;
125896 +        const U32 bucketSize      = (1 << ZSTD_LAZY_DDSS_BUCKET_LOG);
125897 +        const U32 bucketLimit     = nbAttempts < bucketSize - 1 ? nbAttempts : bucketSize - 1;
125898 +        U32 ddsAttempt;
125900 +        for (ddsAttempt = 0; ddsAttempt < bucketSize - 1; ddsAttempt++) {
125901 +            PREFETCH_L1(ddsBase + dms->hashTable[ddsIdx + ddsAttempt]);
125902 +        }
125904 +        {
125905 +            U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
125906 +            U32 const chainIndex = chainPackedPointer >> 8;
125908 +            PREFETCH_L1(&dms->chainTable[chainIndex]);
125909 +        }
125911 +        for (ddsAttempt = 0; ddsAttempt < bucketLimit; ddsAttempt++) {
125912 +            size_t currentMl=0;
125913 +            const BYTE* match;
125914 +            matchIndex = dms->hashTable[ddsIdx + ddsAttempt];
125915 +            match = ddsBase + matchIndex;
125917 +            if (!matchIndex) {
125918 +                return ml;
125919 +            }
125921 +            /* guaranteed by table construction */
125922 +            (void)ddsLowestIndex;
125923 +            assert(matchIndex >= ddsLowestIndex);
125924 +            assert(match+4 <= ddsEnd);
125925 +            if (MEM_read32(match) == MEM_read32(ip)) {
125926 +                /* assumption : matchIndex <= dictLimit-4 (by table construction) */
125927 +                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
125928 +            }
125930 +            /* save best solution */
125931 +            if (currentMl > ml) {
125932 +                ml = currentMl;
125933 +                *offsetPtr = curr - (matchIndex + ddsIndexDelta) + ZSTD_REP_MOVE;
125934 +                if (ip+currentMl == iLimit) {
125935 +                    /* best possible, avoids read overflow on next attempt */
125936 +                    return ml;
125937 +                }
125938 +            }
125939 +        }
125941 +        {
125942 +            U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
125943 +            U32 chainIndex = chainPackedPointer >> 8;
125944 +            U32 const chainLength = chainPackedPointer & 0xFF;
125945 +            U32 const chainAttempts = nbAttempts - ddsAttempt;
125946 +            U32 const chainLimit = chainAttempts > chainLength ? chainLength : chainAttempts;
125947 +            U32 chainAttempt;
125949 +            for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++) {
125950 +                PREFETCH_L1(ddsBase + dms->chainTable[chainIndex + chainAttempt]);
125951 +            }
125953 +            for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++, chainIndex++) {
125954 +                size_t currentMl=0;
125955 +                const BYTE* match;
125956 +                matchIndex = dms->chainTable[chainIndex];
125957 +                match = ddsBase + matchIndex;
125959 +                /* guaranteed by table construction */
125960 +                assert(matchIndex >= ddsLowestIndex);
125961 +                assert(match+4 <= ddsEnd);
125962 +                if (MEM_read32(match) == MEM_read32(ip)) {
125963 +                    /* assumption : matchIndex <= dictLimit-4 (by table construction) */
125964 +                    currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
125965 +                }
125967 +                /* save best solution */
125968 +                if (currentMl > ml) {
125969 +                    ml = currentMl;
125970 +                    *offsetPtr = curr - (matchIndex + ddsIndexDelta) + ZSTD_REP_MOVE;
125971 +                    if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
125972 +                }
125973 +            }
125974 +        }
125975 +    } else if (dictMode == ZSTD_dictMatchState) {
125976 +        const U32* const dmsChainTable = dms->chainTable;
125977 +        const U32 dmsChainSize         = (1 << dms->cParams.chainLog);
125978 +        const U32 dmsChainMask         = dmsChainSize - 1;
125979 +        const U32 dmsLowestIndex       = dms->window.dictLimit;
125980 +        const BYTE* const dmsBase      = dms->window.base;
125981 +        const BYTE* const dmsEnd       = dms->window.nextSrc;
125982 +        const U32 dmsSize              = (U32)(dmsEnd - dmsBase);
125983 +        const U32 dmsIndexDelta        = dictLimit - dmsSize;
125984 +        const U32 dmsMinChain = dmsSize > dmsChainSize ? dmsSize - dmsChainSize : 0;
125986 +        matchIndex = dms->hashTable[ZSTD_hashPtr(ip, dms->cParams.hashLog, mls)];
125988 +        for ( ; (matchIndex>=dmsLowestIndex) & (nbAttempts>0) ; nbAttempts--) {
125989 +            size_t currentMl=0;
125990 +            const BYTE* const match = dmsBase + matchIndex;
125991 +            assert(match+4 <= dmsEnd);
125992 +            if (MEM_read32(match) == MEM_read32(ip))   /* assumption : matchIndex <= dictLimit-4 (by table construction) */
125993 +                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4;
125995 +            /* save best solution */
125996 +            if (currentMl > ml) {
125997 +                ml = currentMl;
125998 +                *offsetPtr = curr - (matchIndex + dmsIndexDelta) + ZSTD_REP_MOVE;
125999 +                if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
126000 +            }
126002 +            if (matchIndex <= dmsMinChain) break;
126004 +            matchIndex = dmsChainTable[matchIndex & dmsChainMask];
126005 +        }
126006 +    }
126008 +    return ml;
126012 +FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS (
126013 +                        ZSTD_matchState_t* ms,
126014 +                        const BYTE* ip, const BYTE* const iLimit,
126015 +                        size_t* offsetPtr)
126017 +    switch(ms->cParams.minMatch)
126018 +    {
126019 +    default : /* includes case 3 */
126020 +    case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
126021 +    case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict);
126022 +    case 7 :
126023 +    case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);
126024 +    }
126028 +static size_t ZSTD_HcFindBestMatch_dictMatchState_selectMLS (
126029 +                        ZSTD_matchState_t* ms,
126030 +                        const BYTE* ip, const BYTE* const iLimit,
126031 +                        size_t* offsetPtr)
126033 +    switch(ms->cParams.minMatch)
126034 +    {
126035 +    default : /* includes case 3 */
126036 +    case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);
126037 +    case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);
126038 +    case 7 :
126039 +    case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);
126040 +    }
126044 +static size_t ZSTD_HcFindBestMatch_dedicatedDictSearch_selectMLS (
126045 +                        ZSTD_matchState_t* ms,
126046 +                        const BYTE* ip, const BYTE* const iLimit,
126047 +                        size_t* offsetPtr)
126049 +    switch(ms->cParams.minMatch)
126050 +    {
126051 +    default : /* includes case 3 */
126052 +    case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dedicatedDictSearch);
126053 +    case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_dedicatedDictSearch);
126054 +    case 7 :
126055 +    case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dedicatedDictSearch);
126056 +    }
126060 +FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
126061 +                        ZSTD_matchState_t* ms,
126062 +                        const BYTE* ip, const BYTE* const iLimit,
126063 +                        size_t* offsetPtr)
126065 +    switch(ms->cParams.minMatch)
126066 +    {
126067 +    default : /* includes case 3 */
126068 +    case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);
126069 +    case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);
126070 +    case 7 :
126071 +    case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);
126072 +    }
126076 +/* *******************************
126077 +*  Common parser - lazy strategy
126078 +*********************************/
126079 +typedef enum { search_hashChain, search_binaryTree } searchMethod_e;
126081 +FORCE_INLINE_TEMPLATE size_t
126082 +ZSTD_compressBlock_lazy_generic(
126083 +                        ZSTD_matchState_t* ms, seqStore_t* seqStore,
126084 +                        U32 rep[ZSTD_REP_NUM],
126085 +                        const void* src, size_t srcSize,
126086 +                        const searchMethod_e searchMethod, const U32 depth,
126087 +                        ZSTD_dictMode_e const dictMode)
126089 +    const BYTE* const istart = (const BYTE*)src;
126090 +    const BYTE* ip = istart;
126091 +    const BYTE* anchor = istart;
126092 +    const BYTE* const iend = istart + srcSize;
126093 +    const BYTE* const ilimit = iend - 8;
126094 +    const BYTE* const base = ms->window.base;
126095 +    const U32 prefixLowestIndex = ms->window.dictLimit;
126096 +    const BYTE* const prefixLowest = base + prefixLowestIndex;
126098 +    typedef size_t (*searchMax_f)(
126099 +                        ZSTD_matchState_t* ms,
126100 +                        const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
126102 +    /**
126103 +     * This table is indexed first by the four ZSTD_dictMode_e values, and then
126104 +     * by the two searchMethod_e values. NULLs are placed for configurations
126105 +     * that should never occur (extDict modes go to the other implementation
126106 +     * below and there is no DDSS for binary tree search yet).
126107 +     */
126108 +    const searchMax_f searchFuncs[4][2] = {
126109 +        {
126110 +            ZSTD_HcFindBestMatch_selectMLS,
126111 +            ZSTD_BtFindBestMatch_selectMLS
126112 +        },
126113 +        {
126114 +            NULL,
126115 +            NULL
126116 +        },
126117 +        {
126118 +            ZSTD_HcFindBestMatch_dictMatchState_selectMLS,
126119 +            ZSTD_BtFindBestMatch_dictMatchState_selectMLS
126120 +        },
126121 +        {
126122 +            ZSTD_HcFindBestMatch_dedicatedDictSearch_selectMLS,
126123 +            NULL
126124 +        }
126125 +    };
126127 +    searchMax_f const searchMax = searchFuncs[dictMode][searchMethod == search_binaryTree];
126128 +    U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0;
126130 +    const int isDMS = dictMode == ZSTD_dictMatchState;
126131 +    const int isDDS = dictMode == ZSTD_dedicatedDictSearch;
126132 +    const int isDxS = isDMS || isDDS;
126133 +    const ZSTD_matchState_t* const dms = ms->dictMatchState;
126134 +    const U32 dictLowestIndex      = isDxS ? dms->window.dictLimit : 0;
126135 +    const BYTE* const dictBase     = isDxS ? dms->window.base : NULL;
126136 +    const BYTE* const dictLowest   = isDxS ? dictBase + dictLowestIndex : NULL;
126137 +    const BYTE* const dictEnd      = isDxS ? dms->window.nextSrc : NULL;
126138 +    const U32 dictIndexDelta       = isDxS ?
126139 +                                     prefixLowestIndex - (U32)(dictEnd - dictBase) :
126140 +                                     0;
126141 +    const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictLowest));
126143 +    assert(searchMax != NULL);
126145 +    DEBUGLOG(5, "ZSTD_compressBlock_lazy_generic (dictMode=%u)", (U32)dictMode);
126147 +    /* init */
126148 +    ip += (dictAndPrefixLength == 0);
126149 +    if (dictMode == ZSTD_noDict) {
126150 +        U32 const curr = (U32)(ip - base);
126151 +        U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, ms->cParams.windowLog);
126152 +        U32 const maxRep = curr - windowLow;
126153 +        if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
126154 +        if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
126155 +    }
126156 +    if (isDxS) {
126157 +        /* dictMatchState repCode checks don't currently handle repCode == 0
126158 +         * disabling. */
126159 +        assert(offset_1 <= dictAndPrefixLength);
126160 +        assert(offset_2 <= dictAndPrefixLength);
126161 +    }
126163 +    /* Match Loop */
126164 +#if defined(__x86_64__)
126165 +    /* I've measured random a 5% speed loss on levels 5 & 6 (greedy) when the
126166 +     * code alignment is perturbed. To fix the instability align the loop on 32-bytes.
126167 +     */
126168 +    __asm__(".p2align 5");
126169 +#endif
126170 +    while (ip < ilimit) {
126171 +        size_t matchLength=0;
126172 +        size_t offset=0;
126173 +        const BYTE* start=ip+1;
126175 +        /* check repCode */
126176 +        if (isDxS) {
126177 +            const U32 repIndex = (U32)(ip - base) + 1 - offset_1;
126178 +            const BYTE* repMatch = ((dictMode == ZSTD_dictMatchState || dictMode == ZSTD_dedicatedDictSearch)
126179 +                                && repIndex < prefixLowestIndex) ?
126180 +                                   dictBase + (repIndex - dictIndexDelta) :
126181 +                                   base + repIndex;
126182 +            if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
126183 +                && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
126184 +                const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
126185 +                matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
126186 +                if (depth==0) goto _storeSequence;
126187 +            }
126188 +        }
126189 +        if ( dictMode == ZSTD_noDict
126190 +          && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
126191 +            matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
126192 +            if (depth==0) goto _storeSequence;
126193 +        }
126195 +        /* first search (depth 0) */
126196 +        {   size_t offsetFound = 999999999;
126197 +            size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
126198 +            if (ml2 > matchLength)
126199 +                matchLength = ml2, start = ip, offset=offsetFound;
126200 +        }
126202 +        if (matchLength < 4) {
126203 +            ip += ((ip-anchor) >> kSearchStrength) + 1;   /* jump faster over incompressible sections */
126204 +            continue;
126205 +        }
126207 +        /* let's try to find a better solution */
126208 +        if (depth>=1)
126209 +        while (ip<ilimit) {
126210 +            ip ++;
126211 +            if ( (dictMode == ZSTD_noDict)
126212 +              && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
126213 +                size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
126214 +                int const gain2 = (int)(mlRep * 3);
126215 +                int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
126216 +                if ((mlRep >= 4) && (gain2 > gain1))
126217 +                    matchLength = mlRep, offset = 0, start = ip;
126218 +            }
126219 +            if (isDxS) {
126220 +                const U32 repIndex = (U32)(ip - base) - offset_1;
126221 +                const BYTE* repMatch = repIndex < prefixLowestIndex ?
126222 +                               dictBase + (repIndex - dictIndexDelta) :
126223 +                               base + repIndex;
126224 +                if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
126225 +                    && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
126226 +                    const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
126227 +                    size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
126228 +                    int const gain2 = (int)(mlRep * 3);
126229 +                    int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
126230 +                    if ((mlRep >= 4) && (gain2 > gain1))
126231 +                        matchLength = mlRep, offset = 0, start = ip;
126232 +                }
126233 +            }
126234 +            {   size_t offset2=999999999;
126235 +                size_t const ml2 = searchMax(ms, ip, iend, &offset2);
126236 +                int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
126237 +                int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
126238 +                if ((ml2 >= 4) && (gain2 > gain1)) {
126239 +                    matchLength = ml2, offset = offset2, start = ip;
126240 +                    continue;   /* search a better one */
126241 +            }   }
126243 +            /* let's find an even better one */
126244 +            if ((depth==2) && (ip<ilimit)) {
126245 +                ip ++;
126246 +                if ( (dictMode == ZSTD_noDict)
126247 +                  && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
126248 +                    size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
126249 +                    int const gain2 = (int)(mlRep * 4);
126250 +                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
126251 +                    if ((mlRep >= 4) && (gain2 > gain1))
126252 +                        matchLength = mlRep, offset = 0, start = ip;
126253 +                }
126254 +                if (isDxS) {
126255 +                    const U32 repIndex = (U32)(ip - base) - offset_1;
126256 +                    const BYTE* repMatch = repIndex < prefixLowestIndex ?
126257 +                                   dictBase + (repIndex - dictIndexDelta) :
126258 +                                   base + repIndex;
126259 +                    if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
126260 +                        && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
126261 +                        const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
126262 +                        size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
126263 +                        int const gain2 = (int)(mlRep * 4);
126264 +                        int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
126265 +                        if ((mlRep >= 4) && (gain2 > gain1))
126266 +                            matchLength = mlRep, offset = 0, start = ip;
126267 +                    }
126268 +                }
126269 +                {   size_t offset2=999999999;
126270 +                    size_t const ml2 = searchMax(ms, ip, iend, &offset2);
126271 +                    int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
126272 +                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
126273 +                    if ((ml2 >= 4) && (gain2 > gain1)) {
126274 +                        matchLength = ml2, offset = offset2, start = ip;
126275 +                        continue;
126276 +            }   }   }
126277 +            break;  /* nothing found : store previous solution */
126278 +        }
126280 +        /* NOTE:
126281 +         * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior.
126282 +         * (-offset+ZSTD_REP_MOVE-1) is unsigned, and is added to start, which
126283 +         * overflows the pointer, which is undefined behavior.
126284 +         */
126285 +        /* catch up */
126286 +        if (offset) {
126287 +            if (dictMode == ZSTD_noDict) {
126288 +                while ( ((start > anchor) & (start - (offset-ZSTD_REP_MOVE) > prefixLowest))
126289 +                     && (start[-1] == (start-(offset-ZSTD_REP_MOVE))[-1]) )  /* only search for offset within prefix */
126290 +                    { start--; matchLength++; }
126291 +            }
126292 +            if (isDxS) {
126293 +                U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
126294 +                const BYTE* match = (matchIndex < prefixLowestIndex) ? dictBase + matchIndex - dictIndexDelta : base + matchIndex;
126295 +                const BYTE* const mStart = (matchIndex < prefixLowestIndex) ? dictLowest : prefixLowest;
126296 +                while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; }  /* catch up */
126297 +            }
126298 +            offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
126299 +        }
126300 +        /* store sequence */
126301 +_storeSequence:
126302 +        {   size_t const litLength = start - anchor;
126303 +            ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH);
126304 +            anchor = ip = start + matchLength;
126305 +        }
126307 +        /* check immediate repcode */
126308 +        if (isDxS) {
126309 +            while (ip <= ilimit) {
126310 +                U32 const current2 = (U32)(ip-base);
126311 +                U32 const repIndex = current2 - offset_2;
126312 +                const BYTE* repMatch = repIndex < prefixLowestIndex ?
126313 +                        dictBase - dictIndexDelta + repIndex :
126314 +                        base + repIndex;
126315 +                if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex) >= 3 /* intentional overflow */)
126316 +                   && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
126317 +                    const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend;
126318 +                    matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4;
126319 +                    offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap offset_2 <=> offset_1 */
126320 +                    ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
126321 +                    ip += matchLength;
126322 +                    anchor = ip;
126323 +                    continue;
126324 +                }
126325 +                break;
126326 +            }
126327 +        }
126329 +        if (dictMode == ZSTD_noDict) {
126330 +            while ( ((ip <= ilimit) & (offset_2>0))
126331 +                 && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) {
126332 +                /* store sequence */
126333 +                matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
126334 +                offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */
126335 +                ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
126336 +                ip += matchLength;
126337 +                anchor = ip;
126338 +                continue;   /* faster when present ... (?) */
126339 +    }   }   }
126341 +    /* Save reps for next block */
126342 +    rep[0] = offset_1 ? offset_1 : savedOffset;
126343 +    rep[1] = offset_2 ? offset_2 : savedOffset;
126345 +    /* Return the last literals size */
126346 +    return (size_t)(iend - anchor);
126350 +size_t ZSTD_compressBlock_btlazy2(
126351 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126352 +        void const* src, size_t srcSize)
126354 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict);
126357 +size_t ZSTD_compressBlock_lazy2(
126358 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126359 +        void const* src, size_t srcSize)
126361 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict);
126364 +size_t ZSTD_compressBlock_lazy(
126365 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126366 +        void const* src, size_t srcSize)
126368 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict);
126371 +size_t ZSTD_compressBlock_greedy(
126372 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126373 +        void const* src, size_t srcSize)
126375 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict);
126378 +size_t ZSTD_compressBlock_btlazy2_dictMatchState(
126379 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126380 +        void const* src, size_t srcSize)
126382 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState);
126385 +size_t ZSTD_compressBlock_lazy2_dictMatchState(
126386 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126387 +        void const* src, size_t srcSize)
126389 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dictMatchState);
126392 +size_t ZSTD_compressBlock_lazy_dictMatchState(
126393 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126394 +        void const* src, size_t srcSize)
126396 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState);
126399 +size_t ZSTD_compressBlock_greedy_dictMatchState(
126400 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126401 +        void const* src, size_t srcSize)
126403 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState);
126407 +size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
126408 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126409 +        void const* src, size_t srcSize)
126411 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dedicatedDictSearch);
126414 +size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
126415 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126416 +        void const* src, size_t srcSize)
126418 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dedicatedDictSearch);
126421 +size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
126422 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126423 +        void const* src, size_t srcSize)
126425 +    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dedicatedDictSearch);
126429 +FORCE_INLINE_TEMPLATE
126430 +size_t ZSTD_compressBlock_lazy_extDict_generic(
126431 +                        ZSTD_matchState_t* ms, seqStore_t* seqStore,
126432 +                        U32 rep[ZSTD_REP_NUM],
126433 +                        const void* src, size_t srcSize,
126434 +                        const searchMethod_e searchMethod, const U32 depth)
126436 +    const BYTE* const istart = (const BYTE*)src;
126437 +    const BYTE* ip = istart;
126438 +    const BYTE* anchor = istart;
126439 +    const BYTE* const iend = istart + srcSize;
126440 +    const BYTE* const ilimit = iend - 8;
126441 +    const BYTE* const base = ms->window.base;
126442 +    const U32 dictLimit = ms->window.dictLimit;
126443 +    const BYTE* const prefixStart = base + dictLimit;
126444 +    const BYTE* const dictBase = ms->window.dictBase;
126445 +    const BYTE* const dictEnd  = dictBase + dictLimit;
126446 +    const BYTE* const dictStart  = dictBase + ms->window.lowLimit;
126447 +    const U32 windowLog = ms->cParams.windowLog;
126449 +    typedef size_t (*searchMax_f)(
126450 +                        ZSTD_matchState_t* ms,
126451 +                        const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
126452 +    searchMax_f searchMax = searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_extDict_selectMLS : ZSTD_HcFindBestMatch_extDict_selectMLS;
126454 +    U32 offset_1 = rep[0], offset_2 = rep[1];
126456 +    DEBUGLOG(5, "ZSTD_compressBlock_lazy_extDict_generic");
126458 +    /* init */
126459 +    ip += (ip == prefixStart);
126461 +    /* Match Loop */
126462 +#if defined(__x86_64__)
126463 +    /* I've measured random a 5% speed loss on levels 5 & 6 (greedy) when the
126464 +     * code alignment is perturbed. To fix the instability align the loop on 32-bytes.
126465 +     */
126466 +    __asm__(".p2align 5");
126467 +#endif
126468 +    while (ip < ilimit) {
126469 +        size_t matchLength=0;
126470 +        size_t offset=0;
126471 +        const BYTE* start=ip+1;
126472 +        U32 curr = (U32)(ip-base);
126474 +        /* check repCode */
126475 +        {   const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr+1, windowLog);
126476 +            const U32 repIndex = (U32)(curr+1 - offset_1);
126477 +            const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
126478 +            const BYTE* const repMatch = repBase + repIndex;
126479 +            if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow))   /* intentional overflow */
126480 +            if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
126481 +                /* repcode detected we should take it */
126482 +                const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
126483 +                matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repEnd, prefixStart) + 4;
126484 +                if (depth==0) goto _storeSequence;
126485 +        }   }
126487 +        /* first search (depth 0) */
126488 +        {   size_t offsetFound = 999999999;
126489 +            size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
126490 +            if (ml2 > matchLength)
126491 +                matchLength = ml2, start = ip, offset=offsetFound;
126492 +        }
126494 +         if (matchLength < 4) {
126495 +            ip += ((ip-anchor) >> kSearchStrength) + 1;   /* jump faster over incompressible sections */
126496 +            continue;
126497 +        }
126499 +        /* let's try to find a better solution */
126500 +        if (depth>=1)
126501 +        while (ip<ilimit) {
126502 +            ip ++;
126503 +            curr++;
126504 +            /* check repCode */
126505 +            if (offset) {
126506 +                const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
126507 +                const U32 repIndex = (U32)(curr - offset_1);
126508 +                const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
126509 +                const BYTE* const repMatch = repBase + repIndex;
126510 +                if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow))  /* intentional overflow */
126511 +                if (MEM_read32(ip) == MEM_read32(repMatch)) {
126512 +                    /* repcode detected */
126513 +                    const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
126514 +                    size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
126515 +                    int const gain2 = (int)(repLength * 3);
126516 +                    int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
126517 +                    if ((repLength >= 4) && (gain2 > gain1))
126518 +                        matchLength = repLength, offset = 0, start = ip;
126519 +            }   }
126521 +            /* search match, depth 1 */
126522 +            {   size_t offset2=999999999;
126523 +                size_t const ml2 = searchMax(ms, ip, iend, &offset2);
126524 +                int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
126525 +                int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
126526 +                if ((ml2 >= 4) && (gain2 > gain1)) {
126527 +                    matchLength = ml2, offset = offset2, start = ip;
126528 +                    continue;   /* search a better one */
126529 +            }   }
126531 +            /* let's find an even better one */
126532 +            if ((depth==2) && (ip<ilimit)) {
126533 +                ip ++;
126534 +                curr++;
126535 +                /* check repCode */
126536 +                if (offset) {
126537 +                    const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
126538 +                    const U32 repIndex = (U32)(curr - offset_1);
126539 +                    const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
126540 +                    const BYTE* const repMatch = repBase + repIndex;
126541 +                    if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow))  /* intentional overflow */
126542 +                    if (MEM_read32(ip) == MEM_read32(repMatch)) {
126543 +                        /* repcode detected */
126544 +                        const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
126545 +                        size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
126546 +                        int const gain2 = (int)(repLength * 4);
126547 +                        int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
126548 +                        if ((repLength >= 4) && (gain2 > gain1))
126549 +                            matchLength = repLength, offset = 0, start = ip;
126550 +                }   }
126552 +                /* search match, depth 2 */
126553 +                {   size_t offset2=999999999;
126554 +                    size_t const ml2 = searchMax(ms, ip, iend, &offset2);
126555 +                    int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
126556 +                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
126557 +                    if ((ml2 >= 4) && (gain2 > gain1)) {
126558 +                        matchLength = ml2, offset = offset2, start = ip;
126559 +                        continue;
126560 +            }   }   }
126561 +            break;  /* nothing found : store previous solution */
126562 +        }
126564 +        /* catch up */
126565 +        if (offset) {
126566 +            U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
126567 +            const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
126568 +            const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
126569 +            while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; }  /* catch up */
126570 +            offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
126571 +        }
126573 +        /* store sequence */
126574 +_storeSequence:
126575 +        {   size_t const litLength = start - anchor;
126576 +            ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH);
126577 +            anchor = ip = start + matchLength;
126578 +        }
126580 +        /* check immediate repcode */
126581 +        while (ip <= ilimit) {
126582 +            const U32 repCurrent = (U32)(ip-base);
126583 +            const U32 windowLow = ZSTD_getLowestMatchIndex(ms, repCurrent, windowLog);
126584 +            const U32 repIndex = repCurrent - offset_2;
126585 +            const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
126586 +            const BYTE* const repMatch = repBase + repIndex;
126587 +            if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow))  /* intentional overflow */
126588 +            if (MEM_read32(ip) == MEM_read32(repMatch)) {
126589 +                /* repcode detected we should take it */
126590 +                const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
126591 +                matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
126592 +                offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap offset history */
126593 +                ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
126594 +                ip += matchLength;
126595 +                anchor = ip;
126596 +                continue;   /* faster when present ... (?) */
126597 +            }
126598 +            break;
126599 +    }   }
126601 +    /* Save reps for next block */
126602 +    rep[0] = offset_1;
126603 +    rep[1] = offset_2;
126605 +    /* Return the last literals size */
126606 +    return (size_t)(iend - anchor);
126610 +size_t ZSTD_compressBlock_greedy_extDict(
126611 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126612 +        void const* src, size_t srcSize)
126614 +    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0);
126617 +size_t ZSTD_compressBlock_lazy_extDict(
126618 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126619 +        void const* src, size_t srcSize)
126622 +    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1);
126625 +size_t ZSTD_compressBlock_lazy2_extDict(
126626 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126627 +        void const* src, size_t srcSize)
126630 +    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2);
126633 +size_t ZSTD_compressBlock_btlazy2_extDict(
126634 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126635 +        void const* src, size_t srcSize)
126638 +    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2);
126640 diff --git a/lib/zstd/compress/zstd_lazy.h b/lib/zstd/compress/zstd_lazy.h
126641 new file mode 100644
126642 index 000000000000..1fb7621e6a88
126643 --- /dev/null
126644 +++ b/lib/zstd/compress/zstd_lazy.h
126645 @@ -0,0 +1,81 @@
126647 + * Copyright (c) Yann Collet, Facebook, Inc.
126648 + * All rights reserved.
126650 + * This source code is licensed under both the BSD-style license (found in the
126651 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
126652 + * in the COPYING file in the root directory of this source tree).
126653 + * You may select, at your option, one of the above-listed licenses.
126654 + */
126656 +#ifndef ZSTD_LAZY_H
126657 +#define ZSTD_LAZY_H
126660 +#include "zstd_compress_internal.h"
126663 + * Dedicated Dictionary Search Structure bucket log. In the
126664 + * ZSTD_dedicatedDictSearch mode, the hashTable has
126665 + * 2 ** ZSTD_LAZY_DDSS_BUCKET_LOG entries in each bucket, rather than just
126666 + * one.
126667 + */
126668 +#define ZSTD_LAZY_DDSS_BUCKET_LOG 2
126670 +U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip);
126672 +void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip);
126674 +void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue);  /*! used in ZSTD_reduceIndex(). preemptively increase value of ZSTD_DUBT_UNSORTED_MARK */
126676 +size_t ZSTD_compressBlock_btlazy2(
126677 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126678 +        void const* src, size_t srcSize);
126679 +size_t ZSTD_compressBlock_lazy2(
126680 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126681 +        void const* src, size_t srcSize);
126682 +size_t ZSTD_compressBlock_lazy(
126683 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126684 +        void const* src, size_t srcSize);
126685 +size_t ZSTD_compressBlock_greedy(
126686 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126687 +        void const* src, size_t srcSize);
126689 +size_t ZSTD_compressBlock_btlazy2_dictMatchState(
126690 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126691 +        void const* src, size_t srcSize);
126692 +size_t ZSTD_compressBlock_lazy2_dictMatchState(
126693 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126694 +        void const* src, size_t srcSize);
126695 +size_t ZSTD_compressBlock_lazy_dictMatchState(
126696 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126697 +        void const* src, size_t srcSize);
126698 +size_t ZSTD_compressBlock_greedy_dictMatchState(
126699 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126700 +        void const* src, size_t srcSize);
126702 +size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
126703 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126704 +        void const* src, size_t srcSize);
126705 +size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
126706 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126707 +        void const* src, size_t srcSize);
126708 +size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
126709 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126710 +        void const* src, size_t srcSize);
126712 +size_t ZSTD_compressBlock_greedy_extDict(
126713 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126714 +        void const* src, size_t srcSize);
126715 +size_t ZSTD_compressBlock_lazy_extDict(
126716 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126717 +        void const* src, size_t srcSize);
126718 +size_t ZSTD_compressBlock_lazy2_extDict(
126719 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126720 +        void const* src, size_t srcSize);
126721 +size_t ZSTD_compressBlock_btlazy2_extDict(
126722 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
126723 +        void const* src, size_t srcSize);
126726 +#endif /* ZSTD_LAZY_H */
126727 diff --git a/lib/zstd/compress/zstd_ldm.c b/lib/zstd/compress/zstd_ldm.c
126728 new file mode 100644
126729 index 000000000000..084fd24fdca8
126730 --- /dev/null
126731 +++ b/lib/zstd/compress/zstd_ldm.c
126732 @@ -0,0 +1,686 @@
126734 + * Copyright (c) Yann Collet, Facebook, Inc.
126735 + * All rights reserved.
126737 + * This source code is licensed under both the BSD-style license (found in the
126738 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
126739 + * in the COPYING file in the root directory of this source tree).
126740 + * You may select, at your option, one of the above-listed licenses.
126741 + */
126743 +#include "zstd_ldm.h"
126745 +#include "../common/debug.h"
126746 +#include <linux/xxhash.h>
126747 +#include "zstd_fast.h"          /* ZSTD_fillHashTable() */
126748 +#include "zstd_double_fast.h"   /* ZSTD_fillDoubleHashTable() */
126749 +#include "zstd_ldm_geartab.h"
126751 +#define LDM_BUCKET_SIZE_LOG 3
126752 +#define LDM_MIN_MATCH_LENGTH 64
126753 +#define LDM_HASH_RLOG 7
126755 +typedef struct {
126756 +    U64 rolling;
126757 +    U64 stopMask;
126758 +} ldmRollingHashState_t;
126760 +/** ZSTD_ldm_gear_init():
126762 + * Initializes the rolling hash state such that it will honor the
126763 + * settings in params. */
126764 +static void ZSTD_ldm_gear_init(ldmRollingHashState_t* state, ldmParams_t const* params)
126766 +    unsigned maxBitsInMask = MIN(params->minMatchLength, 64);
126767 +    unsigned hashRateLog = params->hashRateLog;
126769 +    state->rolling = ~(U32)0;
126771 +    /* The choice of the splitting criterion is subject to two conditions:
126772 +     *   1. it has to trigger on average every 2^(hashRateLog) bytes;
126773 +     *   2. ideally, it has to depend on a window of minMatchLength bytes.
126774 +     *
126775 +     * In the gear hash algorithm, bit n depends on the last n bytes;
126776 +     * so in order to obtain a good quality splitting criterion it is
126777 +     * preferable to use bits with high weight.
126778 +     *
126779 +     * To match condition 1 we use a mask with hashRateLog bits set
126780 +     * and, because of the previous remark, we make sure these bits
126781 +     * have the highest possible weight while still respecting
126782 +     * condition 2.
126783 +     */
126784 +    if (hashRateLog > 0 && hashRateLog <= maxBitsInMask) {
126785 +        state->stopMask = (((U64)1 << hashRateLog) - 1) << (maxBitsInMask - hashRateLog);
126786 +    } else {
126787 +        /* In this degenerate case we simply honor the hash rate. */
126788 +        state->stopMask = ((U64)1 << hashRateLog) - 1;
126789 +    }
126792 +/** ZSTD_ldm_gear_feed():
126794 + * Registers in the splits array all the split points found in the first
126795 + * size bytes following the data pointer. This function terminates when
126796 + * either all the data has been processed or LDM_BATCH_SIZE splits are
126797 + * present in the splits array.
126799 + * Precondition: The splits array must not be full.
126800 + * Returns: The number of bytes processed. */
126801 +static size_t ZSTD_ldm_gear_feed(ldmRollingHashState_t* state,
126802 +                                 BYTE const* data, size_t size,
126803 +                                 size_t* splits, unsigned* numSplits)
126805 +    size_t n;
126806 +    U64 hash, mask;
126808 +    hash = state->rolling;
126809 +    mask = state->stopMask;
126810 +    n = 0;
126812 +#define GEAR_ITER_ONCE() do { \
126813 +        hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
126814 +        n += 1; \
126815 +        if (UNLIKELY((hash & mask) == 0)) { \
126816 +            splits[*numSplits] = n; \
126817 +            *numSplits += 1; \
126818 +            if (*numSplits == LDM_BATCH_SIZE) \
126819 +                goto done; \
126820 +        } \
126821 +    } while (0)
126823 +    while (n + 3 < size) {
126824 +        GEAR_ITER_ONCE();
126825 +        GEAR_ITER_ONCE();
126826 +        GEAR_ITER_ONCE();
126827 +        GEAR_ITER_ONCE();
126828 +    }
126829 +    while (n < size) {
126830 +        GEAR_ITER_ONCE();
126831 +    }
126833 +#undef GEAR_ITER_ONCE
126835 +done:
126836 +    state->rolling = hash;
126837 +    return n;
126840 +void ZSTD_ldm_adjustParameters(ldmParams_t* params,
126841 +                               ZSTD_compressionParameters const* cParams)
126843 +    params->windowLog = cParams->windowLog;
126844 +    ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);
126845 +    DEBUGLOG(4, "ZSTD_ldm_adjustParameters");
126846 +    if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
126847 +    if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH;
126848 +    if (params->hashLog == 0) {
126849 +        params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - LDM_HASH_RLOG);
126850 +        assert(params->hashLog <= ZSTD_HASHLOG_MAX);
126851 +    }
126852 +    if (params->hashRateLog == 0) {
126853 +        params->hashRateLog = params->windowLog < params->hashLog
126854 +                                   ? 0
126855 +                                   : params->windowLog - params->hashLog;
126856 +    }
126857 +    params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
126860 +size_t ZSTD_ldm_getTableSize(ldmParams_t params)
126862 +    size_t const ldmHSize = ((size_t)1) << params.hashLog;
126863 +    size_t const ldmBucketSizeLog = MIN(params.bucketSizeLog, params.hashLog);
126864 +    size_t const ldmBucketSize = ((size_t)1) << (params.hashLog - ldmBucketSizeLog);
126865 +    size_t const totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize)
126866 +                           + ZSTD_cwksp_alloc_size(ldmHSize * sizeof(ldmEntry_t));
126867 +    return params.enableLdm ? totalSize : 0;
126870 +size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize)
126872 +    return params.enableLdm ? (maxChunkSize / params.minMatchLength) : 0;
126875 +/** ZSTD_ldm_getBucket() :
126876 + *  Returns a pointer to the start of the bucket associated with hash. */
126877 +static ldmEntry_t* ZSTD_ldm_getBucket(
126878 +        ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams)
126880 +    return ldmState->hashTable + (hash << ldmParams.bucketSizeLog);
126883 +/** ZSTD_ldm_insertEntry() :
126884 + *  Insert the entry with corresponding hash into the hash table */
126885 +static void ZSTD_ldm_insertEntry(ldmState_t* ldmState,
126886 +                                 size_t const hash, const ldmEntry_t entry,
126887 +                                 ldmParams_t const ldmParams)
126889 +    BYTE* const pOffset = ldmState->bucketOffsets + hash;
126890 +    unsigned const offset = *pOffset;
126892 +    *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + offset) = entry;
126893 +    *pOffset = (BYTE)((offset + 1) & ((1u << ldmParams.bucketSizeLog) - 1));
126897 +/** ZSTD_ldm_countBackwardsMatch() :
126898 + *  Returns the number of bytes that match backwards before pIn and pMatch.
126900 + *  We count only bytes where pMatch >= pBase and pIn >= pAnchor. */
126901 +static size_t ZSTD_ldm_countBackwardsMatch(
126902 +            const BYTE* pIn, const BYTE* pAnchor,
126903 +            const BYTE* pMatch, const BYTE* pMatchBase)
126905 +    size_t matchLength = 0;
126906 +    while (pIn > pAnchor && pMatch > pMatchBase && pIn[-1] == pMatch[-1]) {
126907 +        pIn--;
126908 +        pMatch--;
126909 +        matchLength++;
126910 +    }
126911 +    return matchLength;
126914 +/** ZSTD_ldm_countBackwardsMatch_2segments() :
126915 + *  Returns the number of bytes that match backwards from pMatch,
126916 + *  even with the backwards match spanning 2 different segments.
126918 + *  On reaching `pMatchBase`, start counting from mEnd */
126919 +static size_t ZSTD_ldm_countBackwardsMatch_2segments(
126920 +                    const BYTE* pIn, const BYTE* pAnchor,
126921 +                    const BYTE* pMatch, const BYTE* pMatchBase,
126922 +                    const BYTE* pExtDictStart, const BYTE* pExtDictEnd)
126924 +    size_t matchLength = ZSTD_ldm_countBackwardsMatch(pIn, pAnchor, pMatch, pMatchBase);
126925 +    if (pMatch - matchLength != pMatchBase || pMatchBase == pExtDictStart) {
126926 +        /* If backwards match is entirely in the extDict or prefix, immediately return */
126927 +        return matchLength;
126928 +    }
126929 +    DEBUGLOG(7, "ZSTD_ldm_countBackwardsMatch_2segments: found 2-parts backwards match (length in prefix==%zu)", matchLength);
126930 +    matchLength += ZSTD_ldm_countBackwardsMatch(pIn - matchLength, pAnchor, pExtDictEnd, pExtDictStart);
126931 +    DEBUGLOG(7, "final backwards match length = %zu", matchLength);
126932 +    return matchLength;
126935 +/** ZSTD_ldm_fillFastTables() :
126937 + *  Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies.
126938 + *  This is similar to ZSTD_loadDictionaryContent.
126940 + *  The tables for the other strategies are filled within their
126941 + *  block compressors. */
126942 +static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
126943 +                                      void const* end)
126945 +    const BYTE* const iend = (const BYTE*)end;
126947 +    switch(ms->cParams.strategy)
126948 +    {
126949 +    case ZSTD_fast:
126950 +        ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast);
126951 +        break;
126953 +    case ZSTD_dfast:
126954 +        ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast);
126955 +        break;
126957 +    case ZSTD_greedy:
126958 +    case ZSTD_lazy:
126959 +    case ZSTD_lazy2:
126960 +    case ZSTD_btlazy2:
126961 +    case ZSTD_btopt:
126962 +    case ZSTD_btultra:
126963 +    case ZSTD_btultra2:
126964 +        break;
126965 +    default:
126966 +        assert(0);  /* not possible : not a valid strategy id */
126967 +    }
126969 +    return 0;
126972 +void ZSTD_ldm_fillHashTable(
126973 +            ldmState_t* ldmState, const BYTE* ip,
126974 +            const BYTE* iend, ldmParams_t const* params)
126976 +    U32 const minMatchLength = params->minMatchLength;
126977 +    U32 const hBits = params->hashLog - params->bucketSizeLog;
126978 +    BYTE const* const base = ldmState->window.base;
126979 +    BYTE const* const istart = ip;
126980 +    ldmRollingHashState_t hashState;
126981 +    size_t* const splits = ldmState->splitIndices;
126982 +    unsigned numSplits;
126984 +    DEBUGLOG(5, "ZSTD_ldm_fillHashTable");
126986 +    ZSTD_ldm_gear_init(&hashState, params);
126987 +    while (ip < iend) {
126988 +        size_t hashed;
126989 +        unsigned n;
126991 +        numSplits = 0;
126992 +        hashed = ZSTD_ldm_gear_feed(&hashState, ip, iend - ip, splits, &numSplits);
126994 +        for (n = 0; n < numSplits; n++) {
126995 +            if (ip + splits[n] >= istart + minMatchLength) {
126996 +                BYTE const* const split = ip + splits[n] - minMatchLength;
126997 +                U64 const xxhash = xxh64(split, minMatchLength, 0);
126998 +                U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));
126999 +                ldmEntry_t entry;
127001 +                entry.offset = (U32)(split - base);
127002 +                entry.checksum = (U32)(xxhash >> 32);
127003 +                ZSTD_ldm_insertEntry(ldmState, hash, entry, *params);
127004 +            }
127005 +        }
127007 +        ip += hashed;
127008 +    }
127012 +/** ZSTD_ldm_limitTableUpdate() :
127014 + *  Sets cctx->nextToUpdate to a position corresponding closer to anchor
127015 + *  if it is far way
127016 + *  (after a long match, only update tables a limited amount). */
127017 +static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor)
127019 +    U32 const curr = (U32)(anchor - ms->window.base);
127020 +    if (curr > ms->nextToUpdate + 1024) {
127021 +        ms->nextToUpdate =
127022 +            curr - MIN(512, curr - ms->nextToUpdate - 1024);
127023 +    }
127026 +static size_t ZSTD_ldm_generateSequences_internal(
127027 +        ldmState_t* ldmState, rawSeqStore_t* rawSeqStore,
127028 +        ldmParams_t const* params, void const* src, size_t srcSize)
127030 +    /* LDM parameters */
127031 +    int const extDict = ZSTD_window_hasExtDict(ldmState->window);
127032 +    U32 const minMatchLength = params->minMatchLength;
127033 +    U32 const entsPerBucket = 1U << params->bucketSizeLog;
127034 +    U32 const hBits = params->hashLog - params->bucketSizeLog;
127035 +    /* Prefix and extDict parameters */
127036 +    U32 const dictLimit = ldmState->window.dictLimit;
127037 +    U32 const lowestIndex = extDict ? ldmState->window.lowLimit : dictLimit;
127038 +    BYTE const* const base = ldmState->window.base;
127039 +    BYTE const* const dictBase = extDict ? ldmState->window.dictBase : NULL;
127040 +    BYTE const* const dictStart = extDict ? dictBase + lowestIndex : NULL;
127041 +    BYTE const* const dictEnd = extDict ? dictBase + dictLimit : NULL;
127042 +    BYTE const* const lowPrefixPtr = base + dictLimit;
127043 +    /* Input bounds */
127044 +    BYTE const* const istart = (BYTE const*)src;
127045 +    BYTE const* const iend = istart + srcSize;
127046 +    BYTE const* const ilimit = iend - HASH_READ_SIZE;
127047 +    /* Input positions */
127048 +    BYTE const* anchor = istart;
127049 +    BYTE const* ip = istart;
127050 +    /* Rolling hash state */
127051 +    ldmRollingHashState_t hashState;
127052 +    /* Arrays for staged-processing */
127053 +    size_t* const splits = ldmState->splitIndices;
127054 +    ldmMatchCandidate_t* const candidates = ldmState->matchCandidates;
127055 +    unsigned numSplits;
127057 +    if (srcSize < minMatchLength)
127058 +        return iend - anchor;
127060 +    /* Initialize the rolling hash state with the first minMatchLength bytes */
127061 +    ZSTD_ldm_gear_init(&hashState, params);
127062 +    {
127063 +        size_t n = 0;
127065 +        while (n < minMatchLength) {
127066 +            numSplits = 0;
127067 +            n += ZSTD_ldm_gear_feed(&hashState, ip + n, minMatchLength - n,
127068 +                                    splits, &numSplits);
127069 +        }
127070 +        ip += minMatchLength;
127071 +    }
127073 +    while (ip < ilimit) {
127074 +        size_t hashed;
127075 +        unsigned n;
127077 +        numSplits = 0;
127078 +        hashed = ZSTD_ldm_gear_feed(&hashState, ip, ilimit - ip,
127079 +                                    splits, &numSplits);
127081 +        for (n = 0; n < numSplits; n++) {
127082 +            BYTE const* const split = ip + splits[n] - minMatchLength;
127083 +            U64 const xxhash = xxh64(split, minMatchLength, 0);
127084 +            U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));
127086 +            candidates[n].split = split;
127087 +            candidates[n].hash = hash;
127088 +            candidates[n].checksum = (U32)(xxhash >> 32);
127089 +            candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, *params);
127090 +            PREFETCH_L1(candidates[n].bucket);
127091 +        }
127093 +        for (n = 0; n < numSplits; n++) {
127094 +            size_t forwardMatchLength = 0, backwardMatchLength = 0,
127095 +                   bestMatchLength = 0, mLength;
127096 +            BYTE const* const split = candidates[n].split;
127097 +            U32 const checksum = candidates[n].checksum;
127098 +            U32 const hash = candidates[n].hash;
127099 +            ldmEntry_t* const bucket = candidates[n].bucket;
127100 +            ldmEntry_t const* cur;
127101 +            ldmEntry_t const* bestEntry = NULL;
127102 +            ldmEntry_t newEntry;
127104 +            newEntry.offset = (U32)(split - base);
127105 +            newEntry.checksum = checksum;
127107 +            /* If a split point would generate a sequence overlapping with
127108 +             * the previous one, we merely register it in the hash table and
127109 +             * move on */
127110 +            if (split < anchor) {
127111 +                ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
127112 +                continue;
127113 +            }
127115 +            for (cur = bucket; cur < bucket + entsPerBucket; cur++) {
127116 +                size_t curForwardMatchLength, curBackwardMatchLength,
127117 +                       curTotalMatchLength;
127118 +                if (cur->checksum != checksum || cur->offset <= lowestIndex) {
127119 +                    continue;
127120 +                }
127121 +                if (extDict) {
127122 +                    BYTE const* const curMatchBase =
127123 +                        cur->offset < dictLimit ? dictBase : base;
127124 +                    BYTE const* const pMatch = curMatchBase + cur->offset;
127125 +                    BYTE const* const matchEnd =
127126 +                        cur->offset < dictLimit ? dictEnd : iend;
127127 +                    BYTE const* const lowMatchPtr =
127128 +                        cur->offset < dictLimit ? dictStart : lowPrefixPtr;
127129 +                    curForwardMatchLength =
127130 +                        ZSTD_count_2segments(split, pMatch, iend, matchEnd, lowPrefixPtr);
127131 +                    if (curForwardMatchLength < minMatchLength) {
127132 +                        continue;
127133 +                    }
127134 +                    curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch_2segments(
127135 +                            split, anchor, pMatch, lowMatchPtr, dictStart, dictEnd);
127136 +                } else { /* !extDict */
127137 +                    BYTE const* const pMatch = base + cur->offset;
127138 +                    curForwardMatchLength = ZSTD_count(split, pMatch, iend);
127139 +                    if (curForwardMatchLength < minMatchLength) {
127140 +                        continue;
127141 +                    }
127142 +                    curBackwardMatchLength =
127143 +                        ZSTD_ldm_countBackwardsMatch(split, anchor, pMatch, lowPrefixPtr);
127144 +                }
127145 +                curTotalMatchLength = curForwardMatchLength + curBackwardMatchLength;
127147 +                if (curTotalMatchLength > bestMatchLength) {
127148 +                    bestMatchLength = curTotalMatchLength;
127149 +                    forwardMatchLength = curForwardMatchLength;
127150 +                    backwardMatchLength = curBackwardMatchLength;
127151 +                    bestEntry = cur;
127152 +                }
127153 +            }
127155 +            /* No match found -- insert an entry into the hash table
127156 +             * and process the next candidate match */
127157 +            if (bestEntry == NULL) {
127158 +                ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
127159 +                continue;
127160 +            }
127162 +            /* Match found */
127163 +            mLength = forwardMatchLength + backwardMatchLength;
127164 +            {
127165 +                U32 const offset = (U32)(split - base) - bestEntry->offset;
127166 +                rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size;
127168 +                /* Out of sequence storage */
127169 +                if (rawSeqStore->size == rawSeqStore->capacity)
127170 +                    return ERROR(dstSize_tooSmall);
127171 +                seq->litLength = (U32)(split - backwardMatchLength - anchor);
127172 +                seq->matchLength = (U32)mLength;
127173 +                seq->offset = offset;
127174 +                rawSeqStore->size++;
127175 +            }
127177 +            /* Insert the current entry into the hash table --- it must be
127178 +             * done after the previous block to avoid clobbering bestEntry */
127179 +            ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
127181 +            anchor = split + forwardMatchLength;
127182 +        }
127184 +        ip += hashed;
127185 +    }
127187 +    return iend - anchor;
127190 +/*! ZSTD_ldm_reduceTable() :
127191 + *  reduce table indexes by `reducerValue` */
127192 +static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size,
127193 +                                 U32 const reducerValue)
127195 +    U32 u;
127196 +    for (u = 0; u < size; u++) {
127197 +        if (table[u].offset < reducerValue) table[u].offset = 0;
127198 +        else table[u].offset -= reducerValue;
127199 +    }
127202 +size_t ZSTD_ldm_generateSequences(
127203 +        ldmState_t* ldmState, rawSeqStore_t* sequences,
127204 +        ldmParams_t const* params, void const* src, size_t srcSize)
127206 +    U32 const maxDist = 1U << params->windowLog;
127207 +    BYTE const* const istart = (BYTE const*)src;
127208 +    BYTE const* const iend = istart + srcSize;
127209 +    size_t const kMaxChunkSize = 1 << 20;
127210 +    size_t const nbChunks = (srcSize / kMaxChunkSize) + ((srcSize % kMaxChunkSize) != 0);
127211 +    size_t chunk;
127212 +    size_t leftoverSize = 0;
127214 +    assert(ZSTD_CHUNKSIZE_MAX >= kMaxChunkSize);
127215 +    /* Check that ZSTD_window_update() has been called for this chunk prior
127216 +     * to passing it to this function.
127217 +     */
127218 +    assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize);
127219 +    /* The input could be very large (in zstdmt), so it must be broken up into
127220 +     * chunks to enforce the maximum distance and handle overflow correction.
127221 +     */
127222 +    assert(sequences->pos <= sequences->size);
127223 +    assert(sequences->size <= sequences->capacity);
127224 +    for (chunk = 0; chunk < nbChunks && sequences->size < sequences->capacity; ++chunk) {
127225 +        BYTE const* const chunkStart = istart + chunk * kMaxChunkSize;
127226 +        size_t const remaining = (size_t)(iend - chunkStart);
127227 +        BYTE const *const chunkEnd =
127228 +            (remaining < kMaxChunkSize) ? iend : chunkStart + kMaxChunkSize;
127229 +        size_t const chunkSize = chunkEnd - chunkStart;
127230 +        size_t newLeftoverSize;
127231 +        size_t const prevSize = sequences->size;
127233 +        assert(chunkStart < iend);
127234 +        /* 1. Perform overflow correction if necessary. */
127235 +        if (ZSTD_window_needOverflowCorrection(ldmState->window, chunkEnd)) {
127236 +            U32 const ldmHSize = 1U << params->hashLog;
127237 +            U32 const correction = ZSTD_window_correctOverflow(
127238 +                &ldmState->window, /* cycleLog */ 0, maxDist, chunkStart);
127239 +            ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction);
127240 +            /* invalidate dictionaries on overflow correction */
127241 +            ldmState->loadedDictEnd = 0;
127242 +        }
127243 +        /* 2. We enforce the maximum offset allowed.
127244 +         *
127245 +         * kMaxChunkSize should be small enough that we don't lose too much of
127246 +         * the window through early invalidation.
127247 +         * TODO: * Test the chunk size.
127248 +         *       * Try invalidation after the sequence generation and test the
127249 +         *         the offset against maxDist directly.
127250 +         *
127251 +         * NOTE: Because of dictionaries + sequence splitting we MUST make sure
127252 +         * that any offset used is valid at the END of the sequence, since it may
127253 +         * be split into two sequences. This condition holds when using
127254 +         * ZSTD_window_enforceMaxDist(), but if we move to checking offsets
127255 +         * against maxDist directly, we'll have to carefully handle that case.
127256 +         */
127257 +        ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, &ldmState->loadedDictEnd, NULL);
127258 +        /* 3. Generate the sequences for the chunk, and get newLeftoverSize. */
127259 +        newLeftoverSize = ZSTD_ldm_generateSequences_internal(
127260 +            ldmState, sequences, params, chunkStart, chunkSize);
127261 +        if (ZSTD_isError(newLeftoverSize))
127262 +            return newLeftoverSize;
127263 +        /* 4. We add the leftover literals from previous iterations to the first
127264 +         *    newly generated sequence, or add the `newLeftoverSize` if none are
127265 +         *    generated.
127266 +         */
127267 +        /* Prepend the leftover literals from the last call */
127268 +        if (prevSize < sequences->size) {
127269 +            sequences->seq[prevSize].litLength += (U32)leftoverSize;
127270 +            leftoverSize = newLeftoverSize;
127271 +        } else {
127272 +            assert(newLeftoverSize == chunkSize);
127273 +            leftoverSize += chunkSize;
127274 +        }
127275 +    }
127276 +    return 0;
127279 +void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch) {
127280 +    while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) {
127281 +        rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos;
127282 +        if (srcSize <= seq->litLength) {
127283 +            /* Skip past srcSize literals */
127284 +            seq->litLength -= (U32)srcSize;
127285 +            return;
127286 +        }
127287 +        srcSize -= seq->litLength;
127288 +        seq->litLength = 0;
127289 +        if (srcSize < seq->matchLength) {
127290 +            /* Skip past the first srcSize of the match */
127291 +            seq->matchLength -= (U32)srcSize;
127292 +            if (seq->matchLength < minMatch) {
127293 +                /* The match is too short, omit it */
127294 +                if (rawSeqStore->pos + 1 < rawSeqStore->size) {
127295 +                    seq[1].litLength += seq[0].matchLength;
127296 +                }
127297 +                rawSeqStore->pos++;
127298 +            }
127299 +            return;
127300 +        }
127301 +        srcSize -= seq->matchLength;
127302 +        seq->matchLength = 0;
127303 +        rawSeqStore->pos++;
127304 +    }
127308 + * If the sequence length is longer than remaining then the sequence is split
127309 + * between this block and the next.
127311 + * Returns the current sequence to handle, or if the rest of the block should
127312 + * be literals, it returns a sequence with offset == 0.
127313 + */
127314 +static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore,
127315 +                                 U32 const remaining, U32 const minMatch)
127317 +    rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos];
127318 +    assert(sequence.offset > 0);
127319 +    /* Likely: No partial sequence */
127320 +    if (remaining >= sequence.litLength + sequence.matchLength) {
127321 +        rawSeqStore->pos++;
127322 +        return sequence;
127323 +    }
127324 +    /* Cut the sequence short (offset == 0 ==> rest is literals). */
127325 +    if (remaining <= sequence.litLength) {
127326 +        sequence.offset = 0;
127327 +    } else if (remaining < sequence.litLength + sequence.matchLength) {
127328 +        sequence.matchLength = remaining - sequence.litLength;
127329 +        if (sequence.matchLength < minMatch) {
127330 +            sequence.offset = 0;
127331 +        }
127332 +    }
127333 +    /* Skip past `remaining` bytes for the future sequences. */
127334 +    ZSTD_ldm_skipSequences(rawSeqStore, remaining, minMatch);
127335 +    return sequence;
127338 +void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {
127339 +    U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
127340 +    while (currPos && rawSeqStore->pos < rawSeqStore->size) {
127341 +        rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
127342 +        if (currPos >= currSeq.litLength + currSeq.matchLength) {
127343 +            currPos -= currSeq.litLength + currSeq.matchLength;
127344 +            rawSeqStore->pos++;
127345 +        } else {
127346 +            rawSeqStore->posInSequence = currPos;
127347 +            break;
127348 +        }
127349 +    }
127350 +    if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) {
127351 +        rawSeqStore->posInSequence = 0;
127352 +    }
127355 +size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
127356 +    ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
127357 +    void const* src, size_t srcSize)
127359 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
127360 +    unsigned const minMatch = cParams->minMatch;
127361 +    ZSTD_blockCompressor const blockCompressor =
127362 +        ZSTD_selectBlockCompressor(cParams->strategy, ZSTD_matchState_dictMode(ms));
127363 +    /* Input bounds */
127364 +    BYTE const* const istart = (BYTE const*)src;
127365 +    BYTE const* const iend = istart + srcSize;
127366 +    /* Input positions */
127367 +    BYTE const* ip = istart;
127369 +    DEBUGLOG(5, "ZSTD_ldm_blockCompress: srcSize=%zu", srcSize);
127370 +    /* If using opt parser, use LDMs only as candidates rather than always accepting them */
127371 +    if (cParams->strategy >= ZSTD_btopt) {
127372 +        size_t lastLLSize;
127373 +        ms->ldmSeqStore = rawSeqStore;
127374 +        lastLLSize = blockCompressor(ms, seqStore, rep, src, srcSize);
127375 +        ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore, srcSize);
127376 +        return lastLLSize;
127377 +    }
127379 +    assert(rawSeqStore->pos <= rawSeqStore->size);
127380 +    assert(rawSeqStore->size <= rawSeqStore->capacity);
127381 +    /* Loop through each sequence and apply the block compressor to the literals */
127382 +    while (rawSeqStore->pos < rawSeqStore->size && ip < iend) {
127383 +        /* maybeSplitSequence updates rawSeqStore->pos */
127384 +        rawSeq const sequence = maybeSplitSequence(rawSeqStore,
127385 +                                                   (U32)(iend - ip), minMatch);
127386 +        int i;
127387 +        /* End signal */
127388 +        if (sequence.offset == 0)
127389 +            break;
127391 +        assert(ip + sequence.litLength + sequence.matchLength <= iend);
127393 +        /* Fill tables for block compressor */
127394 +        ZSTD_ldm_limitTableUpdate(ms, ip);
127395 +        ZSTD_ldm_fillFastTables(ms, ip);
127396 +        /* Run the block compressor */
127397 +        DEBUGLOG(5, "pos %u : calling block compressor on segment of size %u", (unsigned)(ip-istart), sequence.litLength);
127398 +        {
127399 +            size_t const newLitLength =
127400 +                blockCompressor(ms, seqStore, rep, ip, sequence.litLength);
127401 +            ip += sequence.litLength;
127402 +            /* Update the repcodes */
127403 +            for (i = ZSTD_REP_NUM - 1; i > 0; i--)
127404 +                rep[i] = rep[i-1];
127405 +            rep[0] = sequence.offset;
127406 +            /* Store the sequence */
127407 +            ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend,
127408 +                          sequence.offset + ZSTD_REP_MOVE,
127409 +                          sequence.matchLength - MINMATCH);
127410 +            ip += sequence.matchLength;
127411 +        }
127412 +    }
127413 +    /* Fill the tables for the block compressor */
127414 +    ZSTD_ldm_limitTableUpdate(ms, ip);
127415 +    ZSTD_ldm_fillFastTables(ms, ip);
127416 +    /* Compress the last literals */
127417 +    return blockCompressor(ms, seqStore, rep, ip, iend - ip);
127419 diff --git a/lib/zstd/compress/zstd_ldm.h b/lib/zstd/compress/zstd_ldm.h
127420 new file mode 100644
127421 index 000000000000..5ee467eaca2e
127422 --- /dev/null
127423 +++ b/lib/zstd/compress/zstd_ldm.h
127424 @@ -0,0 +1,110 @@
127426 + * Copyright (c) Yann Collet, Facebook, Inc.
127427 + * All rights reserved.
127429 + * This source code is licensed under both the BSD-style license (found in the
127430 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
127431 + * in the COPYING file in the root directory of this source tree).
127432 + * You may select, at your option, one of the above-listed licenses.
127433 + */
127435 +#ifndef ZSTD_LDM_H
127436 +#define ZSTD_LDM_H
127439 +#include "zstd_compress_internal.h"   /* ldmParams_t, U32 */
127440 +#include <linux/zstd.h>   /* ZSTD_CCtx, size_t */
127442 +/*-*************************************
127443 +*  Long distance matching
127444 +***************************************/
127446 +#define ZSTD_LDM_DEFAULT_WINDOW_LOG ZSTD_WINDOWLOG_LIMIT_DEFAULT
127448 +void ZSTD_ldm_fillHashTable(
127449 +            ldmState_t* state, const BYTE* ip,
127450 +            const BYTE* iend, ldmParams_t const* params);
127453 + * ZSTD_ldm_generateSequences():
127455 + * Generates the sequences using the long distance match finder.
127456 + * Generates long range matching sequences in `sequences`, which parse a prefix
127457 + * of the source. `sequences` must be large enough to store every sequence,
127458 + * which can be checked with `ZSTD_ldm_getMaxNbSeq()`.
127459 + * @returns 0 or an error code.
127461 + * NOTE: The user must have called ZSTD_window_update() for all of the input
127462 + * they have, even if they pass it to ZSTD_ldm_generateSequences() in chunks.
127463 + * NOTE: This function returns an error if it runs out of space to store
127464 + *       sequences.
127465 + */
127466 +size_t ZSTD_ldm_generateSequences(
127467 +            ldmState_t* ldms, rawSeqStore_t* sequences,
127468 +            ldmParams_t const* params, void const* src, size_t srcSize);
127471 + * ZSTD_ldm_blockCompress():
127473 + * Compresses a block using the predefined sequences, along with a secondary
127474 + * block compressor. The literals section of every sequence is passed to the
127475 + * secondary block compressor, and those sequences are interspersed with the
127476 + * predefined sequences. Returns the length of the last literals.
127477 + * Updates `rawSeqStore.pos` to indicate how many sequences have been consumed.
127478 + * `rawSeqStore.seq` may also be updated to split the last sequence between two
127479 + * blocks.
127480 + * @return The length of the last literals.
127482 + * NOTE: The source must be at most the maximum block size, but the predefined
127483 + * sequences can be any size, and may be longer than the block. In the case that
127484 + * they are longer than the block, the last sequences may need to be split into
127485 + * two. We handle that case correctly, and update `rawSeqStore` appropriately.
127486 + * NOTE: This function does not return any errors.
127487 + */
127488 +size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
127489 +            ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
127490 +            void const* src, size_t srcSize);
127493 + * ZSTD_ldm_skipSequences():
127495 + * Skip past `srcSize` bytes worth of sequences in `rawSeqStore`.
127496 + * Avoids emitting matches less than `minMatch` bytes.
127497 + * Must be called for data that is not passed to ZSTD_ldm_blockCompress().
127498 + */
127499 +void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize,
127500 +    U32 const minMatch);
127502 +/* ZSTD_ldm_skipRawSeqStoreBytes():
127503 + * Moves forward in rawSeqStore by nbBytes, updating fields 'pos' and 'posInSequence'.
127504 + * Not to be used in conjunction with ZSTD_ldm_skipSequences().
127505 + * Must be called for data with is not passed to ZSTD_ldm_blockCompress().
127506 + */
127507 +void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes);
127509 +/** ZSTD_ldm_getTableSize() :
127510 + *  Estimate the space needed for long distance matching tables or 0 if LDM is
127511 + *  disabled.
127512 + */
127513 +size_t ZSTD_ldm_getTableSize(ldmParams_t params);
127515 +/** ZSTD_ldm_getSeqSpace() :
127516 + *  Return an upper bound on the number of sequences that can be produced by
127517 + *  the long distance matcher, or 0 if LDM is disabled.
127518 + */
127519 +size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize);
127521 +/** ZSTD_ldm_adjustParameters() :
127522 + *  If the params->hashRateLog is not set, set it to its default value based on
127523 + *  windowLog and params->hashLog.
127525 + *  Ensures that params->bucketSizeLog is <= params->hashLog (setting it to
127526 + *  params->hashLog if it is not).
127528 + *  Ensures that the minMatchLength >= targetLength during optimal parsing.
127529 + */
127530 +void ZSTD_ldm_adjustParameters(ldmParams_t* params,
127531 +                               ZSTD_compressionParameters const* cParams);
127534 +#endif /* ZSTD_FAST_H */
127535 diff --git a/lib/zstd/compress/zstd_ldm_geartab.h b/lib/zstd/compress/zstd_ldm_geartab.h
127536 new file mode 100644
127537 index 000000000000..e5c24d856b0a
127538 --- /dev/null
127539 +++ b/lib/zstd/compress/zstd_ldm_geartab.h
127540 @@ -0,0 +1,103 @@
127542 + * Copyright (c) Yann Collet, Facebook, Inc.
127543 + * All rights reserved.
127545 + * This source code is licensed under both the BSD-style license (found in the
127546 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
127547 + * in the COPYING file in the root directory of this source tree).
127548 + * You may select, at your option, one of the above-listed licenses.
127549 + */
127551 +#ifndef ZSTD_LDM_GEARTAB_H
127552 +#define ZSTD_LDM_GEARTAB_H
127554 +static U64 ZSTD_ldm_gearTab[256] = {
127555 +    0xf5b8f72c5f77775c, 0x84935f266b7ac412, 0xb647ada9ca730ccc,
127556 +    0xb065bb4b114fb1de, 0x34584e7e8c3a9fd0, 0x4e97e17c6ae26b05,
127557 +    0x3a03d743bc99a604, 0xcecd042422c4044f, 0x76de76c58524259e,
127558 +    0x9c8528f65badeaca, 0x86563706e2097529, 0x2902475fa375d889,
127559 +    0xafb32a9739a5ebe6, 0xce2714da3883e639, 0x21eaf821722e69e,
127560 +    0x37b628620b628,    0x49a8d455d88caf5,  0x8556d711e6958140,
127561 +    0x4f7ae74fc605c1f,  0x829f0c3468bd3a20, 0x4ffdc885c625179e,
127562 +    0x8473de048a3daf1b, 0x51008822b05646b2, 0x69d75d12b2d1cc5f,
127563 +    0x8c9d4a19159154bc, 0xc3cc10f4abbd4003, 0xd06ddc1cecb97391,
127564 +    0xbe48e6e7ed80302e, 0x3481db31cee03547, 0xacc3f67cdaa1d210,
127565 +    0x65cb771d8c7f96cc, 0x8eb27177055723dd, 0xc789950d44cd94be,
127566 +    0x934feadc3700b12b, 0x5e485f11edbdf182, 0x1e2e2a46fd64767a,
127567 +    0x2969ca71d82efa7c, 0x9d46e9935ebbba2e, 0xe056b67e05e6822b,
127568 +    0x94d73f55739d03a0, 0xcd7010bdb69b5a03, 0x455ef9fcd79b82f4,
127569 +    0x869cb54a8749c161, 0x38d1a4fa6185d225, 0xb475166f94bbe9bb,
127570 +    0xa4143548720959f1, 0x7aed4780ba6b26ba, 0xd0ce264439e02312,
127571 +    0x84366d746078d508, 0xa8ce973c72ed17be, 0x21c323a29a430b01,
127572 +    0x9962d617e3af80ee, 0xab0ce91d9c8cf75b, 0x530e8ee6d19a4dbc,
127573 +    0x2ef68c0cf53f5d72, 0xc03a681640a85506, 0x496e4e9f9c310967,
127574 +    0x78580472b59b14a0, 0x273824c23b388577, 0x66bf923ad45cb553,
127575 +    0x47ae1a5a2492ba86, 0x35e304569e229659, 0x4765182a46870b6f,
127576 +    0x6cbab625e9099412, 0xddac9a2e598522c1, 0x7172086e666624f2,
127577 +    0xdf5003ca503b7837, 0x88c0c1db78563d09, 0x58d51865acfc289d,
127578 +    0x177671aec65224f1, 0xfb79d8a241e967d7, 0x2be1e101cad9a49a,
127579 +    0x6625682f6e29186b, 0x399553457ac06e50, 0x35dffb4c23abb74,
127580 +    0x429db2591f54aade, 0xc52802a8037d1009, 0x6acb27381f0b25f3,
127581 +    0xf45e2551ee4f823b, 0x8b0ea2d99580c2f7, 0x3bed519cbcb4e1e1,
127582 +    0xff452823dbb010a,  0x9d42ed614f3dd267, 0x5b9313c06257c57b,
127583 +    0xa114b8008b5e1442, 0xc1fe311c11c13d4b, 0x66e8763ea34c5568,
127584 +    0x8b982af1c262f05d, 0xee8876faaa75fbb7, 0x8a62a4d0d172bb2a,
127585 +    0xc13d94a3b7449a97, 0x6dbbba9dc15d037c, 0xc786101f1d92e0f1,
127586 +    0xd78681a907a0b79b, 0xf61aaf2962c9abb9, 0x2cfd16fcd3cb7ad9,
127587 +    0x868c5b6744624d21, 0x25e650899c74ddd7, 0xba042af4a7c37463,
127588 +    0x4eb1a539465a3eca, 0xbe09dbf03b05d5ca, 0x774e5a362b5472ba,
127589 +    0x47a1221229d183cd, 0x504b0ca18ef5a2df, 0xdffbdfbde2456eb9,
127590 +    0x46cd2b2fbee34634, 0xf2aef8fe819d98c3, 0x357f5276d4599d61,
127591 +    0x24a5483879c453e3, 0x88026889192b4b9,  0x28da96671782dbec,
127592 +    0x4ef37c40588e9aaa, 0x8837b90651bc9fb3, 0xc164f741d3f0e5d6,
127593 +    0xbc135a0a704b70ba, 0x69cd868f7622ada,  0xbc37ba89e0b9c0ab,
127594 +    0x47c14a01323552f6, 0x4f00794bacee98bb, 0x7107de7d637a69d5,
127595 +    0x88af793bb6f2255e, 0xf3c6466b8799b598, 0xc288c616aa7f3b59,
127596 +    0x81ca63cf42fca3fd, 0x88d85ace36a2674b, 0xd056bd3792389e7,
127597 +    0xe55c396c4e9dd32d, 0xbefb504571e6c0a6, 0x96ab32115e91e8cc,
127598 +    0xbf8acb18de8f38d1, 0x66dae58801672606, 0x833b6017872317fb,
127599 +    0xb87c16f2d1c92864, 0xdb766a74e58b669c, 0x89659f85c61417be,
127600 +    0xc8daad856011ea0c, 0x76a4b565b6fe7eae, 0xa469d085f6237312,
127601 +    0xaaf0365683a3e96c, 0x4dbb746f8424f7b8, 0x638755af4e4acc1,
127602 +    0x3d7807f5bde64486, 0x17be6d8f5bbb7639, 0x903f0cd44dc35dc,
127603 +    0x67b672eafdf1196c, 0xa676ff93ed4c82f1, 0x521d1004c5053d9d,
127604 +    0x37ba9ad09ccc9202, 0x84e54d297aacfb51, 0xa0b4b776a143445,
127605 +    0x820d471e20b348e,  0x1874383cb83d46dc, 0x97edeec7a1efe11c,
127606 +    0xb330e50b1bdc42aa, 0x1dd91955ce70e032, 0xa514cdb88f2939d5,
127607 +    0x2791233fd90db9d3, 0x7b670a4cc50f7a9b, 0x77c07d2a05c6dfa5,
127608 +    0xe3778b6646d0a6fa, 0xb39c8eda47b56749, 0x933ed448addbef28,
127609 +    0xaf846af6ab7d0bf4, 0xe5af208eb666e49,  0x5e6622f73534cd6a,
127610 +    0x297daeca42ef5b6e, 0x862daef3d35539a6, 0xe68722498f8e1ea9,
127611 +    0x981c53093dc0d572, 0xfa09b0bfbf86fbf5, 0x30b1e96166219f15,
127612 +    0x70e7d466bdc4fb83, 0x5a66736e35f2a8e9, 0xcddb59d2b7c1baef,
127613 +    0xd6c7d247d26d8996, 0xea4e39eac8de1ba3, 0x539c8bb19fa3aff2,
127614 +    0x9f90e4c5fd508d8,  0xa34e5956fbaf3385, 0x2e2f8e151d3ef375,
127615 +    0x173691e9b83faec1, 0xb85a8d56bf016379, 0x8382381267408ae3,
127616 +    0xb90f901bbdc0096d, 0x7c6ad32933bcec65, 0x76bb5e2f2c8ad595,
127617 +    0x390f851a6cf46d28, 0xc3e6064da1c2da72, 0xc52a0c101cfa5389,
127618 +    0xd78eaf84a3fbc530, 0x3781b9e2288b997e, 0x73c2f6dea83d05c4,
127619 +    0x4228e364c5b5ed7,  0x9d7a3edf0da43911, 0x8edcfeda24686756,
127620 +    0x5e7667a7b7a9b3a1, 0x4c4f389fa143791d, 0xb08bc1023da7cddc,
127621 +    0x7ab4be3ae529b1cc, 0x754e6132dbe74ff9, 0x71635442a839df45,
127622 +    0x2f6fb1643fbe52de, 0x961e0a42cf7a8177, 0xf3b45d83d89ef2ea,
127623 +    0xee3de4cf4a6e3e9b, 0xcd6848542c3295e7, 0xe4cee1664c78662f,
127624 +    0x9947548b474c68c4, 0x25d73777a5ed8b0b, 0xc915b1d636b7fc,
127625 +    0x21c2ba75d9b0d2da, 0x5f6b5dcf608a64a1, 0xdcf333255ff9570c,
127626 +    0x633b922418ced4ee, 0xc136dde0b004b34a, 0x58cc83b05d4b2f5a,
127627 +    0x5eb424dda28e42d2, 0x62df47369739cd98, 0xb4e0b42485e4ce17,
127628 +    0x16e1f0c1f9a8d1e7, 0x8ec3916707560ebf, 0x62ba6e2df2cc9db3,
127629 +    0xcbf9f4ff77d83a16, 0x78d9d7d07d2bbcc4, 0xef554ce1e02c41f4,
127630 +    0x8d7581127eccf94d, 0xa9b53336cb3c8a05, 0x38c42c0bf45c4f91,
127631 +    0x640893cdf4488863, 0x80ec34bc575ea568, 0x39f324f5b48eaa40,
127632 +    0xe9d9ed1f8eff527f, 0x9224fc058cc5a214, 0xbaba00b04cfe7741,
127633 +    0x309a9f120fcf52af, 0xa558f3ec65626212, 0x424bec8b7adabe2f,
127634 +    0x41622513a6aea433, 0xb88da2d5324ca798, 0xd287733b245528a4,
127635 +    0x9a44697e6d68aec3, 0x7b1093be2f49bb28, 0x50bbec632e3d8aad,
127636 +    0x6cd90723e1ea8283, 0x897b9e7431b02bf3, 0x219efdcb338a7047,
127637 +    0x3b0311f0a27c0656, 0xdb17bf91c0db96e7, 0x8cd4fd6b4e85a5b2,
127638 +    0xfab071054ba6409d, 0x40d6fe831fa9dfd9, 0xaf358debad7d791e,
127639 +    0xeb8d0e25a65e3e58, 0xbbcbd3df14e08580, 0xcf751f27ecdab2b,
127640 +    0x2b4da14f2613d8f4
127643 +#endif /* ZSTD_LDM_GEARTAB_H */
127644 diff --git a/lib/zstd/compress/zstd_opt.c b/lib/zstd/compress/zstd_opt.c
127645 new file mode 100644
127646 index 000000000000..9ab92d4ef499
127647 --- /dev/null
127648 +++ b/lib/zstd/compress/zstd_opt.c
127649 @@ -0,0 +1,1345 @@
127651 + * Copyright (c) Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
127652 + * All rights reserved.
127654 + * This source code is licensed under both the BSD-style license (found in the
127655 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
127656 + * in the COPYING file in the root directory of this source tree).
127657 + * You may select, at your option, one of the above-listed licenses.
127658 + */
127660 +#include "zstd_compress_internal.h"
127661 +#include "hist.h"
127662 +#include "zstd_opt.h"
127665 +#define ZSTD_LITFREQ_ADD    2   /* scaling factor for litFreq, so that frequencies adapt faster to new stats */
127666 +#define ZSTD_FREQ_DIV       4   /* log factor when using previous stats to init next stats */
127667 +#define ZSTD_MAX_PRICE     (1<<30)
127669 +#define ZSTD_PREDEF_THRESHOLD 1024   /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */
127672 +/*-*************************************
127673 +*  Price functions for optimal parser
127674 +***************************************/
127676 +#if 0    /* approximation at bit level */
127677 +#  define BITCOST_ACCURACY 0
127678 +#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
127679 +#  define WEIGHT(stat)  ((void)opt, ZSTD_bitWeight(stat))
127680 +#elif 0  /* fractional bit accuracy */
127681 +#  define BITCOST_ACCURACY 8
127682 +#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
127683 +#  define WEIGHT(stat,opt) ((void)opt, ZSTD_fracWeight(stat))
127684 +#else    /* opt==approx, ultra==accurate */
127685 +#  define BITCOST_ACCURACY 8
127686 +#  define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)
127687 +#  define WEIGHT(stat,opt) (opt ? ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat))
127688 +#endif
127690 +MEM_STATIC U32 ZSTD_bitWeight(U32 stat)
127692 +    return (ZSTD_highbit32(stat+1) * BITCOST_MULTIPLIER);
127695 +MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat)
127697 +    U32 const stat = rawStat + 1;
127698 +    U32 const hb = ZSTD_highbit32(stat);
127699 +    U32 const BWeight = hb * BITCOST_MULTIPLIER;
127700 +    U32 const FWeight = (stat << BITCOST_ACCURACY) >> hb;
127701 +    U32 const weight = BWeight + FWeight;
127702 +    assert(hb + BITCOST_ACCURACY < 31);
127703 +    return weight;
127706 +#if (DEBUGLEVEL>=2)
127707 +/* debugging function,
127708 + * @return price in bytes as fractional value
127709 + * for debug messages only */
127710 +MEM_STATIC double ZSTD_fCost(U32 price)
127712 +    return (double)price / (BITCOST_MULTIPLIER*8);
127714 +#endif
127716 +static int ZSTD_compressedLiterals(optState_t const* const optPtr)
127718 +    return optPtr->literalCompressionMode != ZSTD_lcm_uncompressed;
127721 +static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel)
127723 +    if (ZSTD_compressedLiterals(optPtr))
127724 +        optPtr->litSumBasePrice = WEIGHT(optPtr->litSum, optLevel);
127725 +    optPtr->litLengthSumBasePrice = WEIGHT(optPtr->litLengthSum, optLevel);
127726 +    optPtr->matchLengthSumBasePrice = WEIGHT(optPtr->matchLengthSum, optLevel);
127727 +    optPtr->offCodeSumBasePrice = WEIGHT(optPtr->offCodeSum, optLevel);
127731 +/* ZSTD_downscaleStat() :
127732 + * reduce all elements in table by a factor 2^(ZSTD_FREQ_DIV+malus)
127733 + * return the resulting sum of elements */
127734 +static U32 ZSTD_downscaleStat(unsigned* table, U32 lastEltIndex, int malus)
127736 +    U32 s, sum=0;
127737 +    DEBUGLOG(5, "ZSTD_downscaleStat (nbElts=%u)", (unsigned)lastEltIndex+1);
127738 +    assert(ZSTD_FREQ_DIV+malus > 0 && ZSTD_FREQ_DIV+malus < 31);
127739 +    for (s=0; s<lastEltIndex+1; s++) {
127740 +        table[s] = 1 + (table[s] >> (ZSTD_FREQ_DIV+malus));
127741 +        sum += table[s];
127742 +    }
127743 +    return sum;
127746 +/* ZSTD_rescaleFreqs() :
127747 + * if first block (detected by optPtr->litLengthSum == 0) : init statistics
127748 + *    take hints from dictionary if there is one
127749 + *    or init from zero, using src for literals stats, or flat 1 for match symbols
127750 + * otherwise downscale existing stats, to be used as seed for next block.
127751 + */
127752 +static void
127753 +ZSTD_rescaleFreqs(optState_t* const optPtr,
127754 +            const BYTE* const src, size_t const srcSize,
127755 +                  int const optLevel)
127757 +    int const compressedLiterals = ZSTD_compressedLiterals(optPtr);
127758 +    DEBUGLOG(5, "ZSTD_rescaleFreqs (srcSize=%u)", (unsigned)srcSize);
127759 +    optPtr->priceType = zop_dynamic;
127761 +    if (optPtr->litLengthSum == 0) {  /* first block : init */
127762 +        if (srcSize <= ZSTD_PREDEF_THRESHOLD) {  /* heuristic */
127763 +            DEBUGLOG(5, "(srcSize <= ZSTD_PREDEF_THRESHOLD) => zop_predef");
127764 +            optPtr->priceType = zop_predef;
127765 +        }
127767 +        assert(optPtr->symbolCosts != NULL);
127768 +        if (optPtr->symbolCosts->huf.repeatMode == HUF_repeat_valid) {
127769 +            /* huffman table presumed generated by dictionary */
127770 +            optPtr->priceType = zop_dynamic;
127772 +            if (compressedLiterals) {
127773 +                unsigned lit;
127774 +                assert(optPtr->litFreq != NULL);
127775 +                optPtr->litSum = 0;
127776 +                for (lit=0; lit<=MaxLit; lit++) {
127777 +                    U32 const scaleLog = 11;   /* scale to 2K */
127778 +                    U32 const bitCost = HUF_getNbBits(optPtr->symbolCosts->huf.CTable, lit);
127779 +                    assert(bitCost <= scaleLog);
127780 +                    optPtr->litFreq[lit] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
127781 +                    optPtr->litSum += optPtr->litFreq[lit];
127782 +            }   }
127784 +            {   unsigned ll;
127785 +                FSE_CState_t llstate;
127786 +                FSE_initCState(&llstate, optPtr->symbolCosts->fse.litlengthCTable);
127787 +                optPtr->litLengthSum = 0;
127788 +                for (ll=0; ll<=MaxLL; ll++) {
127789 +                    U32 const scaleLog = 10;   /* scale to 1K */
127790 +                    U32 const bitCost = FSE_getMaxNbBits(llstate.symbolTT, ll);
127791 +                    assert(bitCost < scaleLog);
127792 +                    optPtr->litLengthFreq[ll] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
127793 +                    optPtr->litLengthSum += optPtr->litLengthFreq[ll];
127794 +            }   }
127796 +            {   unsigned ml;
127797 +                FSE_CState_t mlstate;
127798 +                FSE_initCState(&mlstate, optPtr->symbolCosts->fse.matchlengthCTable);
127799 +                optPtr->matchLengthSum = 0;
127800 +                for (ml=0; ml<=MaxML; ml++) {
127801 +                    U32 const scaleLog = 10;
127802 +                    U32 const bitCost = FSE_getMaxNbBits(mlstate.symbolTT, ml);
127803 +                    assert(bitCost < scaleLog);
127804 +                    optPtr->matchLengthFreq[ml] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
127805 +                    optPtr->matchLengthSum += optPtr->matchLengthFreq[ml];
127806 +            }   }
127808 +            {   unsigned of;
127809 +                FSE_CState_t ofstate;
127810 +                FSE_initCState(&ofstate, optPtr->symbolCosts->fse.offcodeCTable);
127811 +                optPtr->offCodeSum = 0;
127812 +                for (of=0; of<=MaxOff; of++) {
127813 +                    U32 const scaleLog = 10;
127814 +                    U32 const bitCost = FSE_getMaxNbBits(ofstate.symbolTT, of);
127815 +                    assert(bitCost < scaleLog);
127816 +                    optPtr->offCodeFreq[of] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/;
127817 +                    optPtr->offCodeSum += optPtr->offCodeFreq[of];
127818 +            }   }
127820 +        } else {  /* not a dictionary */
127822 +            assert(optPtr->litFreq != NULL);
127823 +            if (compressedLiterals) {
127824 +                unsigned lit = MaxLit;
127825 +                HIST_count_simple(optPtr->litFreq, &lit, src, srcSize);   /* use raw first block to init statistics */
127826 +                optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
127827 +            }
127829 +            {   unsigned ll;
127830 +                for (ll=0; ll<=MaxLL; ll++)
127831 +                    optPtr->litLengthFreq[ll] = 1;
127832 +            }
127833 +            optPtr->litLengthSum = MaxLL+1;
127835 +            {   unsigned ml;
127836 +                for (ml=0; ml<=MaxML; ml++)
127837 +                    optPtr->matchLengthFreq[ml] = 1;
127838 +            }
127839 +            optPtr->matchLengthSum = MaxML+1;
127841 +            {   unsigned of;
127842 +                for (of=0; of<=MaxOff; of++)
127843 +                    optPtr->offCodeFreq[of] = 1;
127844 +            }
127845 +            optPtr->offCodeSum = MaxOff+1;
127847 +        }
127849 +    } else {   /* new block : re-use previous statistics, scaled down */
127851 +        if (compressedLiterals)
127852 +            optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
127853 +        optPtr->litLengthSum = ZSTD_downscaleStat(optPtr->litLengthFreq, MaxLL, 0);
127854 +        optPtr->matchLengthSum = ZSTD_downscaleStat(optPtr->matchLengthFreq, MaxML, 0);
127855 +        optPtr->offCodeSum = ZSTD_downscaleStat(optPtr->offCodeFreq, MaxOff, 0);
127856 +    }
127858 +    ZSTD_setBasePrices(optPtr, optLevel);
127861 +/* ZSTD_rawLiteralsCost() :
127862 + * price of literals (only) in specified segment (which length can be 0).
127863 + * does not include price of literalLength symbol */
127864 +static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,
127865 +                                const optState_t* const optPtr,
127866 +                                int optLevel)
127868 +    if (litLength == 0) return 0;
127870 +    if (!ZSTD_compressedLiterals(optPtr))
127871 +        return (litLength << 3) * BITCOST_MULTIPLIER;  /* Uncompressed - 8 bytes per literal. */
127873 +    if (optPtr->priceType == zop_predef)
127874 +        return (litLength*6) * BITCOST_MULTIPLIER;  /* 6 bit per literal - no statistic used */
127876 +    /* dynamic statistics */
127877 +    {   U32 price = litLength * optPtr->litSumBasePrice;
127878 +        U32 u;
127879 +        for (u=0; u < litLength; u++) {
127880 +            assert(WEIGHT(optPtr->litFreq[literals[u]], optLevel) <= optPtr->litSumBasePrice);   /* literal cost should never be negative */
127881 +            price -= WEIGHT(optPtr->litFreq[literals[u]], optLevel);
127882 +        }
127883 +        return price;
127884 +    }
127887 +/* ZSTD_litLengthPrice() :
127888 + * cost of literalLength symbol */
127889 +static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr, int optLevel)
127891 +    if (optPtr->priceType == zop_predef) return WEIGHT(litLength, optLevel);
127893 +    /* dynamic statistics */
127894 +    {   U32 const llCode = ZSTD_LLcode(litLength);
127895 +        return (LL_bits[llCode] * BITCOST_MULTIPLIER)
127896 +             + optPtr->litLengthSumBasePrice
127897 +             - WEIGHT(optPtr->litLengthFreq[llCode], optLevel);
127898 +    }
127901 +/* ZSTD_getMatchPrice() :
127902 + * Provides the cost of the match part (offset + matchLength) of a sequence
127903 + * Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence.
127904 + * optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) */
127905 +FORCE_INLINE_TEMPLATE U32
127906 +ZSTD_getMatchPrice(U32 const offset,
127907 +                   U32 const matchLength,
127908 +             const optState_t* const optPtr,
127909 +                   int const optLevel)
127911 +    U32 price;
127912 +    U32 const offCode = ZSTD_highbit32(offset+1);
127913 +    U32 const mlBase = matchLength - MINMATCH;
127914 +    assert(matchLength >= MINMATCH);
127916 +    if (optPtr->priceType == zop_predef)  /* fixed scheme, do not use statistics */
127917 +        return WEIGHT(mlBase, optLevel) + ((16 + offCode) * BITCOST_MULTIPLIER);
127919 +    /* dynamic statistics */
127920 +    price = (offCode * BITCOST_MULTIPLIER) + (optPtr->offCodeSumBasePrice - WEIGHT(optPtr->offCodeFreq[offCode], optLevel));
127921 +    if ((optLevel<2) /*static*/ && offCode >= 20)
127922 +        price += (offCode-19)*2 * BITCOST_MULTIPLIER; /* handicap for long distance offsets, favor decompression speed */
127924 +    /* match Length */
127925 +    {   U32 const mlCode = ZSTD_MLcode(mlBase);
127926 +        price += (ML_bits[mlCode] * BITCOST_MULTIPLIER) + (optPtr->matchLengthSumBasePrice - WEIGHT(optPtr->matchLengthFreq[mlCode], optLevel));
127927 +    }
127929 +    price += BITCOST_MULTIPLIER / 5;   /* heuristic : make matches a bit more costly to favor less sequences -> faster decompression speed */
127931 +    DEBUGLOG(8, "ZSTD_getMatchPrice(ml:%u) = %u", matchLength, price);
127932 +    return price;
127935 +/* ZSTD_updateStats() :
127936 + * assumption : literals + litLengtn <= iend */
127937 +static void ZSTD_updateStats(optState_t* const optPtr,
127938 +                             U32 litLength, const BYTE* literals,
127939 +                             U32 offsetCode, U32 matchLength)
127941 +    /* literals */
127942 +    if (ZSTD_compressedLiterals(optPtr)) {
127943 +        U32 u;
127944 +        for (u=0; u < litLength; u++)
127945 +            optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
127946 +        optPtr->litSum += litLength*ZSTD_LITFREQ_ADD;
127947 +    }
127949 +    /* literal Length */
127950 +    {   U32 const llCode = ZSTD_LLcode(litLength);
127951 +        optPtr->litLengthFreq[llCode]++;
127952 +        optPtr->litLengthSum++;
127953 +    }
127955 +    /* match offset code (0-2=>repCode; 3+=>offset+2) */
127956 +    {   U32 const offCode = ZSTD_highbit32(offsetCode+1);
127957 +        assert(offCode <= MaxOff);
127958 +        optPtr->offCodeFreq[offCode]++;
127959 +        optPtr->offCodeSum++;
127960 +    }
127962 +    /* match Length */
127963 +    {   U32 const mlBase = matchLength - MINMATCH;
127964 +        U32 const mlCode = ZSTD_MLcode(mlBase);
127965 +        optPtr->matchLengthFreq[mlCode]++;
127966 +        optPtr->matchLengthSum++;
127967 +    }
127971 +/* ZSTD_readMINMATCH() :
127972 + * function safe only for comparisons
127973 + * assumption : memPtr must be at least 4 bytes before end of buffer */
127974 +MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)
127976 +    switch (length)
127977 +    {
127978 +    default :
127979 +    case 4 : return MEM_read32(memPtr);
127980 +    case 3 : if (MEM_isLittleEndian())
127981 +                return MEM_read32(memPtr)<<8;
127982 +             else
127983 +                return MEM_read32(memPtr)>>8;
127984 +    }
127988 +/* Update hashTable3 up to ip (excluded)
127989 +   Assumption : always within prefix (i.e. not within extDict) */
127990 +static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms,
127991 +                                              U32* nextToUpdate3,
127992 +                                              const BYTE* const ip)
127994 +    U32* const hashTable3 = ms->hashTable3;
127995 +    U32 const hashLog3 = ms->hashLog3;
127996 +    const BYTE* const base = ms->window.base;
127997 +    U32 idx = *nextToUpdate3;
127998 +    U32 const target = (U32)(ip - base);
127999 +    size_t const hash3 = ZSTD_hash3Ptr(ip, hashLog3);
128000 +    assert(hashLog3 > 0);
128002 +    while(idx < target) {
128003 +        hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx;
128004 +        idx++;
128005 +    }
128007 +    *nextToUpdate3 = target;
128008 +    return hashTable3[hash3];
128012 +/*-*************************************
128013 +*  Binary Tree search
128014 +***************************************/
128015 +/** ZSTD_insertBt1() : add one or multiple positions to tree.
128016 + *  ip : assumed <= iend-8 .
128017 + * @return : nb of positions added */
128018 +static U32 ZSTD_insertBt1(
128019 +                ZSTD_matchState_t* ms,
128020 +                const BYTE* const ip, const BYTE* const iend,
128021 +                U32 const mls, const int extDict)
128023 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
128024 +    U32*   const hashTable = ms->hashTable;
128025 +    U32    const hashLog = cParams->hashLog;
128026 +    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
128027 +    U32*   const bt = ms->chainTable;
128028 +    U32    const btLog  = cParams->chainLog - 1;
128029 +    U32    const btMask = (1 << btLog) - 1;
128030 +    U32 matchIndex = hashTable[h];
128031 +    size_t commonLengthSmaller=0, commonLengthLarger=0;
128032 +    const BYTE* const base = ms->window.base;
128033 +    const BYTE* const dictBase = ms->window.dictBase;
128034 +    const U32 dictLimit = ms->window.dictLimit;
128035 +    const BYTE* const dictEnd = dictBase + dictLimit;
128036 +    const BYTE* const prefixStart = base + dictLimit;
128037 +    const BYTE* match;
128038 +    const U32 curr = (U32)(ip-base);
128039 +    const U32 btLow = btMask >= curr ? 0 : curr - btMask;
128040 +    U32* smallerPtr = bt + 2*(curr&btMask);
128041 +    U32* largerPtr  = smallerPtr + 1;
128042 +    U32 dummy32;   /* to be nullified at the end */
128043 +    U32 const windowLow = ms->window.lowLimit;
128044 +    U32 matchEndIdx = curr+8+1;
128045 +    size_t bestLength = 8;
128046 +    U32 nbCompares = 1U << cParams->searchLog;
128047 +#ifdef ZSTD_C_PREDICT
128048 +    U32 predictedSmall = *(bt + 2*((curr-1)&btMask) + 0);
128049 +    U32 predictedLarge = *(bt + 2*((curr-1)&btMask) + 1);
128050 +    predictedSmall += (predictedSmall>0);
128051 +    predictedLarge += (predictedLarge>0);
128052 +#endif /* ZSTD_C_PREDICT */
128054 +    DEBUGLOG(8, "ZSTD_insertBt1 (%u)", curr);
128056 +    assert(ip <= iend-8);   /* required for h calculation */
128057 +    hashTable[h] = curr;   /* Update Hash Table */
128059 +    assert(windowLow > 0);
128060 +    while (nbCompares-- && (matchIndex >= windowLow)) {
128061 +        U32* const nextPtr = bt + 2*(matchIndex & btMask);
128062 +        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
128063 +        assert(matchIndex < curr);
128065 +#ifdef ZSTD_C_PREDICT   /* note : can create issues when hlog small <= 11 */
128066 +        const U32* predictPtr = bt + 2*((matchIndex-1) & btMask);   /* written this way, as bt is a roll buffer */
128067 +        if (matchIndex == predictedSmall) {
128068 +            /* no need to check length, result known */
128069 +            *smallerPtr = matchIndex;
128070 +            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
128071 +            smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
128072 +            matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
128073 +            predictedSmall = predictPtr[1] + (predictPtr[1]>0);
128074 +            continue;
128075 +        }
128076 +        if (matchIndex == predictedLarge) {
128077 +            *largerPtr = matchIndex;
128078 +            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
128079 +            largerPtr = nextPtr;
128080 +            matchIndex = nextPtr[0];
128081 +            predictedLarge = predictPtr[0] + (predictPtr[0]>0);
128082 +            continue;
128083 +        }
128084 +#endif
128086 +        if (!extDict || (matchIndex+matchLength >= dictLimit)) {
128087 +            assert(matchIndex+matchLength >= dictLimit);   /* might be wrong if actually extDict */
128088 +            match = base + matchIndex;
128089 +            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
128090 +        } else {
128091 +            match = dictBase + matchIndex;
128092 +            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
128093 +            if (matchIndex+matchLength >= dictLimit)
128094 +                match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
128095 +        }
128097 +        if (matchLength > bestLength) {
128098 +            bestLength = matchLength;
128099 +            if (matchLength > matchEndIdx - matchIndex)
128100 +                matchEndIdx = matchIndex + (U32)matchLength;
128101 +        }
128103 +        if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
128104 +            break;   /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */
128105 +        }
128107 +        if (match[matchLength] < ip[matchLength]) {  /* necessarily within buffer */
128108 +            /* match is smaller than current */
128109 +            *smallerPtr = matchIndex;             /* update smaller idx */
128110 +            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
128111 +            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
128112 +            smallerPtr = nextPtr+1;               /* new "candidate" => larger than match, which was smaller than target */
128113 +            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous and closer to current */
128114 +        } else {
128115 +            /* match is larger than current */
128116 +            *largerPtr = matchIndex;
128117 +            commonLengthLarger = matchLength;
128118 +            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
128119 +            largerPtr = nextPtr;
128120 +            matchIndex = nextPtr[0];
128121 +    }   }
128123 +    *smallerPtr = *largerPtr = 0;
128124 +    {   U32 positions = 0;
128125 +        if (bestLength > 384) positions = MIN(192, (U32)(bestLength - 384));   /* speed optimization */
128126 +        assert(matchEndIdx > curr + 8);
128127 +        return MAX(positions, matchEndIdx - (curr + 8));
128128 +    }
128131 +FORCE_INLINE_TEMPLATE
128132 +void ZSTD_updateTree_internal(
128133 +                ZSTD_matchState_t* ms,
128134 +                const BYTE* const ip, const BYTE* const iend,
128135 +                const U32 mls, const ZSTD_dictMode_e dictMode)
128137 +    const BYTE* const base = ms->window.base;
128138 +    U32 const target = (U32)(ip - base);
128139 +    U32 idx = ms->nextToUpdate;
128140 +    DEBUGLOG(6, "ZSTD_updateTree_internal, from %u to %u  (dictMode:%u)",
128141 +                idx, target, dictMode);
128143 +    while(idx < target) {
128144 +        U32 const forward = ZSTD_insertBt1(ms, base+idx, iend, mls, dictMode == ZSTD_extDict);
128145 +        assert(idx < (U32)(idx + forward));
128146 +        idx += forward;
128147 +    }
128148 +    assert((size_t)(ip - base) <= (size_t)(U32)(-1));
128149 +    assert((size_t)(iend - base) <= (size_t)(U32)(-1));
128150 +    ms->nextToUpdate = target;
128153 +void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) {
128154 +    ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict);
128157 +FORCE_INLINE_TEMPLATE
128158 +U32 ZSTD_insertBtAndGetAllMatches (
128159 +                    ZSTD_match_t* matches,   /* store result (found matches) in this table (presumed large enough) */
128160 +                    ZSTD_matchState_t* ms,
128161 +                    U32* nextToUpdate3,
128162 +                    const BYTE* const ip, const BYTE* const iLimit, const ZSTD_dictMode_e dictMode,
128163 +                    const U32 rep[ZSTD_REP_NUM],
128164 +                    U32 const ll0,   /* tells if associated literal length is 0 or not. This value must be 0 or 1 */
128165 +                    const U32 lengthToBeat,
128166 +                    U32 const mls /* template */)
128168 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
128169 +    U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
128170 +    const BYTE* const base = ms->window.base;
128171 +    U32 const curr = (U32)(ip-base);
128172 +    U32 const hashLog = cParams->hashLog;
128173 +    U32 const minMatch = (mls==3) ? 3 : 4;
128174 +    U32* const hashTable = ms->hashTable;
128175 +    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
128176 +    U32 matchIndex  = hashTable[h];
128177 +    U32* const bt   = ms->chainTable;
128178 +    U32 const btLog = cParams->chainLog - 1;
128179 +    U32 const btMask= (1U << btLog) - 1;
128180 +    size_t commonLengthSmaller=0, commonLengthLarger=0;
128181 +    const BYTE* const dictBase = ms->window.dictBase;
128182 +    U32 const dictLimit = ms->window.dictLimit;
128183 +    const BYTE* const dictEnd = dictBase + dictLimit;
128184 +    const BYTE* const prefixStart = base + dictLimit;
128185 +    U32 const btLow = (btMask >= curr) ? 0 : curr - btMask;
128186 +    U32 const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog);
128187 +    U32 const matchLow = windowLow ? windowLow : 1;
128188 +    U32* smallerPtr = bt + 2*(curr&btMask);
128189 +    U32* largerPtr  = bt + 2*(curr&btMask) + 1;
128190 +    U32 matchEndIdx = curr+8+1;   /* farthest referenced position of any match => detects repetitive patterns */
128191 +    U32 dummy32;   /* to be nullified at the end */
128192 +    U32 mnum = 0;
128193 +    U32 nbCompares = 1U << cParams->searchLog;
128195 +    const ZSTD_matchState_t* dms    = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL;
128196 +    const ZSTD_compressionParameters* const dmsCParams =
128197 +                                      dictMode == ZSTD_dictMatchState ? &dms->cParams : NULL;
128198 +    const BYTE* const dmsBase       = dictMode == ZSTD_dictMatchState ? dms->window.base : NULL;
128199 +    const BYTE* const dmsEnd        = dictMode == ZSTD_dictMatchState ? dms->window.nextSrc : NULL;
128200 +    U32         const dmsHighLimit  = dictMode == ZSTD_dictMatchState ? (U32)(dmsEnd - dmsBase) : 0;
128201 +    U32         const dmsLowLimit   = dictMode == ZSTD_dictMatchState ? dms->window.lowLimit : 0;
128202 +    U32         const dmsIndexDelta = dictMode == ZSTD_dictMatchState ? windowLow - dmsHighLimit : 0;
128203 +    U32         const dmsHashLog    = dictMode == ZSTD_dictMatchState ? dmsCParams->hashLog : hashLog;
128204 +    U32         const dmsBtLog      = dictMode == ZSTD_dictMatchState ? dmsCParams->chainLog - 1 : btLog;
128205 +    U32         const dmsBtMask     = dictMode == ZSTD_dictMatchState ? (1U << dmsBtLog) - 1 : 0;
128206 +    U32         const dmsBtLow      = dictMode == ZSTD_dictMatchState && dmsBtMask < dmsHighLimit - dmsLowLimit ? dmsHighLimit - dmsBtMask : dmsLowLimit;
128208 +    size_t bestLength = lengthToBeat-1;
128209 +    DEBUGLOG(8, "ZSTD_insertBtAndGetAllMatches: current=%u", curr);
128211 +    /* check repCode */
128212 +    assert(ll0 <= 1);   /* necessarily 1 or 0 */
128213 +    {   U32 const lastR = ZSTD_REP_NUM + ll0;
128214 +        U32 repCode;
128215 +        for (repCode = ll0; repCode < lastR; repCode++) {
128216 +            U32 const repOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
128217 +            U32 const repIndex = curr - repOffset;
128218 +            U32 repLen = 0;
128219 +            assert(curr >= dictLimit);
128220 +            if (repOffset-1 /* intentional overflow, discards 0 and -1 */ < curr-dictLimit) {  /* equivalent to `curr > repIndex >= dictLimit` */
128221 +                /* We must validate the repcode offset because when we're using a dictionary the
128222 +                 * valid offset range shrinks when the dictionary goes out of bounds.
128223 +                 */
128224 +                if ((repIndex >= windowLow) & (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repOffset, minMatch))) {
128225 +                    repLen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repOffset, iLimit) + minMatch;
128226 +                }
128227 +            } else {  /* repIndex < dictLimit || repIndex >= curr */
128228 +                const BYTE* const repMatch = dictMode == ZSTD_dictMatchState ?
128229 +                                             dmsBase + repIndex - dmsIndexDelta :
128230 +                                             dictBase + repIndex;
128231 +                assert(curr >= windowLow);
128232 +                if ( dictMode == ZSTD_extDict
128233 +                  && ( ((repOffset-1) /*intentional overflow*/ < curr - windowLow)  /* equivalent to `curr > repIndex >= windowLow` */
128234 +                     & (((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */)
128235 +                  && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
128236 +                    repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch;
128237 +                }
128238 +                if (dictMode == ZSTD_dictMatchState
128239 +                  && ( ((repOffset-1) /*intentional overflow*/ < curr - (dmsLowLimit + dmsIndexDelta))  /* equivalent to `curr > repIndex >= dmsLowLimit` */
128240 +                     & ((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */
128241 +                  && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
128242 +                    repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dmsEnd, prefixStart) + minMatch;
128243 +            }   }
128244 +            /* save longer solution */
128245 +            if (repLen > bestLength) {
128246 +                DEBUGLOG(8, "found repCode %u (ll0:%u, offset:%u) of length %u",
128247 +                            repCode, ll0, repOffset, repLen);
128248 +                bestLength = repLen;
128249 +                matches[mnum].off = repCode - ll0;
128250 +                matches[mnum].len = (U32)repLen;
128251 +                mnum++;
128252 +                if ( (repLen > sufficient_len)
128253 +                   | (ip+repLen == iLimit) ) {  /* best possible */
128254 +                    return mnum;
128255 +    }   }   }   }
128257 +    /* HC3 match finder */
128258 +    if ((mls == 3) /*static*/ && (bestLength < mls)) {
128259 +        U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, nextToUpdate3, ip);
128260 +        if ((matchIndex3 >= matchLow)
128261 +          & (curr - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) {
128262 +            size_t mlen;
128263 +            if ((dictMode == ZSTD_noDict) /*static*/ || (dictMode == ZSTD_dictMatchState) /*static*/ || (matchIndex3 >= dictLimit)) {
128264 +                const BYTE* const match = base + matchIndex3;
128265 +                mlen = ZSTD_count(ip, match, iLimit);
128266 +            } else {
128267 +                const BYTE* const match = dictBase + matchIndex3;
128268 +                mlen = ZSTD_count_2segments(ip, match, iLimit, dictEnd, prefixStart);
128269 +            }
128271 +            /* save best solution */
128272 +            if (mlen >= mls /* == 3 > bestLength */) {
128273 +                DEBUGLOG(8, "found small match with hlog3, of length %u",
128274 +                            (U32)mlen);
128275 +                bestLength = mlen;
128276 +                assert(curr > matchIndex3);
128277 +                assert(mnum==0);  /* no prior solution */
128278 +                matches[0].off = (curr - matchIndex3) + ZSTD_REP_MOVE;
128279 +                matches[0].len = (U32)mlen;
128280 +                mnum = 1;
128281 +                if ( (mlen > sufficient_len) |
128282 +                     (ip+mlen == iLimit) ) {  /* best possible length */
128283 +                    ms->nextToUpdate = curr+1;  /* skip insertion */
128284 +                    return 1;
128285 +        }   }   }
128286 +        /* no dictMatchState lookup: dicts don't have a populated HC3 table */
128287 +    }
128289 +    hashTable[h] = curr;   /* Update Hash Table */
128291 +    while (nbCompares-- && (matchIndex >= matchLow)) {
128292 +        U32* const nextPtr = bt + 2*(matchIndex & btMask);
128293 +        const BYTE* match;
128294 +        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
128295 +        assert(curr > matchIndex);
128297 +        if ((dictMode == ZSTD_noDict) || (dictMode == ZSTD_dictMatchState) || (matchIndex+matchLength >= dictLimit)) {
128298 +            assert(matchIndex+matchLength >= dictLimit);  /* ensure the condition is correct when !extDict */
128299 +            match = base + matchIndex;
128300 +            if (matchIndex >= dictLimit) assert(memcmp(match, ip, matchLength) == 0);  /* ensure early section of match is equal as expected */
128301 +            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iLimit);
128302 +        } else {
128303 +            match = dictBase + matchIndex;
128304 +            assert(memcmp(match, ip, matchLength) == 0);  /* ensure early section of match is equal as expected */
128305 +            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart);
128306 +            if (matchIndex+matchLength >= dictLimit)
128307 +                match = base + matchIndex;   /* prepare for match[matchLength] read */
128308 +        }
128310 +        if (matchLength > bestLength) {
128311 +            DEBUGLOG(8, "found match of length %u at distance %u (offCode=%u)",
128312 +                    (U32)matchLength, curr - matchIndex, curr - matchIndex + ZSTD_REP_MOVE);
128313 +            assert(matchEndIdx > matchIndex);
128314 +            if (matchLength > matchEndIdx - matchIndex)
128315 +                matchEndIdx = matchIndex + (U32)matchLength;
128316 +            bestLength = matchLength;
128317 +            matches[mnum].off = (curr - matchIndex) + ZSTD_REP_MOVE;
128318 +            matches[mnum].len = (U32)matchLength;
128319 +            mnum++;
128320 +            if ( (matchLength > ZSTD_OPT_NUM)
128321 +               | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
128322 +                if (dictMode == ZSTD_dictMatchState) nbCompares = 0; /* break should also skip searching dms */
128323 +                break; /* drop, to preserve bt consistency (miss a little bit of compression) */
128324 +            }
128325 +        }
128327 +        if (match[matchLength] < ip[matchLength]) {
128328 +            /* match smaller than current */
128329 +            *smallerPtr = matchIndex;             /* update smaller idx */
128330 +            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
128331 +            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
128332 +            smallerPtr = nextPtr+1;               /* new candidate => larger than match, which was smaller than current */
128333 +            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous, closer to current */
128334 +        } else {
128335 +            *largerPtr = matchIndex;
128336 +            commonLengthLarger = matchLength;
128337 +            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
128338 +            largerPtr = nextPtr;
128339 +            matchIndex = nextPtr[0];
128340 +    }   }
128342 +    *smallerPtr = *largerPtr = 0;
128344 +    if (dictMode == ZSTD_dictMatchState && nbCompares) {
128345 +        size_t const dmsH = ZSTD_hashPtr(ip, dmsHashLog, mls);
128346 +        U32 dictMatchIndex = dms->hashTable[dmsH];
128347 +        const U32* const dmsBt = dms->chainTable;
128348 +        commonLengthSmaller = commonLengthLarger = 0;
128349 +        while (nbCompares-- && (dictMatchIndex > dmsLowLimit)) {
128350 +            const U32* const nextPtr = dmsBt + 2*(dictMatchIndex & dmsBtMask);
128351 +            size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
128352 +            const BYTE* match = dmsBase + dictMatchIndex;
128353 +            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dmsEnd, prefixStart);
128354 +            if (dictMatchIndex+matchLength >= dmsHighLimit)
128355 +                match = base + dictMatchIndex + dmsIndexDelta;   /* to prepare for next usage of match[matchLength] */
128357 +            if (matchLength > bestLength) {
128358 +                matchIndex = dictMatchIndex + dmsIndexDelta;
128359 +                DEBUGLOG(8, "found dms match of length %u at distance %u (offCode=%u)",
128360 +                        (U32)matchLength, curr - matchIndex, curr - matchIndex + ZSTD_REP_MOVE);
128361 +                if (matchLength > matchEndIdx - matchIndex)
128362 +                    matchEndIdx = matchIndex + (U32)matchLength;
128363 +                bestLength = matchLength;
128364 +                matches[mnum].off = (curr - matchIndex) + ZSTD_REP_MOVE;
128365 +                matches[mnum].len = (U32)matchLength;
128366 +                mnum++;
128367 +                if ( (matchLength > ZSTD_OPT_NUM)
128368 +                   | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) {
128369 +                    break;   /* drop, to guarantee consistency (miss a little bit of compression) */
128370 +                }
128371 +            }
128373 +            if (dictMatchIndex <= dmsBtLow) { break; }   /* beyond tree size, stop the search */
128374 +            if (match[matchLength] < ip[matchLength]) {
128375 +                commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
128376 +                dictMatchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
128377 +            } else {
128378 +                /* match is larger than current */
128379 +                commonLengthLarger = matchLength;
128380 +                dictMatchIndex = nextPtr[0];
128381 +            }
128382 +        }
128383 +    }
128385 +    assert(matchEndIdx > curr+8);
128386 +    ms->nextToUpdate = matchEndIdx - 8;  /* skip repetitive patterns */
128387 +    return mnum;
128391 +FORCE_INLINE_TEMPLATE U32 ZSTD_BtGetAllMatches (
128392 +                        ZSTD_match_t* matches,   /* store result (match found, increasing size) in this table */
128393 +                        ZSTD_matchState_t* ms,
128394 +                        U32* nextToUpdate3,
128395 +                        const BYTE* ip, const BYTE* const iHighLimit, const ZSTD_dictMode_e dictMode,
128396 +                        const U32 rep[ZSTD_REP_NUM],
128397 +                        U32 const ll0,
128398 +                        U32 const lengthToBeat)
128400 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
128401 +    U32 const matchLengthSearch = cParams->minMatch;
128402 +    DEBUGLOG(8, "ZSTD_BtGetAllMatches");
128403 +    if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
128404 +    ZSTD_updateTree_internal(ms, ip, iHighLimit, matchLengthSearch, dictMode);
128405 +    switch(matchLengthSearch)
128406 +    {
128407 +    case 3 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 3);
128408 +    default :
128409 +    case 4 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 4);
128410 +    case 5 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 5);
128411 +    case 7 :
128412 +    case 6 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 6);
128413 +    }
128416 +/*************************
128417 +*  LDM helper functions  *
128418 +*************************/
128420 +/* Struct containing info needed to make decision about ldm inclusion */
128421 +typedef struct {
128422 +    rawSeqStore_t seqStore;         /* External match candidates store for this block */
128423 +    U32 startPosInBlock;            /* Start position of the current match candidate */
128424 +    U32 endPosInBlock;              /* End position of the current match candidate */
128425 +    U32 offset;                     /* Offset of the match candidate */
128426 +} ZSTD_optLdm_t;
128428 +/* ZSTD_optLdm_skipRawSeqStoreBytes():
128429 + * Moves forward in rawSeqStore by nbBytes, which will update the fields 'pos' and 'posInSequence'.
128430 + */
128431 +static void ZSTD_optLdm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {
128432 +    U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
128433 +    while (currPos && rawSeqStore->pos < rawSeqStore->size) {
128434 +        rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
128435 +        if (currPos >= currSeq.litLength + currSeq.matchLength) {
128436 +            currPos -= currSeq.litLength + currSeq.matchLength;
128437 +            rawSeqStore->pos++;
128438 +        } else {
128439 +            rawSeqStore->posInSequence = currPos;
128440 +            break;
128441 +        }
128442 +    }
128443 +    if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) {
128444 +        rawSeqStore->posInSequence = 0;
128445 +    }
128448 +/* ZSTD_opt_getNextMatchAndUpdateSeqStore():
128449 + * Calculates the beginning and end of the next match in the current block.
128450 + * Updates 'pos' and 'posInSequence' of the ldmSeqStore.
128451 + */
128452 +static void ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 currPosInBlock,
128453 +                                                   U32 blockBytesRemaining) {
128454 +    rawSeq currSeq;
128455 +    U32 currBlockEndPos;
128456 +    U32 literalsBytesRemaining;
128457 +    U32 matchBytesRemaining;
128459 +    /* Setting match end position to MAX to ensure we never use an LDM during this block */
128460 +    if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) {
128461 +        optLdm->startPosInBlock = UINT_MAX;
128462 +        optLdm->endPosInBlock = UINT_MAX;
128463 +        return;
128464 +    }
128465 +    /* Calculate appropriate bytes left in matchLength and litLength after adjusting
128466 +       based on ldmSeqStore->posInSequence */
128467 +    currSeq = optLdm->seqStore.seq[optLdm->seqStore.pos];
128468 +    assert(optLdm->seqStore.posInSequence <= currSeq.litLength + currSeq.matchLength);
128469 +    currBlockEndPos = currPosInBlock + blockBytesRemaining;
128470 +    literalsBytesRemaining = (optLdm->seqStore.posInSequence < currSeq.litLength) ?
128471 +            currSeq.litLength - (U32)optLdm->seqStore.posInSequence :
128472 +            0;
128473 +    matchBytesRemaining = (literalsBytesRemaining == 0) ?
128474 +            currSeq.matchLength - ((U32)optLdm->seqStore.posInSequence - currSeq.litLength) :
128475 +            currSeq.matchLength;
128477 +    /* If there are more literal bytes than bytes remaining in block, no ldm is possible */
128478 +    if (literalsBytesRemaining >= blockBytesRemaining) {
128479 +        optLdm->startPosInBlock = UINT_MAX;
128480 +        optLdm->endPosInBlock = UINT_MAX;
128481 +        ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, blockBytesRemaining);
128482 +        return;
128483 +    }
128485 +    /* Matches may be < MINMATCH by this process. In that case, we will reject them
128486 +       when we are deciding whether or not to add the ldm */
128487 +    optLdm->startPosInBlock = currPosInBlock + literalsBytesRemaining;
128488 +    optLdm->endPosInBlock = optLdm->startPosInBlock + matchBytesRemaining;
128489 +    optLdm->offset = currSeq.offset;
128491 +    if (optLdm->endPosInBlock > currBlockEndPos) {
128492 +        /* Match ends after the block ends, we can't use the whole match */
128493 +        optLdm->endPosInBlock = currBlockEndPos;
128494 +        ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, currBlockEndPos - currPosInBlock);
128495 +    } else {
128496 +        /* Consume nb of bytes equal to size of sequence left */
128497 +        ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, literalsBytesRemaining + matchBytesRemaining);
128498 +    }
128501 +/* ZSTD_optLdm_maybeAddMatch():
128502 + * Adds a match if it's long enough, based on it's 'matchStartPosInBlock'
128503 + * and 'matchEndPosInBlock', into 'matches'. Maintains the correct ordering of 'matches'
128504 + */
128505 +static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches,
128506 +                                      ZSTD_optLdm_t* optLdm, U32 currPosInBlock) {
128507 +    U32 posDiff = currPosInBlock - optLdm->startPosInBlock;
128508 +    /* Note: ZSTD_match_t actually contains offCode and matchLength (before subtracting MINMATCH) */
128509 +    U32 candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff;
128510 +    U32 candidateOffCode = optLdm->offset + ZSTD_REP_MOVE;
128512 +    /* Ensure that current block position is not outside of the match */
128513 +    if (currPosInBlock < optLdm->startPosInBlock
128514 +      || currPosInBlock >= optLdm->endPosInBlock
128515 +      || candidateMatchLength < MINMATCH) {
128516 +        return;
128517 +    }
128519 +    if (*nbMatches == 0 || ((candidateMatchLength > matches[*nbMatches-1].len) && *nbMatches < ZSTD_OPT_NUM)) {
128520 +        DEBUGLOG(6, "ZSTD_optLdm_maybeAddMatch(): Adding ldm candidate match (offCode: %u matchLength %u) at block position=%u",
128521 +                 candidateOffCode, candidateMatchLength, currPosInBlock);
128522 +        matches[*nbMatches].len = candidateMatchLength;
128523 +        matches[*nbMatches].off = candidateOffCode;
128524 +        (*nbMatches)++;
128525 +    }
128528 +/* ZSTD_optLdm_processMatchCandidate():
128529 + * Wrapper function to update ldm seq store and call ldm functions as necessary.
128530 + */
128531 +static void ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm, ZSTD_match_t* matches, U32* nbMatches,
128532 +                                              U32 currPosInBlock, U32 remainingBytes) {
128533 +    if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) {
128534 +        return;
128535 +    }
128537 +    if (currPosInBlock >= optLdm->endPosInBlock) {
128538 +        if (currPosInBlock > optLdm->endPosInBlock) {
128539 +            /* The position at which ZSTD_optLdm_processMatchCandidate() is called is not necessarily
128540 +             * at the end of a match from the ldm seq store, and will often be some bytes
128541 +             * over beyond matchEndPosInBlock. As such, we need to correct for these "overshoots"
128542 +             */
128543 +            U32 posOvershoot = currPosInBlock - optLdm->endPosInBlock;
128544 +            ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, posOvershoot);
128545 +        }
128546 +        ZSTD_opt_getNextMatchAndUpdateSeqStore(optLdm, currPosInBlock, remainingBytes);
128547 +    }
128548 +    ZSTD_optLdm_maybeAddMatch(matches, nbMatches, optLdm, currPosInBlock);
128551 +/*-*******************************
128552 +*  Optimal parser
128553 +*********************************/
128556 +static U32 ZSTD_totalLen(ZSTD_optimal_t sol)
128558 +    return sol.litlen + sol.mlen;
128561 +#if 0 /* debug */
128563 +static void
128564 +listStats(const U32* table, int lastEltID)
128566 +    int const nbElts = lastEltID + 1;
128567 +    int enb;
128568 +    for (enb=0; enb < nbElts; enb++) {
128569 +        (void)table;
128570 +        /* RAWLOG(2, "%3i:%3i,  ", enb, table[enb]); */
128571 +        RAWLOG(2, "%4i,", table[enb]);
128572 +    }
128573 +    RAWLOG(2, " \n");
128576 +#endif
128578 +FORCE_INLINE_TEMPLATE size_t
128579 +ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
128580 +                               seqStore_t* seqStore,
128581 +                               U32 rep[ZSTD_REP_NUM],
128582 +                         const void* src, size_t srcSize,
128583 +                         const int optLevel,
128584 +                         const ZSTD_dictMode_e dictMode)
128586 +    optState_t* const optStatePtr = &ms->opt;
128587 +    const BYTE* const istart = (const BYTE*)src;
128588 +    const BYTE* ip = istart;
128589 +    const BYTE* anchor = istart;
128590 +    const BYTE* const iend = istart + srcSize;
128591 +    const BYTE* const ilimit = iend - 8;
128592 +    const BYTE* const base = ms->window.base;
128593 +    const BYTE* const prefixStart = base + ms->window.dictLimit;
128594 +    const ZSTD_compressionParameters* const cParams = &ms->cParams;
128596 +    U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
128597 +    U32 const minMatch = (cParams->minMatch == 3) ? 3 : 4;
128598 +    U32 nextToUpdate3 = ms->nextToUpdate;
128600 +    ZSTD_optimal_t* const opt = optStatePtr->priceTable;
128601 +    ZSTD_match_t* const matches = optStatePtr->matchTable;
128602 +    ZSTD_optimal_t lastSequence;
128603 +    ZSTD_optLdm_t optLdm;
128605 +    optLdm.seqStore = ms->ldmSeqStore ? *ms->ldmSeqStore : kNullRawSeqStore;
128606 +    optLdm.endPosInBlock = optLdm.startPosInBlock = optLdm.offset = 0;
128607 +    ZSTD_opt_getNextMatchAndUpdateSeqStore(&optLdm, (U32)(ip-istart), (U32)(iend-ip));
128609 +    /* init */
128610 +    DEBUGLOG(5, "ZSTD_compressBlock_opt_generic: current=%u, prefix=%u, nextToUpdate=%u",
128611 +                (U32)(ip - base), ms->window.dictLimit, ms->nextToUpdate);
128612 +    assert(optLevel <= 2);
128613 +    ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize, optLevel);
128614 +    ip += (ip==prefixStart);
128616 +    /* Match Loop */
128617 +    while (ip < ilimit) {
128618 +        U32 cur, last_pos = 0;
128620 +        /* find first match */
128621 +        {   U32 const litlen = (U32)(ip - anchor);
128622 +            U32 const ll0 = !litlen;
128623 +            U32 nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, ip, iend, dictMode, rep, ll0, minMatch);
128624 +            ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
128625 +                                              (U32)(ip-istart), (U32)(iend - ip));
128626 +            if (!nbMatches) { ip++; continue; }
128628 +            /* initialize opt[0] */
128629 +            { U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
128630 +            opt[0].mlen = 0;  /* means is_a_literal */
128631 +            opt[0].litlen = litlen;
128632 +            /* We don't need to include the actual price of the literals because
128633 +             * it is static for the duration of the forward pass, and is included
128634 +             * in every price. We include the literal length to avoid negative
128635 +             * prices when we subtract the previous literal length.
128636 +             */
128637 +            opt[0].price = ZSTD_litLengthPrice(litlen, optStatePtr, optLevel);
128639 +            /* large match -> immediate encoding */
128640 +            {   U32 const maxML = matches[nbMatches-1].len;
128641 +                U32 const maxOffset = matches[nbMatches-1].off;
128642 +                DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new series",
128643 +                            nbMatches, maxML, maxOffset, (U32)(ip-prefixStart));
128645 +                if (maxML > sufficient_len) {
128646 +                    lastSequence.litlen = litlen;
128647 +                    lastSequence.mlen = maxML;
128648 +                    lastSequence.off = maxOffset;
128649 +                    DEBUGLOG(6, "large match (%u>%u), immediate encoding",
128650 +                                maxML, sufficient_len);
128651 +                    cur = 0;
128652 +                    last_pos = ZSTD_totalLen(lastSequence);
128653 +                    goto _shortestPath;
128654 +            }   }
128656 +            /* set prices for first matches starting position == 0 */
128657 +            {   U32 const literalsPrice = opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
128658 +                U32 pos;
128659 +                U32 matchNb;
128660 +                for (pos = 1; pos < minMatch; pos++) {
128661 +                    opt[pos].price = ZSTD_MAX_PRICE;   /* mlen, litlen and price will be fixed during forward scanning */
128662 +                }
128663 +                for (matchNb = 0; matchNb < nbMatches; matchNb++) {
128664 +                    U32 const offset = matches[matchNb].off;
128665 +                    U32 const end = matches[matchNb].len;
128666 +                    for ( ; pos <= end ; pos++ ) {
128667 +                        U32 const matchPrice = ZSTD_getMatchPrice(offset, pos, optStatePtr, optLevel);
128668 +                        U32 const sequencePrice = literalsPrice + matchPrice;
128669 +                        DEBUGLOG(7, "rPos:%u => set initial price : %.2f",
128670 +                                    pos, ZSTD_fCost(sequencePrice));
128671 +                        opt[pos].mlen = pos;
128672 +                        opt[pos].off = offset;
128673 +                        opt[pos].litlen = litlen;
128674 +                        opt[pos].price = sequencePrice;
128675 +                }   }
128676 +                last_pos = pos-1;
128677 +            }
128678 +        }
128680 +        /* check further positions */
128681 +        for (cur = 1; cur <= last_pos; cur++) {
128682 +            const BYTE* const inr = ip + cur;
128683 +            assert(cur < ZSTD_OPT_NUM);
128684 +            DEBUGLOG(7, "cPos:%zi==rPos:%u", inr-istart, cur)
128686 +            /* Fix current position with one literal if cheaper */
128687 +            {   U32 const litlen = (opt[cur-1].mlen == 0) ? opt[cur-1].litlen + 1 : 1;
128688 +                int const price = opt[cur-1].price
128689 +                                + ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel)
128690 +                                + ZSTD_litLengthPrice(litlen, optStatePtr, optLevel)
128691 +                                - ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel);
128692 +                assert(price < 1000000000); /* overflow check */
128693 +                if (price <= opt[cur].price) {
128694 +                    DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)",
128695 +                                inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen,
128696 +                                opt[cur-1].rep[0], opt[cur-1].rep[1], opt[cur-1].rep[2]);
128697 +                    opt[cur].mlen = 0;
128698 +                    opt[cur].off = 0;
128699 +                    opt[cur].litlen = litlen;
128700 +                    opt[cur].price = price;
128701 +                } else {
128702 +                    DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f) (hist:%u,%u,%u)",
128703 +                                inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price),
128704 +                                opt[cur].rep[0], opt[cur].rep[1], opt[cur].rep[2]);
128705 +                }
128706 +            }
128708 +            /* Set the repcodes of the current position. We must do it here
128709 +             * because we rely on the repcodes of the 2nd to last sequence being
128710 +             * correct to set the next chunks repcodes during the backward
128711 +             * traversal.
128712 +             */
128713 +            ZSTD_STATIC_ASSERT(sizeof(opt[cur].rep) == sizeof(repcodes_t));
128714 +            assert(cur >= opt[cur].mlen);
128715 +            if (opt[cur].mlen != 0) {
128716 +                U32 const prev = cur - opt[cur].mlen;
128717 +                repcodes_t newReps = ZSTD_updateRep(opt[prev].rep, opt[cur].off, opt[cur].litlen==0);
128718 +                ZSTD_memcpy(opt[cur].rep, &newReps, sizeof(repcodes_t));
128719 +            } else {
128720 +                ZSTD_memcpy(opt[cur].rep, opt[cur - 1].rep, sizeof(repcodes_t));
128721 +            }
128723 +            /* last match must start at a minimum distance of 8 from oend */
128724 +            if (inr > ilimit) continue;
128726 +            if (cur == last_pos) break;
128728 +            if ( (optLevel==0) /*static_test*/
128729 +              && (opt[cur+1].price <= opt[cur].price + (BITCOST_MULTIPLIER/2)) ) {
128730 +                DEBUGLOG(7, "move to next rPos:%u : price is <=", cur+1);
128731 +                continue;  /* skip unpromising positions; about ~+6% speed, -0.01 ratio */
128732 +            }
128734 +            {   U32 const ll0 = (opt[cur].mlen != 0);
128735 +                U32 const litlen = (opt[cur].mlen == 0) ? opt[cur].litlen : 0;
128736 +                U32 const previousPrice = opt[cur].price;
128737 +                U32 const basePrice = previousPrice + ZSTD_litLengthPrice(0, optStatePtr, optLevel);
128738 +                U32 nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, inr, iend, dictMode, opt[cur].rep, ll0, minMatch);
128739 +                U32 matchNb;
128741 +                ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches,
128742 +                                                  (U32)(inr-istart), (U32)(iend-inr));
128744 +                if (!nbMatches) {
128745 +                    DEBUGLOG(7, "rPos:%u : no match found", cur);
128746 +                    continue;
128747 +                }
128749 +                {   U32 const maxML = matches[nbMatches-1].len;
128750 +                    DEBUGLOG(7, "cPos:%zi==rPos:%u, found %u matches, of maxLength=%u",
128751 +                                inr-istart, cur, nbMatches, maxML);
128753 +                    if ( (maxML > sufficient_len)
128754 +                      || (cur + maxML >= ZSTD_OPT_NUM) ) {
128755 +                        lastSequence.mlen = maxML;
128756 +                        lastSequence.off = matches[nbMatches-1].off;
128757 +                        lastSequence.litlen = litlen;
128758 +                        cur -= (opt[cur].mlen==0) ? opt[cur].litlen : 0;  /* last sequence is actually only literals, fix cur to last match - note : may underflow, in which case, it's first sequence, and it's okay */
128759 +                        last_pos = cur + ZSTD_totalLen(lastSequence);
128760 +                        if (cur > ZSTD_OPT_NUM) cur = 0;   /* underflow => first match */
128761 +                        goto _shortestPath;
128762 +                }   }
128764 +                /* set prices using matches found at position == cur */
128765 +                for (matchNb = 0; matchNb < nbMatches; matchNb++) {
128766 +                    U32 const offset = matches[matchNb].off;
128767 +                    U32 const lastML = matches[matchNb].len;
128768 +                    U32 const startML = (matchNb>0) ? matches[matchNb-1].len+1 : minMatch;
128769 +                    U32 mlen;
128771 +                    DEBUGLOG(7, "testing match %u => offCode=%4u, mlen=%2u, llen=%2u",
128772 +                                matchNb, matches[matchNb].off, lastML, litlen);
128774 +                    for (mlen = lastML; mlen >= startML; mlen--) {  /* scan downward */
128775 +                        U32 const pos = cur + mlen;
128776 +                        int const price = basePrice + ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel);
128778 +                        if ((pos > last_pos) || (price < opt[pos].price)) {
128779 +                            DEBUGLOG(7, "rPos:%u (ml=%2u) => new better price (%.2f<%.2f)",
128780 +                                        pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
128781 +                            while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; }   /* fill empty positions */
128782 +                            opt[pos].mlen = mlen;
128783 +                            opt[pos].off = offset;
128784 +                            opt[pos].litlen = litlen;
128785 +                            opt[pos].price = price;
128786 +                        } else {
128787 +                            DEBUGLOG(7, "rPos:%u (ml=%2u) => new price is worse (%.2f>=%.2f)",
128788 +                                        pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price));
128789 +                            if (optLevel==0) break;  /* early update abort; gets ~+10% speed for about -0.01 ratio loss */
128790 +                        }
128791 +            }   }   }
128792 +        }  /* for (cur = 1; cur <= last_pos; cur++) */
128794 +        lastSequence = opt[last_pos];
128795 +        cur = last_pos > ZSTD_totalLen(lastSequence) ? last_pos - ZSTD_totalLen(lastSequence) : 0;  /* single sequence, and it starts before `ip` */
128796 +        assert(cur < ZSTD_OPT_NUM);  /* control overflow*/
128798 +_shortestPath:   /* cur, last_pos, best_mlen, best_off have to be set */
128799 +        assert(opt[0].mlen == 0);
128801 +        /* Set the next chunk's repcodes based on the repcodes of the beginning
128802 +         * of the last match, and the last sequence. This avoids us having to
128803 +         * update them while traversing the sequences.
128804 +         */
128805 +        if (lastSequence.mlen != 0) {
128806 +            repcodes_t reps = ZSTD_updateRep(opt[cur].rep, lastSequence.off, lastSequence.litlen==0);
128807 +            ZSTD_memcpy(rep, &reps, sizeof(reps));
128808 +        } else {
128809 +            ZSTD_memcpy(rep, opt[cur].rep, sizeof(repcodes_t));
128810 +        }
128812 +        {   U32 const storeEnd = cur + 1;
128813 +            U32 storeStart = storeEnd;
128814 +            U32 seqPos = cur;
128816 +            DEBUGLOG(6, "start reverse traversal (last_pos:%u, cur:%u)",
128817 +                        last_pos, cur); (void)last_pos;
128818 +            assert(storeEnd < ZSTD_OPT_NUM);
128819 +            DEBUGLOG(6, "last sequence copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
128820 +                        storeEnd, lastSequence.litlen, lastSequence.mlen, lastSequence.off);
128821 +            opt[storeEnd] = lastSequence;
128822 +            while (seqPos > 0) {
128823 +                U32 const backDist = ZSTD_totalLen(opt[seqPos]);
128824 +                storeStart--;
128825 +                DEBUGLOG(6, "sequence from rPos=%u copied into pos=%u (llen=%u,mlen=%u,ofc=%u)",
128826 +                            seqPos, storeStart, opt[seqPos].litlen, opt[seqPos].mlen, opt[seqPos].off);
128827 +                opt[storeStart] = opt[seqPos];
128828 +                seqPos = (seqPos > backDist) ? seqPos - backDist : 0;
128829 +            }
128831 +            /* save sequences */
128832 +            DEBUGLOG(6, "sending selected sequences into seqStore")
128833 +            {   U32 storePos;
128834 +                for (storePos=storeStart; storePos <= storeEnd; storePos++) {
128835 +                    U32 const llen = opt[storePos].litlen;
128836 +                    U32 const mlen = opt[storePos].mlen;
128837 +                    U32 const offCode = opt[storePos].off;
128838 +                    U32 const advance = llen + mlen;
128839 +                    DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u",
128840 +                                anchor - istart, (unsigned)llen, (unsigned)mlen);
128842 +                    if (mlen==0) {  /* only literals => must be last "sequence", actually starting a new stream of sequences */
128843 +                        assert(storePos == storeEnd);   /* must be last sequence */
128844 +                        ip = anchor + llen;     /* last "sequence" is a bunch of literals => don't progress anchor */
128845 +                        continue;   /* will finish */
128846 +                    }
128848 +                    assert(anchor + llen <= iend);
128849 +                    ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen);
128850 +                    ZSTD_storeSeq(seqStore, llen, anchor, iend, offCode, mlen-MINMATCH);
128851 +                    anchor += advance;
128852 +                    ip = anchor;
128853 +            }   }
128854 +            ZSTD_setBasePrices(optStatePtr, optLevel);
128855 +        }
128856 +    }   /* while (ip < ilimit) */
128858 +    /* Return the last literals size */
128859 +    return (size_t)(iend - anchor);
128863 +size_t ZSTD_compressBlock_btopt(
128864 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
128865 +        const void* src, size_t srcSize)
128867 +    DEBUGLOG(5, "ZSTD_compressBlock_btopt");
128868 +    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_noDict);
128872 +/* used in 2-pass strategy */
128873 +static U32 ZSTD_upscaleStat(unsigned* table, U32 lastEltIndex, int bonus)
128875 +    U32 s, sum=0;
128876 +    assert(ZSTD_FREQ_DIV+bonus >= 0);
128877 +    for (s=0; s<lastEltIndex+1; s++) {
128878 +        table[s] <<= ZSTD_FREQ_DIV+bonus;
128879 +        table[s]--;
128880 +        sum += table[s];
128881 +    }
128882 +    return sum;
128885 +/* used in 2-pass strategy */
128886 +MEM_STATIC void ZSTD_upscaleStats(optState_t* optPtr)
128888 +    if (ZSTD_compressedLiterals(optPtr))
128889 +        optPtr->litSum = ZSTD_upscaleStat(optPtr->litFreq, MaxLit, 0);
128890 +    optPtr->litLengthSum = ZSTD_upscaleStat(optPtr->litLengthFreq, MaxLL, 0);
128891 +    optPtr->matchLengthSum = ZSTD_upscaleStat(optPtr->matchLengthFreq, MaxML, 0);
128892 +    optPtr->offCodeSum = ZSTD_upscaleStat(optPtr->offCodeFreq, MaxOff, 0);
128895 +/* ZSTD_initStats_ultra():
128896 + * make a first compression pass, just to seed stats with more accurate starting values.
128897 + * only works on first block, with no dictionary and no ldm.
128898 + * this function cannot error, hence its contract must be respected.
128899 + */
128900 +static void
128901 +ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
128902 +                     seqStore_t* seqStore,
128903 +                     U32 rep[ZSTD_REP_NUM],
128904 +               const void* src, size_t srcSize)
128906 +    U32 tmpRep[ZSTD_REP_NUM];  /* updated rep codes will sink here */
128907 +    ZSTD_memcpy(tmpRep, rep, sizeof(tmpRep));
128909 +    DEBUGLOG(4, "ZSTD_initStats_ultra (srcSize=%zu)", srcSize);
128910 +    assert(ms->opt.litLengthSum == 0);    /* first block */
128911 +    assert(seqStore->sequences == seqStore->sequencesStart);   /* no ldm */
128912 +    assert(ms->window.dictLimit == ms->window.lowLimit);   /* no dictionary */
128913 +    assert(ms->window.dictLimit - ms->nextToUpdate <= 1);  /* no prefix (note: intentional overflow, defined as 2-complement) */
128915 +    ZSTD_compressBlock_opt_generic(ms, seqStore, tmpRep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);   /* generate stats into ms->opt*/
128917 +    /* invalidate first scan from history */
128918 +    ZSTD_resetSeqStore(seqStore);
128919 +    ms->window.base -= srcSize;
128920 +    ms->window.dictLimit += (U32)srcSize;
128921 +    ms->window.lowLimit = ms->window.dictLimit;
128922 +    ms->nextToUpdate = ms->window.dictLimit;
128924 +    /* re-inforce weight of collected statistics */
128925 +    ZSTD_upscaleStats(&ms->opt);
128928 +size_t ZSTD_compressBlock_btultra(
128929 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
128930 +        const void* src, size_t srcSize)
128932 +    DEBUGLOG(5, "ZSTD_compressBlock_btultra (srcSize=%zu)", srcSize);
128933 +    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);
128936 +size_t ZSTD_compressBlock_btultra2(
128937 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
128938 +        const void* src, size_t srcSize)
128940 +    U32 const curr = (U32)((const BYTE*)src - ms->window.base);
128941 +    DEBUGLOG(5, "ZSTD_compressBlock_btultra2 (srcSize=%zu)", srcSize);
128943 +    /* 2-pass strategy:
128944 +     * this strategy makes a first pass over first block to collect statistics
128945 +     * and seed next round's statistics with it.
128946 +     * After 1st pass, function forgets everything, and starts a new block.
128947 +     * Consequently, this can only work if no data has been previously loaded in tables,
128948 +     * aka, no dictionary, no prefix, no ldm preprocessing.
128949 +     * The compression ratio gain is generally small (~0.5% on first block),
128950 +     * the cost is 2x cpu time on first block. */
128951 +    assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
128952 +    if ( (ms->opt.litLengthSum==0)   /* first block */
128953 +      && (seqStore->sequences == seqStore->sequencesStart)  /* no ldm */
128954 +      && (ms->window.dictLimit == ms->window.lowLimit)   /* no dictionary */
128955 +      && (curr == ms->window.dictLimit)   /* start of frame, nothing already loaded nor skipped */
128956 +      && (srcSize > ZSTD_PREDEF_THRESHOLD)
128957 +      ) {
128958 +        ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize);
128959 +    }
128961 +    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict);
128964 +size_t ZSTD_compressBlock_btopt_dictMatchState(
128965 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
128966 +        const void* src, size_t srcSize)
128968 +    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_dictMatchState);
128971 +size_t ZSTD_compressBlock_btultra_dictMatchState(
128972 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
128973 +        const void* src, size_t srcSize)
128975 +    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_dictMatchState);
128978 +size_t ZSTD_compressBlock_btopt_extDict(
128979 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
128980 +        const void* src, size_t srcSize)
128982 +    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_extDict);
128985 +size_t ZSTD_compressBlock_btultra_extDict(
128986 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
128987 +        const void* src, size_t srcSize)
128989 +    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_extDict);
128992 +/* note : no btultra2 variant for extDict nor dictMatchState,
128993 + * because btultra2 is not meant to work with dictionaries
128994 + * and is only specific for the first block (no prefix) */
128995 diff --git a/lib/zstd/compress/zstd_opt.h b/lib/zstd/compress/zstd_opt.h
128996 new file mode 100644
128997 index 000000000000..22b862858ba7
128998 --- /dev/null
128999 +++ b/lib/zstd/compress/zstd_opt.h
129000 @@ -0,0 +1,50 @@
129002 + * Copyright (c) Yann Collet, Facebook, Inc.
129003 + * All rights reserved.
129005 + * This source code is licensed under both the BSD-style license (found in the
129006 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
129007 + * in the COPYING file in the root directory of this source tree).
129008 + * You may select, at your option, one of the above-listed licenses.
129009 + */
129011 +#ifndef ZSTD_OPT_H
129012 +#define ZSTD_OPT_H
129015 +#include "zstd_compress_internal.h"
129017 +/* used in ZSTD_loadDictionaryContent() */
129018 +void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend);
129020 +size_t ZSTD_compressBlock_btopt(
129021 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
129022 +        void const* src, size_t srcSize);
129023 +size_t ZSTD_compressBlock_btultra(
129024 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
129025 +        void const* src, size_t srcSize);
129026 +size_t ZSTD_compressBlock_btultra2(
129027 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
129028 +        void const* src, size_t srcSize);
129031 +size_t ZSTD_compressBlock_btopt_dictMatchState(
129032 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
129033 +        void const* src, size_t srcSize);
129034 +size_t ZSTD_compressBlock_btultra_dictMatchState(
129035 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
129036 +        void const* src, size_t srcSize);
129038 +size_t ZSTD_compressBlock_btopt_extDict(
129039 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
129040 +        void const* src, size_t srcSize);
129041 +size_t ZSTD_compressBlock_btultra_extDict(
129042 +        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
129043 +        void const* src, size_t srcSize);
129045 +        /* note : no btultra2 variant for extDict nor dictMatchState,
129046 +         * because btultra2 is not meant to work with dictionaries
129047 +         * and is only specific for the first block (no prefix) */
129050 +#endif /* ZSTD_OPT_H */
129051 diff --git a/lib/zstd/decompress.c b/lib/zstd/decompress.c
129052 deleted file mode 100644
129053 index 66cd487a326a..000000000000
129054 --- a/lib/zstd/decompress.c
129055 +++ /dev/null
129056 @@ -1,2531 +0,0 @@
129058 - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
129059 - * All rights reserved.
129061 - * This source code is licensed under the BSD-style license found in the
129062 - * LICENSE file in the root directory of https://github.com/facebook/zstd.
129063 - * An additional grant of patent rights can be found in the PATENTS file in the
129064 - * same directory.
129066 - * This program is free software; you can redistribute it and/or modify it under
129067 - * the terms of the GNU General Public License version 2 as published by the
129068 - * Free Software Foundation. This program is dual-licensed; you may select
129069 - * either version 2 of the GNU General Public License ("GPL") or BSD license
129070 - * ("BSD").
129071 - */
129073 -/* ***************************************************************
129074 -*  Tuning parameters
129075 -*****************************************************************/
129077 -*  MAXWINDOWSIZE_DEFAULT :
129078 -*  maximum window size accepted by DStream, by default.
129079 -*  Frames requiring more memory will be rejected.
129081 -#ifndef ZSTD_MAXWINDOWSIZE_DEFAULT
129082 -#define ZSTD_MAXWINDOWSIZE_DEFAULT ((1 << ZSTD_WINDOWLOG_MAX) + 1) /* defined within zstd.h */
129083 -#endif
129085 -/*-*******************************************************
129086 -*  Dependencies
129087 -*********************************************************/
129088 -#include "fse.h"
129089 -#include "huf.h"
129090 -#include "mem.h" /* low level memory routines */
129091 -#include "zstd_internal.h"
129092 -#include <linux/kernel.h>
129093 -#include <linux/module.h>
129094 -#include <linux/string.h> /* memcpy, memmove, memset */
129096 -#define ZSTD_PREFETCH(ptr) __builtin_prefetch(ptr, 0, 0)
129098 -/*-*************************************
129099 -*  Macros
129100 -***************************************/
129101 -#define ZSTD_isError ERR_isError /* for inlining */
129102 -#define FSE_isError ERR_isError
129103 -#define HUF_isError ERR_isError
129105 -/*_*******************************************************
129106 -*  Memory operations
129107 -**********************************************************/
129108 -static void ZSTD_copy4(void *dst, const void *src) { memcpy(dst, src, 4); }
129110 -/*-*************************************************************
129111 -*   Context management
129112 -***************************************************************/
129113 -typedef enum {
129114 -       ZSTDds_getFrameHeaderSize,
129115 -       ZSTDds_decodeFrameHeader,
129116 -       ZSTDds_decodeBlockHeader,
129117 -       ZSTDds_decompressBlock,
129118 -       ZSTDds_decompressLastBlock,
129119 -       ZSTDds_checkChecksum,
129120 -       ZSTDds_decodeSkippableHeader,
129121 -       ZSTDds_skipFrame
129122 -} ZSTD_dStage;
129124 -typedef struct {
129125 -       FSE_DTable LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];
129126 -       FSE_DTable OFTable[FSE_DTABLE_SIZE_U32(OffFSELog)];
129127 -       FSE_DTable MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];
129128 -       HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */
129129 -       U64 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32 / 2];
129130 -       U32 rep[ZSTD_REP_NUM];
129131 -} ZSTD_entropyTables_t;
129133 -struct ZSTD_DCtx_s {
129134 -       const FSE_DTable *LLTptr;
129135 -       const FSE_DTable *MLTptr;
129136 -       const FSE_DTable *OFTptr;
129137 -       const HUF_DTable *HUFptr;
129138 -       ZSTD_entropyTables_t entropy;
129139 -       const void *previousDstEnd; /* detect continuity */
129140 -       const void *base;          /* start of curr segment */
129141 -       const void *vBase;        /* virtual start of previous segment if it was just before curr one */
129142 -       const void *dictEnd;    /* end of previous segment */
129143 -       size_t expected;
129144 -       ZSTD_frameParams fParams;
129145 -       blockType_e bType; /* used in ZSTD_decompressContinue(), to transfer blockType between header decoding and block decoding stages */
129146 -       ZSTD_dStage stage;
129147 -       U32 litEntropy;
129148 -       U32 fseEntropy;
129149 -       struct xxh64_state xxhState;
129150 -       size_t headerSize;
129151 -       U32 dictID;
129152 -       const BYTE *litPtr;
129153 -       ZSTD_customMem customMem;
129154 -       size_t litSize;
129155 -       size_t rleSize;
129156 -       BYTE litBuffer[ZSTD_BLOCKSIZE_ABSOLUTEMAX + WILDCOPY_OVERLENGTH];
129157 -       BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX];
129158 -}; /* typedef'd to ZSTD_DCtx within "zstd.h" */
129160 -size_t ZSTD_DCtxWorkspaceBound(void) { return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_DCtx)); }
129162 -size_t ZSTD_decompressBegin(ZSTD_DCtx *dctx)
129164 -       dctx->expected = ZSTD_frameHeaderSize_prefix;
129165 -       dctx->stage = ZSTDds_getFrameHeaderSize;
129166 -       dctx->previousDstEnd = NULL;
129167 -       dctx->base = NULL;
129168 -       dctx->vBase = NULL;
129169 -       dctx->dictEnd = NULL;
129170 -       dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
129171 -       dctx->litEntropy = dctx->fseEntropy = 0;
129172 -       dctx->dictID = 0;
129173 -       ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue));
129174 -       memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */
129175 -       dctx->LLTptr = dctx->entropy.LLTable;
129176 -       dctx->MLTptr = dctx->entropy.MLTable;
129177 -       dctx->OFTptr = dctx->entropy.OFTable;
129178 -       dctx->HUFptr = dctx->entropy.hufTable;
129179 -       return 0;
129182 -ZSTD_DCtx *ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
129184 -       ZSTD_DCtx *dctx;
129186 -       if (!customMem.customAlloc || !customMem.customFree)
129187 -               return NULL;
129189 -       dctx = (ZSTD_DCtx *)ZSTD_malloc(sizeof(ZSTD_DCtx), customMem);
129190 -       if (!dctx)
129191 -               return NULL;
129192 -       memcpy(&dctx->customMem, &customMem, sizeof(customMem));
129193 -       ZSTD_decompressBegin(dctx);
129194 -       return dctx;
129197 -ZSTD_DCtx *ZSTD_initDCtx(void *workspace, size_t workspaceSize)
129199 -       ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
129200 -       return ZSTD_createDCtx_advanced(stackMem);
129203 -size_t ZSTD_freeDCtx(ZSTD_DCtx *dctx)
129205 -       if (dctx == NULL)
129206 -               return 0; /* support free on NULL */
129207 -       ZSTD_free(dctx, dctx->customMem);
129208 -       return 0; /* reserved as a potential error code in the future */
129211 -void ZSTD_copyDCtx(ZSTD_DCtx *dstDCtx, const ZSTD_DCtx *srcDCtx)
129213 -       size_t const workSpaceSize = (ZSTD_BLOCKSIZE_ABSOLUTEMAX + WILDCOPY_OVERLENGTH) + ZSTD_frameHeaderSize_max;
129214 -       memcpy(dstDCtx, srcDCtx, sizeof(ZSTD_DCtx) - workSpaceSize); /* no need to copy workspace */
129217 -static void ZSTD_refDDict(ZSTD_DCtx *dstDCtx, const ZSTD_DDict *ddict);
129219 -/*-*************************************************************
129220 -*   Decompression section
129221 -***************************************************************/
129223 -/*! ZSTD_isFrame() :
129224 - *  Tells if the content of `buffer` starts with a valid Frame Identifier.
129225 - *  Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
129226 - *  Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
129227 - *  Note 3 : Skippable Frame Identifiers are considered valid. */
129228 -unsigned ZSTD_isFrame(const void *buffer, size_t size)
129230 -       if (size < 4)
129231 -               return 0;
129232 -       {
129233 -               U32 const magic = ZSTD_readLE32(buffer);
129234 -               if (magic == ZSTD_MAGICNUMBER)
129235 -                       return 1;
129236 -               if ((magic & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START)
129237 -                       return 1;
129238 -       }
129239 -       return 0;
129242 -/** ZSTD_frameHeaderSize() :
129243 -*   srcSize must be >= ZSTD_frameHeaderSize_prefix.
129244 -*   @return : size of the Frame Header */
129245 -static size_t ZSTD_frameHeaderSize(const void *src, size_t srcSize)
129247 -       if (srcSize < ZSTD_frameHeaderSize_prefix)
129248 -               return ERROR(srcSize_wrong);
129249 -       {
129250 -               BYTE const fhd = ((const BYTE *)src)[4];
129251 -               U32 const dictID = fhd & 3;
129252 -               U32 const singleSegment = (fhd >> 5) & 1;
129253 -               U32 const fcsId = fhd >> 6;
129254 -               return ZSTD_frameHeaderSize_prefix + !singleSegment + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId] + (singleSegment && !fcsId);
129255 -       }
129258 -/** ZSTD_getFrameParams() :
129259 -*   decode Frame Header, or require larger `srcSize`.
129260 -*   @return : 0, `fparamsPtr` is correctly filled,
129261 -*            >0, `srcSize` is too small, result is expected `srcSize`,
129262 -*             or an error code, which can be tested using ZSTD_isError() */
129263 -size_t ZSTD_getFrameParams(ZSTD_frameParams *fparamsPtr, const void *src, size_t srcSize)
129265 -       const BYTE *ip = (const BYTE *)src;
129267 -       if (srcSize < ZSTD_frameHeaderSize_prefix)
129268 -               return ZSTD_frameHeaderSize_prefix;
129269 -       if (ZSTD_readLE32(src) != ZSTD_MAGICNUMBER) {
129270 -               if ((ZSTD_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
129271 -                       if (srcSize < ZSTD_skippableHeaderSize)
129272 -                               return ZSTD_skippableHeaderSize; /* magic number + skippable frame length */
129273 -                       memset(fparamsPtr, 0, sizeof(*fparamsPtr));
129274 -                       fparamsPtr->frameContentSize = ZSTD_readLE32((const char *)src + 4);
129275 -                       fparamsPtr->windowSize = 0; /* windowSize==0 means a frame is skippable */
129276 -                       return 0;
129277 -               }
129278 -               return ERROR(prefix_unknown);
129279 -       }
129281 -       /* ensure there is enough `srcSize` to fully read/decode frame header */
129282 -       {
129283 -               size_t const fhsize = ZSTD_frameHeaderSize(src, srcSize);
129284 -               if (srcSize < fhsize)
129285 -                       return fhsize;
129286 -       }
129288 -       {
129289 -               BYTE const fhdByte = ip[4];
129290 -               size_t pos = 5;
129291 -               U32 const dictIDSizeCode = fhdByte & 3;
129292 -               U32 const checksumFlag = (fhdByte >> 2) & 1;
129293 -               U32 const singleSegment = (fhdByte >> 5) & 1;
129294 -               U32 const fcsID = fhdByte >> 6;
129295 -               U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX;
129296 -               U32 windowSize = 0;
129297 -               U32 dictID = 0;
129298 -               U64 frameContentSize = 0;
129299 -               if ((fhdByte & 0x08) != 0)
129300 -                       return ERROR(frameParameter_unsupported); /* reserved bits, which must be zero */
129301 -               if (!singleSegment) {
129302 -                       BYTE const wlByte = ip[pos++];
129303 -                       U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
129304 -                       if (windowLog > ZSTD_WINDOWLOG_MAX)
129305 -                               return ERROR(frameParameter_windowTooLarge); /* avoids issue with 1 << windowLog */
129306 -                       windowSize = (1U << windowLog);
129307 -                       windowSize += (windowSize >> 3) * (wlByte & 7);
129308 -               }
129310 -               switch (dictIDSizeCode) {
129311 -               default: /* impossible */
129312 -               case 0: break;
129313 -               case 1:
129314 -                       dictID = ip[pos];
129315 -                       pos++;
129316 -                       break;
129317 -               case 2:
129318 -                       dictID = ZSTD_readLE16(ip + pos);
129319 -                       pos += 2;
129320 -                       break;
129321 -               case 3:
129322 -                       dictID = ZSTD_readLE32(ip + pos);
129323 -                       pos += 4;
129324 -                       break;
129325 -               }
129326 -               switch (fcsID) {
129327 -               default: /* impossible */
129328 -               case 0:
129329 -                       if (singleSegment)
129330 -                               frameContentSize = ip[pos];
129331 -                       break;
129332 -               case 1: frameContentSize = ZSTD_readLE16(ip + pos) + 256; break;
129333 -               case 2: frameContentSize = ZSTD_readLE32(ip + pos); break;
129334 -               case 3: frameContentSize = ZSTD_readLE64(ip + pos); break;
129335 -               }
129336 -               if (!windowSize)
129337 -                       windowSize = (U32)frameContentSize;
129338 -               if (windowSize > windowSizeMax)
129339 -                       return ERROR(frameParameter_windowTooLarge);
129340 -               fparamsPtr->frameContentSize = frameContentSize;
129341 -               fparamsPtr->windowSize = windowSize;
129342 -               fparamsPtr->dictID = dictID;
129343 -               fparamsPtr->checksumFlag = checksumFlag;
129344 -       }
129345 -       return 0;
129348 -/** ZSTD_getFrameContentSize() :
129349 -*   compatible with legacy mode
129350 -*   @return : decompressed size of the single frame pointed to be `src` if known, otherwise
129351 -*             - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
129352 -*             - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
129353 -unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize)
129355 -       {
129356 -               ZSTD_frameParams fParams;
129357 -               if (ZSTD_getFrameParams(&fParams, src, srcSize) != 0)
129358 -                       return ZSTD_CONTENTSIZE_ERROR;
129359 -               if (fParams.windowSize == 0) {
129360 -                       /* Either skippable or empty frame, size == 0 either way */
129361 -                       return 0;
129362 -               } else if (fParams.frameContentSize != 0) {
129363 -                       return fParams.frameContentSize;
129364 -               } else {
129365 -                       return ZSTD_CONTENTSIZE_UNKNOWN;
129366 -               }
129367 -       }
129370 -/** ZSTD_findDecompressedSize() :
129371 - *  compatible with legacy mode
129372 - *  `srcSize` must be the exact length of some number of ZSTD compressed and/or
129373 - *      skippable frames
129374 - *  @return : decompressed size of the frames contained */
129375 -unsigned long long ZSTD_findDecompressedSize(const void *src, size_t srcSize)
129377 -       {
129378 -               unsigned long long totalDstSize = 0;
129379 -               while (srcSize >= ZSTD_frameHeaderSize_prefix) {
129380 -                       const U32 magicNumber = ZSTD_readLE32(src);
129382 -                       if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
129383 -                               size_t skippableSize;
129384 -                               if (srcSize < ZSTD_skippableHeaderSize)
129385 -                                       return ERROR(srcSize_wrong);
129386 -                               skippableSize = ZSTD_readLE32((const BYTE *)src + 4) + ZSTD_skippableHeaderSize;
129387 -                               if (srcSize < skippableSize) {
129388 -                                       return ZSTD_CONTENTSIZE_ERROR;
129389 -                               }
129391 -                               src = (const BYTE *)src + skippableSize;
129392 -                               srcSize -= skippableSize;
129393 -                               continue;
129394 -                       }
129396 -                       {
129397 -                               unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
129398 -                               if (ret >= ZSTD_CONTENTSIZE_ERROR)
129399 -                                       return ret;
129401 -                               /* check for overflow */
129402 -                               if (totalDstSize + ret < totalDstSize)
129403 -                                       return ZSTD_CONTENTSIZE_ERROR;
129404 -                               totalDstSize += ret;
129405 -                       }
129406 -                       {
129407 -                               size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize);
129408 -                               if (ZSTD_isError(frameSrcSize)) {
129409 -                                       return ZSTD_CONTENTSIZE_ERROR;
129410 -                               }
129412 -                               src = (const BYTE *)src + frameSrcSize;
129413 -                               srcSize -= frameSrcSize;
129414 -                       }
129415 -               }
129417 -               if (srcSize) {
129418 -                       return ZSTD_CONTENTSIZE_ERROR;
129419 -               }
129421 -               return totalDstSize;
129422 -       }
129425 -/** ZSTD_decodeFrameHeader() :
129426 -*   `headerSize` must be the size provided by ZSTD_frameHeaderSize().
129427 -*   @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */
129428 -static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx *dctx, const void *src, size_t headerSize)
129430 -       size_t const result = ZSTD_getFrameParams(&(dctx->fParams), src, headerSize);
129431 -       if (ZSTD_isError(result))
129432 -               return result; /* invalid header */
129433 -       if (result > 0)
129434 -               return ERROR(srcSize_wrong); /* headerSize too small */
129435 -       if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID))
129436 -               return ERROR(dictionary_wrong);
129437 -       if (dctx->fParams.checksumFlag)
129438 -               xxh64_reset(&dctx->xxhState, 0);
129439 -       return 0;
129442 -typedef struct {
129443 -       blockType_e blockType;
129444 -       U32 lastBlock;
129445 -       U32 origSize;
129446 -} blockProperties_t;
129448 -/*! ZSTD_getcBlockSize() :
129449 -*   Provides the size of compressed block from block header `src` */
129450 -size_t ZSTD_getcBlockSize(const void *src, size_t srcSize, blockProperties_t *bpPtr)
129452 -       if (srcSize < ZSTD_blockHeaderSize)
129453 -               return ERROR(srcSize_wrong);
129454 -       {
129455 -               U32 const cBlockHeader = ZSTD_readLE24(src);
129456 -               U32 const cSize = cBlockHeader >> 3;
129457 -               bpPtr->lastBlock = cBlockHeader & 1;
129458 -               bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
129459 -               bpPtr->origSize = cSize; /* only useful for RLE */
129460 -               if (bpPtr->blockType == bt_rle)
129461 -                       return 1;
129462 -               if (bpPtr->blockType == bt_reserved)
129463 -                       return ERROR(corruption_detected);
129464 -               return cSize;
129465 -       }
129468 -static size_t ZSTD_copyRawBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
129470 -       if (srcSize > dstCapacity)
129471 -               return ERROR(dstSize_tooSmall);
129472 -       memcpy(dst, src, srcSize);
129473 -       return srcSize;
129476 -static size_t ZSTD_setRleBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize, size_t regenSize)
129478 -       if (srcSize != 1)
129479 -               return ERROR(srcSize_wrong);
129480 -       if (regenSize > dstCapacity)
129481 -               return ERROR(dstSize_tooSmall);
129482 -       memset(dst, *(const BYTE *)src, regenSize);
129483 -       return regenSize;
129486 -/*! ZSTD_decodeLiteralsBlock() :
129487 -       @return : nb of bytes read from src (< srcSize ) */
129488 -size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx *dctx, const void *src, size_t srcSize) /* note : srcSize < BLOCKSIZE */
129490 -       if (srcSize < MIN_CBLOCK_SIZE)
129491 -               return ERROR(corruption_detected);
129493 -       {
129494 -               const BYTE *const istart = (const BYTE *)src;
129495 -               symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
129497 -               switch (litEncType) {
129498 -               case set_repeat:
129499 -                       if (dctx->litEntropy == 0)
129500 -                               return ERROR(dictionary_corrupted);
129501 -                       fallthrough;
129502 -               case set_compressed:
129503 -                       if (srcSize < 5)
129504 -                               return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */
129505 -                       {
129506 -                               size_t lhSize, litSize, litCSize;
129507 -                               U32 singleStream = 0;
129508 -                               U32 const lhlCode = (istart[0] >> 2) & 3;
129509 -                               U32 const lhc = ZSTD_readLE32(istart);
129510 -                               switch (lhlCode) {
129511 -                               case 0:
129512 -                               case 1:
129513 -                               default: /* note : default is impossible, since lhlCode into [0..3] */
129514 -                                       /* 2 - 2 - 10 - 10 */
129515 -                                       singleStream = !lhlCode;
129516 -                                       lhSize = 3;
129517 -                                       litSize = (lhc >> 4) & 0x3FF;
129518 -                                       litCSize = (lhc >> 14) & 0x3FF;
129519 -                                       break;
129520 -                               case 2:
129521 -                                       /* 2 - 2 - 14 - 14 */
129522 -                                       lhSize = 4;
129523 -                                       litSize = (lhc >> 4) & 0x3FFF;
129524 -                                       litCSize = lhc >> 18;
129525 -                                       break;
129526 -                               case 3:
129527 -                                       /* 2 - 2 - 18 - 18 */
129528 -                                       lhSize = 5;
129529 -                                       litSize = (lhc >> 4) & 0x3FFFF;
129530 -                                       litCSize = (lhc >> 22) + (istart[4] << 10);
129531 -                                       break;
129532 -                               }
129533 -                               if (litSize > ZSTD_BLOCKSIZE_ABSOLUTEMAX)
129534 -                                       return ERROR(corruption_detected);
129535 -                               if (litCSize + lhSize > srcSize)
129536 -                                       return ERROR(corruption_detected);
129538 -                               if (HUF_isError(
129539 -                                       (litEncType == set_repeat)
129540 -                                           ? (singleStream ? HUF_decompress1X_usingDTable(dctx->litBuffer, litSize, istart + lhSize, litCSize, dctx->HUFptr)
129541 -                                                           : HUF_decompress4X_usingDTable(dctx->litBuffer, litSize, istart + lhSize, litCSize, dctx->HUFptr))
129542 -                                           : (singleStream
129543 -                                                  ? HUF_decompress1X2_DCtx_wksp(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart + lhSize, litCSize,
129544 -                                                                                dctx->entropy.workspace, sizeof(dctx->entropy.workspace))
129545 -                                                  : HUF_decompress4X_hufOnly_wksp(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart + lhSize, litCSize,
129546 -                                                                                  dctx->entropy.workspace, sizeof(dctx->entropy.workspace)))))
129547 -                                       return ERROR(corruption_detected);
129549 -                               dctx->litPtr = dctx->litBuffer;
129550 -                               dctx->litSize = litSize;
129551 -                               dctx->litEntropy = 1;
129552 -                               if (litEncType == set_compressed)
129553 -                                       dctx->HUFptr = dctx->entropy.hufTable;
129554 -                               memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
129555 -                               return litCSize + lhSize;
129556 -                       }
129558 -               case set_basic: {
129559 -                       size_t litSize, lhSize;
129560 -                       U32 const lhlCode = ((istart[0]) >> 2) & 3;
129561 -                       switch (lhlCode) {
129562 -                       case 0:
129563 -                       case 2:
129564 -                       default: /* note : default is impossible, since lhlCode into [0..3] */
129565 -                               lhSize = 1;
129566 -                               litSize = istart[0] >> 3;
129567 -                               break;
129568 -                       case 1:
129569 -                               lhSize = 2;
129570 -                               litSize = ZSTD_readLE16(istart) >> 4;
129571 -                               break;
129572 -                       case 3:
129573 -                               lhSize = 3;
129574 -                               litSize = ZSTD_readLE24(istart) >> 4;
129575 -                               break;
129576 -                       }
129578 -                       if (lhSize + litSize + WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */
129579 -                               if (litSize + lhSize > srcSize)
129580 -                                       return ERROR(corruption_detected);
129581 -                               memcpy(dctx->litBuffer, istart + lhSize, litSize);
129582 -                               dctx->litPtr = dctx->litBuffer;
129583 -                               dctx->litSize = litSize;
129584 -                               memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
129585 -                               return lhSize + litSize;
129586 -                       }
129587 -                       /* direct reference into compressed stream */
129588 -                       dctx->litPtr = istart + lhSize;
129589 -                       dctx->litSize = litSize;
129590 -                       return lhSize + litSize;
129591 -               }
129593 -               case set_rle: {
129594 -                       U32 const lhlCode = ((istart[0]) >> 2) & 3;
129595 -                       size_t litSize, lhSize;
129596 -                       switch (lhlCode) {
129597 -                       case 0:
129598 -                       case 2:
129599 -                       default: /* note : default is impossible, since lhlCode into [0..3] */
129600 -                               lhSize = 1;
129601 -                               litSize = istart[0] >> 3;
129602 -                               break;
129603 -                       case 1:
129604 -                               lhSize = 2;
129605 -                               litSize = ZSTD_readLE16(istart) >> 4;
129606 -                               break;
129607 -                       case 3:
129608 -                               lhSize = 3;
129609 -                               litSize = ZSTD_readLE24(istart) >> 4;
129610 -                               if (srcSize < 4)
129611 -                                       return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */
129612 -                               break;
129613 -                       }
129614 -                       if (litSize > ZSTD_BLOCKSIZE_ABSOLUTEMAX)
129615 -                               return ERROR(corruption_detected);
129616 -                       memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
129617 -                       dctx->litPtr = dctx->litBuffer;
129618 -                       dctx->litSize = litSize;
129619 -                       return lhSize + 1;
129620 -               }
129621 -               default:
129622 -                       return ERROR(corruption_detected); /* impossible */
129623 -               }
129624 -       }
129627 -typedef union {
129628 -       FSE_decode_t realData;
129629 -       U32 alignedBy4;
129630 -} FSE_decode_t4;
129632 -static const FSE_decode_t4 LL_defaultDTable[(1 << LL_DEFAULTNORMLOG) + 1] = {
129633 -    {{LL_DEFAULTNORMLOG, 1, 1}}, /* header : tableLog, fastMode, fastMode */
129634 -    {{0, 0, 4}},                /* 0 : base, symbol, bits */
129635 -    {{16, 0, 4}},
129636 -    {{32, 1, 5}},
129637 -    {{0, 3, 5}},
129638 -    {{0, 4, 5}},
129639 -    {{0, 6, 5}},
129640 -    {{0, 7, 5}},
129641 -    {{0, 9, 5}},
129642 -    {{0, 10, 5}},
129643 -    {{0, 12, 5}},
129644 -    {{0, 14, 6}},
129645 -    {{0, 16, 5}},
129646 -    {{0, 18, 5}},
129647 -    {{0, 19, 5}},
129648 -    {{0, 21, 5}},
129649 -    {{0, 22, 5}},
129650 -    {{0, 24, 5}},
129651 -    {{32, 25, 5}},
129652 -    {{0, 26, 5}},
129653 -    {{0, 27, 6}},
129654 -    {{0, 29, 6}},
129655 -    {{0, 31, 6}},
129656 -    {{32, 0, 4}},
129657 -    {{0, 1, 4}},
129658 -    {{0, 2, 5}},
129659 -    {{32, 4, 5}},
129660 -    {{0, 5, 5}},
129661 -    {{32, 7, 5}},
129662 -    {{0, 8, 5}},
129663 -    {{32, 10, 5}},
129664 -    {{0, 11, 5}},
129665 -    {{0, 13, 6}},
129666 -    {{32, 16, 5}},
129667 -    {{0, 17, 5}},
129668 -    {{32, 19, 5}},
129669 -    {{0, 20, 5}},
129670 -    {{32, 22, 5}},
129671 -    {{0, 23, 5}},
129672 -    {{0, 25, 4}},
129673 -    {{16, 25, 4}},
129674 -    {{32, 26, 5}},
129675 -    {{0, 28, 6}},
129676 -    {{0, 30, 6}},
129677 -    {{48, 0, 4}},
129678 -    {{16, 1, 4}},
129679 -    {{32, 2, 5}},
129680 -    {{32, 3, 5}},
129681 -    {{32, 5, 5}},
129682 -    {{32, 6, 5}},
129683 -    {{32, 8, 5}},
129684 -    {{32, 9, 5}},
129685 -    {{32, 11, 5}},
129686 -    {{32, 12, 5}},
129687 -    {{0, 15, 6}},
129688 -    {{32, 17, 5}},
129689 -    {{32, 18, 5}},
129690 -    {{32, 20, 5}},
129691 -    {{32, 21, 5}},
129692 -    {{32, 23, 5}},
129693 -    {{32, 24, 5}},
129694 -    {{0, 35, 6}},
129695 -    {{0, 34, 6}},
129696 -    {{0, 33, 6}},
129697 -    {{0, 32, 6}},
129698 -}; /* LL_defaultDTable */
129700 -static const FSE_decode_t4 ML_defaultDTable[(1 << ML_DEFAULTNORMLOG) + 1] = {
129701 -    {{ML_DEFAULTNORMLOG, 1, 1}}, /* header : tableLog, fastMode, fastMode */
129702 -    {{0, 0, 6}},                /* 0 : base, symbol, bits */
129703 -    {{0, 1, 4}},
129704 -    {{32, 2, 5}},
129705 -    {{0, 3, 5}},
129706 -    {{0, 5, 5}},
129707 -    {{0, 6, 5}},
129708 -    {{0, 8, 5}},
129709 -    {{0, 10, 6}},
129710 -    {{0, 13, 6}},
129711 -    {{0, 16, 6}},
129712 -    {{0, 19, 6}},
129713 -    {{0, 22, 6}},
129714 -    {{0, 25, 6}},
129715 -    {{0, 28, 6}},
129716 -    {{0, 31, 6}},
129717 -    {{0, 33, 6}},
129718 -    {{0, 35, 6}},
129719 -    {{0, 37, 6}},
129720 -    {{0, 39, 6}},
129721 -    {{0, 41, 6}},
129722 -    {{0, 43, 6}},
129723 -    {{0, 45, 6}},
129724 -    {{16, 1, 4}},
129725 -    {{0, 2, 4}},
129726 -    {{32, 3, 5}},
129727 -    {{0, 4, 5}},
129728 -    {{32, 6, 5}},
129729 -    {{0, 7, 5}},
129730 -    {{0, 9, 6}},
129731 -    {{0, 12, 6}},
129732 -    {{0, 15, 6}},
129733 -    {{0, 18, 6}},
129734 -    {{0, 21, 6}},
129735 -    {{0, 24, 6}},
129736 -    {{0, 27, 6}},
129737 -    {{0, 30, 6}},
129738 -    {{0, 32, 6}},
129739 -    {{0, 34, 6}},
129740 -    {{0, 36, 6}},
129741 -    {{0, 38, 6}},
129742 -    {{0, 40, 6}},
129743 -    {{0, 42, 6}},
129744 -    {{0, 44, 6}},
129745 -    {{32, 1, 4}},
129746 -    {{48, 1, 4}},
129747 -    {{16, 2, 4}},
129748 -    {{32, 4, 5}},
129749 -    {{32, 5, 5}},
129750 -    {{32, 7, 5}},
129751 -    {{32, 8, 5}},
129752 -    {{0, 11, 6}},
129753 -    {{0, 14, 6}},
129754 -    {{0, 17, 6}},
129755 -    {{0, 20, 6}},
129756 -    {{0, 23, 6}},
129757 -    {{0, 26, 6}},
129758 -    {{0, 29, 6}},
129759 -    {{0, 52, 6}},
129760 -    {{0, 51, 6}},
129761 -    {{0, 50, 6}},
129762 -    {{0, 49, 6}},
129763 -    {{0, 48, 6}},
129764 -    {{0, 47, 6}},
129765 -    {{0, 46, 6}},
129766 -}; /* ML_defaultDTable */
129768 -static const FSE_decode_t4 OF_defaultDTable[(1 << OF_DEFAULTNORMLOG) + 1] = {
129769 -    {{OF_DEFAULTNORMLOG, 1, 1}}, /* header : tableLog, fastMode, fastMode */
129770 -    {{0, 0, 5}},                /* 0 : base, symbol, bits */
129771 -    {{0, 6, 4}},
129772 -    {{0, 9, 5}},
129773 -    {{0, 15, 5}},
129774 -    {{0, 21, 5}},
129775 -    {{0, 3, 5}},
129776 -    {{0, 7, 4}},
129777 -    {{0, 12, 5}},
129778 -    {{0, 18, 5}},
129779 -    {{0, 23, 5}},
129780 -    {{0, 5, 5}},
129781 -    {{0, 8, 4}},
129782 -    {{0, 14, 5}},
129783 -    {{0, 20, 5}},
129784 -    {{0, 2, 5}},
129785 -    {{16, 7, 4}},
129786 -    {{0, 11, 5}},
129787 -    {{0, 17, 5}},
129788 -    {{0, 22, 5}},
129789 -    {{0, 4, 5}},
129790 -    {{16, 8, 4}},
129791 -    {{0, 13, 5}},
129792 -    {{0, 19, 5}},
129793 -    {{0, 1, 5}},
129794 -    {{16, 6, 4}},
129795 -    {{0, 10, 5}},
129796 -    {{0, 16, 5}},
129797 -    {{0, 28, 5}},
129798 -    {{0, 27, 5}},
129799 -    {{0, 26, 5}},
129800 -    {{0, 25, 5}},
129801 -    {{0, 24, 5}},
129802 -}; /* OF_defaultDTable */
129804 -/*! ZSTD_buildSeqTable() :
129805 -       @return : nb bytes read from src,
129806 -                         or an error code if it fails, testable with ZSTD_isError()
129808 -static size_t ZSTD_buildSeqTable(FSE_DTable *DTableSpace, const FSE_DTable **DTablePtr, symbolEncodingType_e type, U32 max, U32 maxLog, const void *src,
129809 -                                size_t srcSize, const FSE_decode_t4 *defaultTable, U32 flagRepeatTable, void *workspace, size_t workspaceSize)
129811 -       const void *const tmpPtr = defaultTable; /* bypass strict aliasing */
129812 -       switch (type) {
129813 -       case set_rle:
129814 -               if (!srcSize)
129815 -                       return ERROR(srcSize_wrong);
129816 -               if ((*(const BYTE *)src) > max)
129817 -                       return ERROR(corruption_detected);
129818 -               FSE_buildDTable_rle(DTableSpace, *(const BYTE *)src);
129819 -               *DTablePtr = DTableSpace;
129820 -               return 1;
129821 -       case set_basic: *DTablePtr = (const FSE_DTable *)tmpPtr; return 0;
129822 -       case set_repeat:
129823 -               if (!flagRepeatTable)
129824 -                       return ERROR(corruption_detected);
129825 -               return 0;
129826 -       default: /* impossible */
129827 -       case set_compressed: {
129828 -               U32 tableLog;
129829 -               S16 *norm = (S16 *)workspace;
129830 -               size_t const spaceUsed32 = ALIGN(sizeof(S16) * (MaxSeq + 1), sizeof(U32)) >> 2;
129832 -               if ((spaceUsed32 << 2) > workspaceSize)
129833 -                       return ERROR(GENERIC);
129834 -               workspace = (U32 *)workspace + spaceUsed32;
129835 -               workspaceSize -= (spaceUsed32 << 2);
129836 -               {
129837 -                       size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
129838 -                       if (FSE_isError(headerSize))
129839 -                               return ERROR(corruption_detected);
129840 -                       if (tableLog > maxLog)
129841 -                               return ERROR(corruption_detected);
129842 -                       FSE_buildDTable_wksp(DTableSpace, norm, max, tableLog, workspace, workspaceSize);
129843 -                       *DTablePtr = DTableSpace;
129844 -                       return headerSize;
129845 -               }
129846 -       }
129847 -       }
129850 -size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx *dctx, int *nbSeqPtr, const void *src, size_t srcSize)
129852 -       const BYTE *const istart = (const BYTE *const)src;
129853 -       const BYTE *const iend = istart + srcSize;
129854 -       const BYTE *ip = istart;
129856 -       /* check */
129857 -       if (srcSize < MIN_SEQUENCES_SIZE)
129858 -               return ERROR(srcSize_wrong);
129860 -       /* SeqHead */
129861 -       {
129862 -               int nbSeq = *ip++;
129863 -               if (!nbSeq) {
129864 -                       *nbSeqPtr = 0;
129865 -                       return 1;
129866 -               }
129867 -               if (nbSeq > 0x7F) {
129868 -                       if (nbSeq == 0xFF) {
129869 -                               if (ip + 2 > iend)
129870 -                                       return ERROR(srcSize_wrong);
129871 -                               nbSeq = ZSTD_readLE16(ip) + LONGNBSEQ, ip += 2;
129872 -                       } else {
129873 -                               if (ip >= iend)
129874 -                                       return ERROR(srcSize_wrong);
129875 -                               nbSeq = ((nbSeq - 0x80) << 8) + *ip++;
129876 -                       }
129877 -               }
129878 -               *nbSeqPtr = nbSeq;
129879 -       }
129881 -       /* FSE table descriptors */
129882 -       if (ip + 4 > iend)
129883 -               return ERROR(srcSize_wrong); /* minimum possible size */
129884 -       {
129885 -               symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
129886 -               symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
129887 -               symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
129888 -               ip++;
129890 -               /* Build DTables */
129891 -               {
129892 -                       size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr, LLtype, MaxLL, LLFSELog, ip, iend - ip,
129893 -                                                                 LL_defaultDTable, dctx->fseEntropy, dctx->entropy.workspace, sizeof(dctx->entropy.workspace));
129894 -                       if (ZSTD_isError(llhSize))
129895 -                               return ERROR(corruption_detected);
129896 -                       ip += llhSize;
129897 -               }
129898 -               {
129899 -                       size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr, OFtype, MaxOff, OffFSELog, ip, iend - ip,
129900 -                                                                 OF_defaultDTable, dctx->fseEntropy, dctx->entropy.workspace, sizeof(dctx->entropy.workspace));
129901 -                       if (ZSTD_isError(ofhSize))
129902 -                               return ERROR(corruption_detected);
129903 -                       ip += ofhSize;
129904 -               }
129905 -               {
129906 -                       size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr, MLtype, MaxML, MLFSELog, ip, iend - ip,
129907 -                                                                 ML_defaultDTable, dctx->fseEntropy, dctx->entropy.workspace, sizeof(dctx->entropy.workspace));
129908 -                       if (ZSTD_isError(mlhSize))
129909 -                               return ERROR(corruption_detected);
129910 -                       ip += mlhSize;
129911 -               }
129912 -       }
129914 -       return ip - istart;
129917 -typedef struct {
129918 -       size_t litLength;
129919 -       size_t matchLength;
129920 -       size_t offset;
129921 -       const BYTE *match;
129922 -} seq_t;
129924 -typedef struct {
129925 -       BIT_DStream_t DStream;
129926 -       FSE_DState_t stateLL;
129927 -       FSE_DState_t stateOffb;
129928 -       FSE_DState_t stateML;
129929 -       size_t prevOffset[ZSTD_REP_NUM];
129930 -       const BYTE *base;
129931 -       size_t pos;
129932 -       uPtrDiff gotoDict;
129933 -} seqState_t;
129935 -FORCE_NOINLINE
129936 -size_t ZSTD_execSequenceLast7(BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const base,
129937 -                             const BYTE *const vBase, const BYTE *const dictEnd)
129939 -       BYTE *const oLitEnd = op + sequence.litLength;
129940 -       size_t const sequenceLength = sequence.litLength + sequence.matchLength;
129941 -       BYTE *const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
129942 -       BYTE *const oend_w = oend - WILDCOPY_OVERLENGTH;
129943 -       const BYTE *const iLitEnd = *litPtr + sequence.litLength;
129944 -       const BYTE *match = oLitEnd - sequence.offset;
129946 -       /* check */
129947 -       if (oMatchEnd > oend)
129948 -               return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
129949 -       if (iLitEnd > litLimit)
129950 -               return ERROR(corruption_detected); /* over-read beyond lit buffer */
129951 -       if (oLitEnd <= oend_w)
129952 -               return ERROR(GENERIC); /* Precondition */
129954 -       /* copy literals */
129955 -       if (op < oend_w) {
129956 -               ZSTD_wildcopy(op, *litPtr, oend_w - op);
129957 -               *litPtr += oend_w - op;
129958 -               op = oend_w;
129959 -       }
129960 -       while (op < oLitEnd)
129961 -               *op++ = *(*litPtr)++;
129963 -       /* copy Match */
129964 -       if (sequence.offset > (size_t)(oLitEnd - base)) {
129965 -               /* offset beyond prefix */
129966 -               if (sequence.offset > (size_t)(oLitEnd - vBase))
129967 -                       return ERROR(corruption_detected);
129968 -               match = dictEnd - (base - match);
129969 -               if (match + sequence.matchLength <= dictEnd) {
129970 -                       memmove(oLitEnd, match, sequence.matchLength);
129971 -                       return sequenceLength;
129972 -               }
129973 -               /* span extDict & currPrefixSegment */
129974 -               {
129975 -                       size_t const length1 = dictEnd - match;
129976 -                       memmove(oLitEnd, match, length1);
129977 -                       op = oLitEnd + length1;
129978 -                       sequence.matchLength -= length1;
129979 -                       match = base;
129980 -               }
129981 -       }
129982 -       while (op < oMatchEnd)
129983 -               *op++ = *match++;
129984 -       return sequenceLength;
129987 -static seq_t ZSTD_decodeSequence(seqState_t *seqState)
129989 -       seq_t seq;
129991 -       U32 const llCode = FSE_peekSymbol(&seqState->stateLL);
129992 -       U32 const mlCode = FSE_peekSymbol(&seqState->stateML);
129993 -       U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= maxOff, by table construction */
129995 -       U32 const llBits = LL_bits[llCode];
129996 -       U32 const mlBits = ML_bits[mlCode];
129997 -       U32 const ofBits = ofCode;
129998 -       U32 const totalBits = llBits + mlBits + ofBits;
130000 -       static const U32 LL_base[MaxLL + 1] = {0,  1,  2,  3,  4,  5,  6,  7,  8,    9,     10,    11,    12,    13,     14,     15,     16,     18,
130001 -                                              20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000};
130003 -       static const U32 ML_base[MaxML + 1] = {3,  4,  5,  6,  7,  8,  9,  10,   11,    12,    13,    14,    15,     16,     17,     18,     19,     20,
130004 -                                              21, 22, 23, 24, 25, 26, 27, 28,   29,    30,    31,    32,    33,     34,     35,     37,     39,     41,
130005 -                                              43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803, 0x1003, 0x2003, 0x4003, 0x8003, 0x10003};
130007 -       static const U32 OF_base[MaxOff + 1] = {0,       1,     1,      5,      0xD,      0x1D,      0x3D,      0x7D,      0xFD,     0x1FD,
130008 -                                               0x3FD,   0x7FD,    0xFFD,    0x1FFD,   0x3FFD,   0x7FFD,    0xFFFD,    0x1FFFD,   0x3FFFD,  0x7FFFD,
130009 -                                               0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD};
130011 -       /* sequence */
130012 -       {
130013 -               size_t offset;
130014 -               if (!ofCode)
130015 -                       offset = 0;
130016 -               else {
130017 -                       offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <=  (ZSTD_WINDOWLOG_MAX-1) bits */
130018 -                       if (ZSTD_32bits())
130019 -                               BIT_reloadDStream(&seqState->DStream);
130020 -               }
130022 -               if (ofCode <= 1) {
130023 -                       offset += (llCode == 0);
130024 -                       if (offset) {
130025 -                               size_t temp = (offset == 3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
130026 -                               temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
130027 -                               if (offset != 1)
130028 -                                       seqState->prevOffset[2] = seqState->prevOffset[1];
130029 -                               seqState->prevOffset[1] = seqState->prevOffset[0];
130030 -                               seqState->prevOffset[0] = offset = temp;
130031 -                       } else {
130032 -                               offset = seqState->prevOffset[0];
130033 -                       }
130034 -               } else {
130035 -                       seqState->prevOffset[2] = seqState->prevOffset[1];
130036 -                       seqState->prevOffset[1] = seqState->prevOffset[0];
130037 -                       seqState->prevOffset[0] = offset;
130038 -               }
130039 -               seq.offset = offset;
130040 -       }
130042 -       seq.matchLength = ML_base[mlCode] + ((mlCode > 31) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <=  16 bits */
130043 -       if (ZSTD_32bits() && (mlBits + llBits > 24))
130044 -               BIT_reloadDStream(&seqState->DStream);
130046 -       seq.litLength = LL_base[llCode] + ((llCode > 15) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <=  16 bits */
130047 -       if (ZSTD_32bits() || (totalBits > 64 - 7 - (LLFSELog + MLFSELog + OffFSELog)))
130048 -               BIT_reloadDStream(&seqState->DStream);
130050 -       /* ANS state update */
130051 -       FSE_updateState(&seqState->stateLL, &seqState->DStream); /* <=  9 bits */
130052 -       FSE_updateState(&seqState->stateML, &seqState->DStream); /* <=  9 bits */
130053 -       if (ZSTD_32bits())
130054 -               BIT_reloadDStream(&seqState->DStream);             /* <= 18 bits */
130055 -       FSE_updateState(&seqState->stateOffb, &seqState->DStream); /* <=  8 bits */
130057 -       seq.match = NULL;
130059 -       return seq;
130062 -FORCE_INLINE
130063 -size_t ZSTD_execSequence(BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const base,
130064 -                        const BYTE *const vBase, const BYTE *const dictEnd)
130066 -       BYTE *const oLitEnd = op + sequence.litLength;
130067 -       size_t const sequenceLength = sequence.litLength + sequence.matchLength;
130068 -       BYTE *const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
130069 -       BYTE *const oend_w = oend - WILDCOPY_OVERLENGTH;
130070 -       const BYTE *const iLitEnd = *litPtr + sequence.litLength;
130071 -       const BYTE *match = oLitEnd - sequence.offset;
130073 -       /* check */
130074 -       if (oMatchEnd > oend)
130075 -               return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
130076 -       if (iLitEnd > litLimit)
130077 -               return ERROR(corruption_detected); /* over-read beyond lit buffer */
130078 -       if (oLitEnd > oend_w)
130079 -               return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd);
130081 -       /* copy Literals */
130082 -       ZSTD_copy8(op, *litPtr);
130083 -       if (sequence.litLength > 8)
130084 -               ZSTD_wildcopy(op + 8, (*litPtr) + 8,
130085 -                             sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
130086 -       op = oLitEnd;
130087 -       *litPtr = iLitEnd; /* update for next sequence */
130089 -       /* copy Match */
130090 -       if (sequence.offset > (size_t)(oLitEnd - base)) {
130091 -               /* offset beyond prefix */
130092 -               if (sequence.offset > (size_t)(oLitEnd - vBase))
130093 -                       return ERROR(corruption_detected);
130094 -               match = dictEnd + (match - base);
130095 -               if (match + sequence.matchLength <= dictEnd) {
130096 -                       memmove(oLitEnd, match, sequence.matchLength);
130097 -                       return sequenceLength;
130098 -               }
130099 -               /* span extDict & currPrefixSegment */
130100 -               {
130101 -                       size_t const length1 = dictEnd - match;
130102 -                       memmove(oLitEnd, match, length1);
130103 -                       op = oLitEnd + length1;
130104 -                       sequence.matchLength -= length1;
130105 -                       match = base;
130106 -                       if (op > oend_w || sequence.matchLength < MINMATCH) {
130107 -                               U32 i;
130108 -                               for (i = 0; i < sequence.matchLength; ++i)
130109 -                                       op[i] = match[i];
130110 -                               return sequenceLength;
130111 -                       }
130112 -               }
130113 -       }
130114 -       /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */
130116 -       /* match within prefix */
130117 -       if (sequence.offset < 8) {
130118 -               /* close range match, overlap */
130119 -               static const U32 dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};   /* added */
130120 -               static const int dec64table[] = {8, 8, 8, 7, 8, 9, 10, 11}; /* subtracted */
130121 -               int const sub2 = dec64table[sequence.offset];
130122 -               op[0] = match[0];
130123 -               op[1] = match[1];
130124 -               op[2] = match[2];
130125 -               op[3] = match[3];
130126 -               match += dec32table[sequence.offset];
130127 -               ZSTD_copy4(op + 4, match);
130128 -               match -= sub2;
130129 -       } else {
130130 -               ZSTD_copy8(op, match);
130131 -       }
130132 -       op += 8;
130133 -       match += 8;
130135 -       if (oMatchEnd > oend - (16 - MINMATCH)) {
130136 -               if (op < oend_w) {
130137 -                       ZSTD_wildcopy(op, match, oend_w - op);
130138 -                       match += oend_w - op;
130139 -                       op = oend_w;
130140 -               }
130141 -               while (op < oMatchEnd)
130142 -                       *op++ = *match++;
130143 -       } else {
130144 -               ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength - 8); /* works even if matchLength < 8 */
130145 -       }
130146 -       return sequenceLength;
130149 -static size_t ZSTD_decompressSequences(ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize)
130151 -       const BYTE *ip = (const BYTE *)seqStart;
130152 -       const BYTE *const iend = ip + seqSize;
130153 -       BYTE *const ostart = (BYTE * const)dst;
130154 -       BYTE *const oend = ostart + maxDstSize;
130155 -       BYTE *op = ostart;
130156 -       const BYTE *litPtr = dctx->litPtr;
130157 -       const BYTE *const litEnd = litPtr + dctx->litSize;
130158 -       const BYTE *const base = (const BYTE *)(dctx->base);
130159 -       const BYTE *const vBase = (const BYTE *)(dctx->vBase);
130160 -       const BYTE *const dictEnd = (const BYTE *)(dctx->dictEnd);
130161 -       int nbSeq;
130163 -       /* Build Decoding Tables */
130164 -       {
130165 -               size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize);
130166 -               if (ZSTD_isError(seqHSize))
130167 -                       return seqHSize;
130168 -               ip += seqHSize;
130169 -       }
130171 -       /* Regen sequences */
130172 -       if (nbSeq) {
130173 -               seqState_t seqState;
130174 -               dctx->fseEntropy = 1;
130175 -               {
130176 -                       U32 i;
130177 -                       for (i = 0; i < ZSTD_REP_NUM; i++)
130178 -                               seqState.prevOffset[i] = dctx->entropy.rep[i];
130179 -               }
130180 -               CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend - ip), corruption_detected);
130181 -               FSE_initDState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
130182 -               FSE_initDState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
130183 -               FSE_initDState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
130185 -               for (; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq;) {
130186 -                       nbSeq--;
130187 -                       {
130188 -                               seq_t const sequence = ZSTD_decodeSequence(&seqState);
130189 -                               size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
130190 -                               if (ZSTD_isError(oneSeqSize))
130191 -                                       return oneSeqSize;
130192 -                               op += oneSeqSize;
130193 -                       }
130194 -               }
130196 -               /* check if reached exact end */
130197 -               if (nbSeq)
130198 -                       return ERROR(corruption_detected);
130199 -               /* save reps for next block */
130200 -               {
130201 -                       U32 i;
130202 -                       for (i = 0; i < ZSTD_REP_NUM; i++)
130203 -                               dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]);
130204 -               }
130205 -       }
130207 -       /* last literal segment */
130208 -       {
130209 -               size_t const lastLLSize = litEnd - litPtr;
130210 -               if (lastLLSize > (size_t)(oend - op))
130211 -                       return ERROR(dstSize_tooSmall);
130212 -               memcpy(op, litPtr, lastLLSize);
130213 -               op += lastLLSize;
130214 -       }
130216 -       return op - ostart;
130219 -FORCE_INLINE seq_t ZSTD_decodeSequenceLong_generic(seqState_t *seqState, int const longOffsets)
130221 -       seq_t seq;
130223 -       U32 const llCode = FSE_peekSymbol(&seqState->stateLL);
130224 -       U32 const mlCode = FSE_peekSymbol(&seqState->stateML);
130225 -       U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= maxOff, by table construction */
130227 -       U32 const llBits = LL_bits[llCode];
130228 -       U32 const mlBits = ML_bits[mlCode];
130229 -       U32 const ofBits = ofCode;
130230 -       U32 const totalBits = llBits + mlBits + ofBits;
130232 -       static const U32 LL_base[MaxLL + 1] = {0,  1,  2,  3,  4,  5,  6,  7,  8,    9,     10,    11,    12,    13,     14,     15,     16,     18,
130233 -                                              20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000};
130235 -       static const U32 ML_base[MaxML + 1] = {3,  4,  5,  6,  7,  8,  9,  10,   11,    12,    13,    14,    15,     16,     17,     18,     19,     20,
130236 -                                              21, 22, 23, 24, 25, 26, 27, 28,   29,    30,    31,    32,    33,     34,     35,     37,     39,     41,
130237 -                                              43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803, 0x1003, 0x2003, 0x4003, 0x8003, 0x10003};
130239 -       static const U32 OF_base[MaxOff + 1] = {0,       1,     1,      5,      0xD,      0x1D,      0x3D,      0x7D,      0xFD,     0x1FD,
130240 -                                               0x3FD,   0x7FD,    0xFFD,    0x1FFD,   0x3FFD,   0x7FFD,    0xFFFD,    0x1FFFD,   0x3FFFD,  0x7FFFD,
130241 -                                               0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD};
130243 -       /* sequence */
130244 -       {
130245 -               size_t offset;
130246 -               if (!ofCode)
130247 -                       offset = 0;
130248 -               else {
130249 -                       if (longOffsets) {
130250 -                               int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN);
130251 -                               offset = OF_base[ofCode] + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
130252 -                               if (ZSTD_32bits() || extraBits)
130253 -                                       BIT_reloadDStream(&seqState->DStream);
130254 -                               if (extraBits)
130255 -                                       offset += BIT_readBitsFast(&seqState->DStream, extraBits);
130256 -                       } else {
130257 -                               offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <=  (ZSTD_WINDOWLOG_MAX-1) bits */
130258 -                               if (ZSTD_32bits())
130259 -                                       BIT_reloadDStream(&seqState->DStream);
130260 -                       }
130261 -               }
130263 -               if (ofCode <= 1) {
130264 -                       offset += (llCode == 0);
130265 -                       if (offset) {
130266 -                               size_t temp = (offset == 3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
130267 -                               temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
130268 -                               if (offset != 1)
130269 -                                       seqState->prevOffset[2] = seqState->prevOffset[1];
130270 -                               seqState->prevOffset[1] = seqState->prevOffset[0];
130271 -                               seqState->prevOffset[0] = offset = temp;
130272 -                       } else {
130273 -                               offset = seqState->prevOffset[0];
130274 -                       }
130275 -               } else {
130276 -                       seqState->prevOffset[2] = seqState->prevOffset[1];
130277 -                       seqState->prevOffset[1] = seqState->prevOffset[0];
130278 -                       seqState->prevOffset[0] = offset;
130279 -               }
130280 -               seq.offset = offset;
130281 -       }
130283 -       seq.matchLength = ML_base[mlCode] + ((mlCode > 31) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <=  16 bits */
130284 -       if (ZSTD_32bits() && (mlBits + llBits > 24))
130285 -               BIT_reloadDStream(&seqState->DStream);
130287 -       seq.litLength = LL_base[llCode] + ((llCode > 15) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <=  16 bits */
130288 -       if (ZSTD_32bits() || (totalBits > 64 - 7 - (LLFSELog + MLFSELog + OffFSELog)))
130289 -               BIT_reloadDStream(&seqState->DStream);
130291 -       {
130292 -               size_t const pos = seqState->pos + seq.litLength;
130293 -               seq.match = seqState->base + pos - seq.offset; /* single memory segment */
130294 -               if (seq.offset > pos)
130295 -                       seq.match += seqState->gotoDict; /* separate memory segment */
130296 -               seqState->pos = pos + seq.matchLength;
130297 -       }
130299 -       /* ANS state update */
130300 -       FSE_updateState(&seqState->stateLL, &seqState->DStream); /* <=  9 bits */
130301 -       FSE_updateState(&seqState->stateML, &seqState->DStream); /* <=  9 bits */
130302 -       if (ZSTD_32bits())
130303 -               BIT_reloadDStream(&seqState->DStream);             /* <= 18 bits */
130304 -       FSE_updateState(&seqState->stateOffb, &seqState->DStream); /* <=  8 bits */
130306 -       return seq;
130309 -static seq_t ZSTD_decodeSequenceLong(seqState_t *seqState, unsigned const windowSize)
130311 -       if (ZSTD_highbit32(windowSize) > STREAM_ACCUMULATOR_MIN) {
130312 -               return ZSTD_decodeSequenceLong_generic(seqState, 1);
130313 -       } else {
130314 -               return ZSTD_decodeSequenceLong_generic(seqState, 0);
130315 -       }
130318 -FORCE_INLINE
130319 -size_t ZSTD_execSequenceLong(BYTE *op, BYTE *const oend, seq_t sequence, const BYTE **litPtr, const BYTE *const litLimit, const BYTE *const base,
130320 -                            const BYTE *const vBase, const BYTE *const dictEnd)
130322 -       BYTE *const oLitEnd = op + sequence.litLength;
130323 -       size_t const sequenceLength = sequence.litLength + sequence.matchLength;
130324 -       BYTE *const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
130325 -       BYTE *const oend_w = oend - WILDCOPY_OVERLENGTH;
130326 -       const BYTE *const iLitEnd = *litPtr + sequence.litLength;
130327 -       const BYTE *match = sequence.match;
130329 -       /* check */
130330 -       if (oMatchEnd > oend)
130331 -               return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
130332 -       if (iLitEnd > litLimit)
130333 -               return ERROR(corruption_detected); /* over-read beyond lit buffer */
130334 -       if (oLitEnd > oend_w)
130335 -               return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd);
130337 -       /* copy Literals */
130338 -       ZSTD_copy8(op, *litPtr);
130339 -       if (sequence.litLength > 8)
130340 -               ZSTD_wildcopy(op + 8, (*litPtr) + 8,
130341 -                             sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
130342 -       op = oLitEnd;
130343 -       *litPtr = iLitEnd; /* update for next sequence */
130345 -       /* copy Match */
130346 -       if (sequence.offset > (size_t)(oLitEnd - base)) {
130347 -               /* offset beyond prefix */
130348 -               if (sequence.offset > (size_t)(oLitEnd - vBase))
130349 -                       return ERROR(corruption_detected);
130350 -               if (match + sequence.matchLength <= dictEnd) {
130351 -                       memmove(oLitEnd, match, sequence.matchLength);
130352 -                       return sequenceLength;
130353 -               }
130354 -               /* span extDict & currPrefixSegment */
130355 -               {
130356 -                       size_t const length1 = dictEnd - match;
130357 -                       memmove(oLitEnd, match, length1);
130358 -                       op = oLitEnd + length1;
130359 -                       sequence.matchLength -= length1;
130360 -                       match = base;
130361 -                       if (op > oend_w || sequence.matchLength < MINMATCH) {
130362 -                               U32 i;
130363 -                               for (i = 0; i < sequence.matchLength; ++i)
130364 -                                       op[i] = match[i];
130365 -                               return sequenceLength;
130366 -                       }
130367 -               }
130368 -       }
130369 -       /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */
130371 -       /* match within prefix */
130372 -       if (sequence.offset < 8) {
130373 -               /* close range match, overlap */
130374 -               static const U32 dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};   /* added */
130375 -               static const int dec64table[] = {8, 8, 8, 7, 8, 9, 10, 11}; /* subtracted */
130376 -               int const sub2 = dec64table[sequence.offset];
130377 -               op[0] = match[0];
130378 -               op[1] = match[1];
130379 -               op[2] = match[2];
130380 -               op[3] = match[3];
130381 -               match += dec32table[sequence.offset];
130382 -               ZSTD_copy4(op + 4, match);
130383 -               match -= sub2;
130384 -       } else {
130385 -               ZSTD_copy8(op, match);
130386 -       }
130387 -       op += 8;
130388 -       match += 8;
130390 -       if (oMatchEnd > oend - (16 - MINMATCH)) {
130391 -               if (op < oend_w) {
130392 -                       ZSTD_wildcopy(op, match, oend_w - op);
130393 -                       match += oend_w - op;
130394 -                       op = oend_w;
130395 -               }
130396 -               while (op < oMatchEnd)
130397 -                       *op++ = *match++;
130398 -       } else {
130399 -               ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength - 8); /* works even if matchLength < 8 */
130400 -       }
130401 -       return sequenceLength;
130404 -static size_t ZSTD_decompressSequencesLong(ZSTD_DCtx *dctx, void *dst, size_t maxDstSize, const void *seqStart, size_t seqSize)
130406 -       const BYTE *ip = (const BYTE *)seqStart;
130407 -       const BYTE *const iend = ip + seqSize;
130408 -       BYTE *const ostart = (BYTE * const)dst;
130409 -       BYTE *const oend = ostart + maxDstSize;
130410 -       BYTE *op = ostart;
130411 -       const BYTE *litPtr = dctx->litPtr;
130412 -       const BYTE *const litEnd = litPtr + dctx->litSize;
130413 -       const BYTE *const base = (const BYTE *)(dctx->base);
130414 -       const BYTE *const vBase = (const BYTE *)(dctx->vBase);
130415 -       const BYTE *const dictEnd = (const BYTE *)(dctx->dictEnd);
130416 -       unsigned const windowSize = dctx->fParams.windowSize;
130417 -       int nbSeq;
130419 -       /* Build Decoding Tables */
130420 -       {
130421 -               size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize);
130422 -               if (ZSTD_isError(seqHSize))
130423 -                       return seqHSize;
130424 -               ip += seqHSize;
130425 -       }
130427 -       /* Regen sequences */
130428 -       if (nbSeq) {
130429 -#define STORED_SEQS 4
130430 -#define STOSEQ_MASK (STORED_SEQS - 1)
130431 -#define ADVANCED_SEQS 4
130432 -               seq_t *sequences = (seq_t *)dctx->entropy.workspace;
130433 -               int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS);
130434 -               seqState_t seqState;
130435 -               int seqNb;
130436 -               ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.workspace) >= sizeof(seq_t) * STORED_SEQS);
130437 -               dctx->fseEntropy = 1;
130438 -               {
130439 -                       U32 i;
130440 -                       for (i = 0; i < ZSTD_REP_NUM; i++)
130441 -                               seqState.prevOffset[i] = dctx->entropy.rep[i];
130442 -               }
130443 -               seqState.base = base;
130444 -               seqState.pos = (size_t)(op - base);
130445 -               seqState.gotoDict = (uPtrDiff)dictEnd - (uPtrDiff)base; /* cast to avoid undefined behaviour */
130446 -               CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend - ip), corruption_detected);
130447 -               FSE_initDState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
130448 -               FSE_initDState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
130449 -               FSE_initDState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
130451 -               /* prepare in advance */
130452 -               for (seqNb = 0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && seqNb < seqAdvance; seqNb++) {
130453 -                       sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState, windowSize);
130454 -               }
130455 -               if (seqNb < seqAdvance)
130456 -                       return ERROR(corruption_detected);
130458 -               /* decode and decompress */
130459 -               for (; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && seqNb < nbSeq; seqNb++) {
130460 -                       seq_t const sequence = ZSTD_decodeSequenceLong(&seqState, windowSize);
130461 -                       size_t const oneSeqSize =
130462 -                           ZSTD_execSequenceLong(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STOSEQ_MASK], &litPtr, litEnd, base, vBase, dictEnd);
130463 -                       if (ZSTD_isError(oneSeqSize))
130464 -                               return oneSeqSize;
130465 -                       ZSTD_PREFETCH(sequence.match);
130466 -                       sequences[seqNb & STOSEQ_MASK] = sequence;
130467 -                       op += oneSeqSize;
130468 -               }
130469 -               if (seqNb < nbSeq)
130470 -                       return ERROR(corruption_detected);
130472 -               /* finish queue */
130473 -               seqNb -= seqAdvance;
130474 -               for (; seqNb < nbSeq; seqNb++) {
130475 -                       size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[seqNb & STOSEQ_MASK], &litPtr, litEnd, base, vBase, dictEnd);
130476 -                       if (ZSTD_isError(oneSeqSize))
130477 -                               return oneSeqSize;
130478 -                       op += oneSeqSize;
130479 -               }
130481 -               /* save reps for next block */
130482 -               {
130483 -                       U32 i;
130484 -                       for (i = 0; i < ZSTD_REP_NUM; i++)
130485 -                               dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]);
130486 -               }
130487 -       }
130489 -       /* last literal segment */
130490 -       {
130491 -               size_t const lastLLSize = litEnd - litPtr;
130492 -               if (lastLLSize > (size_t)(oend - op))
130493 -                       return ERROR(dstSize_tooSmall);
130494 -               memcpy(op, litPtr, lastLLSize);
130495 -               op += lastLLSize;
130496 -       }
130498 -       return op - ostart;
130501 -static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
130502 -{ /* blockType == blockCompressed */
130503 -       const BYTE *ip = (const BYTE *)src;
130505 -       if (srcSize >= ZSTD_BLOCKSIZE_ABSOLUTEMAX)
130506 -               return ERROR(srcSize_wrong);
130508 -       /* Decode literals section */
130509 -       {
130510 -               size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
130511 -               if (ZSTD_isError(litCSize))
130512 -                       return litCSize;
130513 -               ip += litCSize;
130514 -               srcSize -= litCSize;
130515 -       }
130516 -       if (sizeof(size_t) > 4) /* do not enable prefetching on 32-bits x86, as it's performance detrimental */
130517 -                               /* likely because of register pressure */
130518 -                               /* if that's the correct cause, then 32-bits ARM should be affected differently */
130519 -                               /* it would be good to test this on ARM real hardware, to see if prefetch version improves speed */
130520 -               if (dctx->fParams.windowSize > (1 << 23))
130521 -                       return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize);
130522 -       return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize);
130525 -static void ZSTD_checkContinuity(ZSTD_DCtx *dctx, const void *dst)
130527 -       if (dst != dctx->previousDstEnd) { /* not contiguous */
130528 -               dctx->dictEnd = dctx->previousDstEnd;
130529 -               dctx->vBase = (const char *)dst - ((const char *)(dctx->previousDstEnd) - (const char *)(dctx->base));
130530 -               dctx->base = dst;
130531 -               dctx->previousDstEnd = dst;
130532 -       }
130535 -size_t ZSTD_decompressBlock(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
130537 -       size_t dSize;
130538 -       ZSTD_checkContinuity(dctx, dst);
130539 -       dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);
130540 -       dctx->previousDstEnd = (char *)dst + dSize;
130541 -       return dSize;
130544 -/** ZSTD_insertBlock() :
130545 -       insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
130546 -size_t ZSTD_insertBlock(ZSTD_DCtx *dctx, const void *blockStart, size_t blockSize)
130548 -       ZSTD_checkContinuity(dctx, blockStart);
130549 -       dctx->previousDstEnd = (const char *)blockStart + blockSize;
130550 -       return blockSize;
130553 -size_t ZSTD_generateNxBytes(void *dst, size_t dstCapacity, BYTE byte, size_t length)
130555 -       if (length > dstCapacity)
130556 -               return ERROR(dstSize_tooSmall);
130557 -       memset(dst, byte, length);
130558 -       return length;
130561 -/** ZSTD_findFrameCompressedSize() :
130562 - *  compatible with legacy mode
130563 - *  `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame
130564 - *  `srcSize` must be at least as large as the frame contained
130565 - *  @return : the compressed size of the frame starting at `src` */
130566 -size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
130568 -       if (srcSize >= ZSTD_skippableHeaderSize && (ZSTD_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
130569 -               return ZSTD_skippableHeaderSize + ZSTD_readLE32((const BYTE *)src + 4);
130570 -       } else {
130571 -               const BYTE *ip = (const BYTE *)src;
130572 -               const BYTE *const ipstart = ip;
130573 -               size_t remainingSize = srcSize;
130574 -               ZSTD_frameParams fParams;
130576 -               size_t const headerSize = ZSTD_frameHeaderSize(ip, remainingSize);
130577 -               if (ZSTD_isError(headerSize))
130578 -                       return headerSize;
130580 -               /* Frame Header */
130581 -               {
130582 -                       size_t const ret = ZSTD_getFrameParams(&fParams, ip, remainingSize);
130583 -                       if (ZSTD_isError(ret))
130584 -                               return ret;
130585 -                       if (ret > 0)
130586 -                               return ERROR(srcSize_wrong);
130587 -               }
130589 -               ip += headerSize;
130590 -               remainingSize -= headerSize;
130592 -               /* Loop on each block */
130593 -               while (1) {
130594 -                       blockProperties_t blockProperties;
130595 -                       size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
130596 -                       if (ZSTD_isError(cBlockSize))
130597 -                               return cBlockSize;
130599 -                       if (ZSTD_blockHeaderSize + cBlockSize > remainingSize)
130600 -                               return ERROR(srcSize_wrong);
130602 -                       ip += ZSTD_blockHeaderSize + cBlockSize;
130603 -                       remainingSize -= ZSTD_blockHeaderSize + cBlockSize;
130605 -                       if (blockProperties.lastBlock)
130606 -                               break;
130607 -               }
130609 -               if (fParams.checksumFlag) { /* Frame content checksum */
130610 -                       if (remainingSize < 4)
130611 -                               return ERROR(srcSize_wrong);
130612 -                       ip += 4;
130613 -                       remainingSize -= 4;
130614 -               }
130616 -               return ip - ipstart;
130617 -       }
130620 -/*! ZSTD_decompressFrame() :
130621 -*   @dctx must be properly initialized */
130622 -static size_t ZSTD_decompressFrame(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void **srcPtr, size_t *srcSizePtr)
130624 -       const BYTE *ip = (const BYTE *)(*srcPtr);
130625 -       BYTE *const ostart = (BYTE * const)dst;
130626 -       BYTE *const oend = ostart + dstCapacity;
130627 -       BYTE *op = ostart;
130628 -       size_t remainingSize = *srcSizePtr;
130630 -       /* check */
130631 -       if (remainingSize < ZSTD_frameHeaderSize_min + ZSTD_blockHeaderSize)
130632 -               return ERROR(srcSize_wrong);
130634 -       /* Frame Header */
130635 -       {
130636 -               size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_frameHeaderSize_prefix);
130637 -               if (ZSTD_isError(frameHeaderSize))
130638 -                       return frameHeaderSize;
130639 -               if (remainingSize < frameHeaderSize + ZSTD_blockHeaderSize)
130640 -                       return ERROR(srcSize_wrong);
130641 -               CHECK_F(ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize));
130642 -               ip += frameHeaderSize;
130643 -               remainingSize -= frameHeaderSize;
130644 -       }
130646 -       /* Loop on each block */
130647 -       while (1) {
130648 -               size_t decodedSize;
130649 -               blockProperties_t blockProperties;
130650 -               size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
130651 -               if (ZSTD_isError(cBlockSize))
130652 -                       return cBlockSize;
130654 -               ip += ZSTD_blockHeaderSize;
130655 -               remainingSize -= ZSTD_blockHeaderSize;
130656 -               if (cBlockSize > remainingSize)
130657 -                       return ERROR(srcSize_wrong);
130659 -               switch (blockProperties.blockType) {
130660 -               case bt_compressed: decodedSize = ZSTD_decompressBlock_internal(dctx, op, oend - op, ip, cBlockSize); break;
130661 -               case bt_raw: decodedSize = ZSTD_copyRawBlock(op, oend - op, ip, cBlockSize); break;
130662 -               case bt_rle: decodedSize = ZSTD_generateNxBytes(op, oend - op, *ip, blockProperties.origSize); break;
130663 -               case bt_reserved:
130664 -               default: return ERROR(corruption_detected);
130665 -               }
130667 -               if (ZSTD_isError(decodedSize))
130668 -                       return decodedSize;
130669 -               if (dctx->fParams.checksumFlag)
130670 -                       xxh64_update(&dctx->xxhState, op, decodedSize);
130671 -               op += decodedSize;
130672 -               ip += cBlockSize;
130673 -               remainingSize -= cBlockSize;
130674 -               if (blockProperties.lastBlock)
130675 -                       break;
130676 -       }
130678 -       if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */
130679 -               U32 const checkCalc = (U32)xxh64_digest(&dctx->xxhState);
130680 -               U32 checkRead;
130681 -               if (remainingSize < 4)
130682 -                       return ERROR(checksum_wrong);
130683 -               checkRead = ZSTD_readLE32(ip);
130684 -               if (checkRead != checkCalc)
130685 -                       return ERROR(checksum_wrong);
130686 -               ip += 4;
130687 -               remainingSize -= 4;
130688 -       }
130690 -       /* Allow caller to get size read */
130691 -       *srcPtr = ip;
130692 -       *srcSizePtr = remainingSize;
130693 -       return op - ostart;
130696 -static const void *ZSTD_DDictDictContent(const ZSTD_DDict *ddict);
130697 -static size_t ZSTD_DDictDictSize(const ZSTD_DDict *ddict);
130699 -static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize,
130700 -                                       const ZSTD_DDict *ddict)
130702 -       void *const dststart = dst;
130704 -       if (ddict) {
130705 -               if (dict) {
130706 -                       /* programmer error, these two cases should be mutually exclusive */
130707 -                       return ERROR(GENERIC);
130708 -               }
130710 -               dict = ZSTD_DDictDictContent(ddict);
130711 -               dictSize = ZSTD_DDictDictSize(ddict);
130712 -       }
130714 -       while (srcSize >= ZSTD_frameHeaderSize_prefix) {
130715 -               U32 magicNumber;
130717 -               magicNumber = ZSTD_readLE32(src);
130718 -               if (magicNumber != ZSTD_MAGICNUMBER) {
130719 -                       if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
130720 -                               size_t skippableSize;
130721 -                               if (srcSize < ZSTD_skippableHeaderSize)
130722 -                                       return ERROR(srcSize_wrong);
130723 -                               skippableSize = ZSTD_readLE32((const BYTE *)src + 4) + ZSTD_skippableHeaderSize;
130724 -                               if (srcSize < skippableSize) {
130725 -                                       return ERROR(srcSize_wrong);
130726 -                               }
130728 -                               src = (const BYTE *)src + skippableSize;
130729 -                               srcSize -= skippableSize;
130730 -                               continue;
130731 -                       } else {
130732 -                               return ERROR(prefix_unknown);
130733 -                       }
130734 -               }
130736 -               if (ddict) {
130737 -                       /* we were called from ZSTD_decompress_usingDDict */
130738 -                       ZSTD_refDDict(dctx, ddict);
130739 -               } else {
130740 -                       /* this will initialize correctly with no dict if dict == NULL, so
130741 -                        * use this in all cases but ddict */
130742 -                       CHECK_F(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize));
130743 -               }
130744 -               ZSTD_checkContinuity(dctx, dst);
130746 -               {
130747 -                       const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity, &src, &srcSize);
130748 -                       if (ZSTD_isError(res))
130749 -                               return res;
130750 -                       /* don't need to bounds check this, ZSTD_decompressFrame will have
130751 -                        * already */
130752 -                       dst = (BYTE *)dst + res;
130753 -                       dstCapacity -= res;
130754 -               }
130755 -       }
130757 -       if (srcSize)
130758 -               return ERROR(srcSize_wrong); /* input not entirely consumed */
130760 -       return (BYTE *)dst - (BYTE *)dststart;
130763 -size_t ZSTD_decompress_usingDict(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize)
130765 -       return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL);
130768 -size_t ZSTD_decompressDCtx(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
130770 -       return ZSTD_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0);
130773 -/*-**************************************
130774 -*   Advanced Streaming Decompression API
130775 -*   Bufferless and synchronous
130776 -****************************************/
130777 -size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx *dctx) { return dctx->expected; }
130779 -ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx *dctx)
130781 -       switch (dctx->stage) {
130782 -       default: /* should not happen */
130783 -       case ZSTDds_getFrameHeaderSize:
130784 -       case ZSTDds_decodeFrameHeader: return ZSTDnit_frameHeader;
130785 -       case ZSTDds_decodeBlockHeader: return ZSTDnit_blockHeader;
130786 -       case ZSTDds_decompressBlock: return ZSTDnit_block;
130787 -       case ZSTDds_decompressLastBlock: return ZSTDnit_lastBlock;
130788 -       case ZSTDds_checkChecksum: return ZSTDnit_checksum;
130789 -       case ZSTDds_decodeSkippableHeader:
130790 -       case ZSTDds_skipFrame: return ZSTDnit_skippableFrame;
130791 -       }
130794 -int ZSTD_isSkipFrame(ZSTD_DCtx *dctx) { return dctx->stage == ZSTDds_skipFrame; } /* for zbuff */
130796 -/** ZSTD_decompressContinue() :
130797 -*   @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity)
130798 -*             or an error code, which can be tested using ZSTD_isError() */
130799 -size_t ZSTD_decompressContinue(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
130801 -       /* Sanity check */
130802 -       if (srcSize != dctx->expected)
130803 -               return ERROR(srcSize_wrong);
130804 -       if (dstCapacity)
130805 -               ZSTD_checkContinuity(dctx, dst);
130807 -       switch (dctx->stage) {
130808 -       case ZSTDds_getFrameHeaderSize:
130809 -               if (srcSize != ZSTD_frameHeaderSize_prefix)
130810 -                       return ERROR(srcSize_wrong);                                    /* impossible */
130811 -               if ((ZSTD_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
130812 -                       memcpy(dctx->headerBuffer, src, ZSTD_frameHeaderSize_prefix);
130813 -                       dctx->expected = ZSTD_skippableHeaderSize - ZSTD_frameHeaderSize_prefix; /* magic number + skippable frame length */
130814 -                       dctx->stage = ZSTDds_decodeSkippableHeader;
130815 -                       return 0;
130816 -               }
130817 -               dctx->headerSize = ZSTD_frameHeaderSize(src, ZSTD_frameHeaderSize_prefix);
130818 -               if (ZSTD_isError(dctx->headerSize))
130819 -                       return dctx->headerSize;
130820 -               memcpy(dctx->headerBuffer, src, ZSTD_frameHeaderSize_prefix);
130821 -               if (dctx->headerSize > ZSTD_frameHeaderSize_prefix) {
130822 -                       dctx->expected = dctx->headerSize - ZSTD_frameHeaderSize_prefix;
130823 -                       dctx->stage = ZSTDds_decodeFrameHeader;
130824 -                       return 0;
130825 -               }
130826 -               dctx->expected = 0; /* not necessary to copy more */
130827 -               fallthrough;
130829 -       case ZSTDds_decodeFrameHeader:
130830 -               memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected);
130831 -               CHECK_F(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize));
130832 -               dctx->expected = ZSTD_blockHeaderSize;
130833 -               dctx->stage = ZSTDds_decodeBlockHeader;
130834 -               return 0;
130836 -       case ZSTDds_decodeBlockHeader: {
130837 -               blockProperties_t bp;
130838 -               size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
130839 -               if (ZSTD_isError(cBlockSize))
130840 -                       return cBlockSize;
130841 -               dctx->expected = cBlockSize;
130842 -               dctx->bType = bp.blockType;
130843 -               dctx->rleSize = bp.origSize;
130844 -               if (cBlockSize) {
130845 -                       dctx->stage = bp.lastBlock ? ZSTDds_decompressLastBlock : ZSTDds_decompressBlock;
130846 -                       return 0;
130847 -               }
130848 -               /* empty block */
130849 -               if (bp.lastBlock) {
130850 -                       if (dctx->fParams.checksumFlag) {
130851 -                               dctx->expected = 4;
130852 -                               dctx->stage = ZSTDds_checkChecksum;
130853 -                       } else {
130854 -                               dctx->expected = 0; /* end of frame */
130855 -                               dctx->stage = ZSTDds_getFrameHeaderSize;
130856 -                       }
130857 -               } else {
130858 -                       dctx->expected = 3; /* go directly to next header */
130859 -                       dctx->stage = ZSTDds_decodeBlockHeader;
130860 -               }
130861 -               return 0;
130862 -       }
130863 -       case ZSTDds_decompressLastBlock:
130864 -       case ZSTDds_decompressBlock: {
130865 -               size_t rSize;
130866 -               switch (dctx->bType) {
130867 -               case bt_compressed: rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize); break;
130868 -               case bt_raw: rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize); break;
130869 -               case bt_rle: rSize = ZSTD_setRleBlock(dst, dstCapacity, src, srcSize, dctx->rleSize); break;
130870 -               case bt_reserved: /* should never happen */
130871 -               default: return ERROR(corruption_detected);
130872 -               }
130873 -               if (ZSTD_isError(rSize))
130874 -                       return rSize;
130875 -               if (dctx->fParams.checksumFlag)
130876 -                       xxh64_update(&dctx->xxhState, dst, rSize);
130878 -               if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */
130879 -                       if (dctx->fParams.checksumFlag) {       /* another round for frame checksum */
130880 -                               dctx->expected = 4;
130881 -                               dctx->stage = ZSTDds_checkChecksum;
130882 -                       } else {
130883 -                               dctx->expected = 0; /* ends here */
130884 -                               dctx->stage = ZSTDds_getFrameHeaderSize;
130885 -                       }
130886 -               } else {
130887 -                       dctx->stage = ZSTDds_decodeBlockHeader;
130888 -                       dctx->expected = ZSTD_blockHeaderSize;
130889 -                       dctx->previousDstEnd = (char *)dst + rSize;
130890 -               }
130891 -               return rSize;
130892 -       }
130893 -       case ZSTDds_checkChecksum: {
130894 -               U32 const h32 = (U32)xxh64_digest(&dctx->xxhState);
130895 -               U32 const check32 = ZSTD_readLE32(src); /* srcSize == 4, guaranteed by dctx->expected */
130896 -               if (check32 != h32)
130897 -                       return ERROR(checksum_wrong);
130898 -               dctx->expected = 0;
130899 -               dctx->stage = ZSTDds_getFrameHeaderSize;
130900 -               return 0;
130901 -       }
130902 -       case ZSTDds_decodeSkippableHeader: {
130903 -               memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected);
130904 -               dctx->expected = ZSTD_readLE32(dctx->headerBuffer + 4);
130905 -               dctx->stage = ZSTDds_skipFrame;
130906 -               return 0;
130907 -       }
130908 -       case ZSTDds_skipFrame: {
130909 -               dctx->expected = 0;
130910 -               dctx->stage = ZSTDds_getFrameHeaderSize;
130911 -               return 0;
130912 -       }
130913 -       default:
130914 -               return ERROR(GENERIC); /* impossible */
130915 -       }
130918 -static size_t ZSTD_refDictContent(ZSTD_DCtx *dctx, const void *dict, size_t dictSize)
130920 -       dctx->dictEnd = dctx->previousDstEnd;
130921 -       dctx->vBase = (const char *)dict - ((const char *)(dctx->previousDstEnd) - (const char *)(dctx->base));
130922 -       dctx->base = dict;
130923 -       dctx->previousDstEnd = (const char *)dict + dictSize;
130924 -       return 0;
130927 -/* ZSTD_loadEntropy() :
130928 - * dict : must point at beginning of a valid zstd dictionary
130929 - * @return : size of entropy tables read */
130930 -static size_t ZSTD_loadEntropy(ZSTD_entropyTables_t *entropy, const void *const dict, size_t const dictSize)
130932 -       const BYTE *dictPtr = (const BYTE *)dict;
130933 -       const BYTE *const dictEnd = dictPtr + dictSize;
130935 -       if (dictSize <= 8)
130936 -               return ERROR(dictionary_corrupted);
130937 -       dictPtr += 8; /* skip header = magic + dictID */
130939 -       {
130940 -               size_t const hSize = HUF_readDTableX4_wksp(entropy->hufTable, dictPtr, dictEnd - dictPtr, entropy->workspace, sizeof(entropy->workspace));
130941 -               if (HUF_isError(hSize))
130942 -                       return ERROR(dictionary_corrupted);
130943 -               dictPtr += hSize;
130944 -       }
130946 -       {
130947 -               short offcodeNCount[MaxOff + 1];
130948 -               U32 offcodeMaxValue = MaxOff, offcodeLog;
130949 -               size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd - dictPtr);
130950 -               if (FSE_isError(offcodeHeaderSize))
130951 -                       return ERROR(dictionary_corrupted);
130952 -               if (offcodeLog > OffFSELog)
130953 -                       return ERROR(dictionary_corrupted);
130954 -               CHECK_E(FSE_buildDTable_wksp(entropy->OFTable, offcodeNCount, offcodeMaxValue, offcodeLog, entropy->workspace, sizeof(entropy->workspace)), dictionary_corrupted);
130955 -               dictPtr += offcodeHeaderSize;
130956 -       }
130958 -       {
130959 -               short matchlengthNCount[MaxML + 1];
130960 -               unsigned matchlengthMaxValue = MaxML, matchlengthLog;
130961 -               size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd - dictPtr);
130962 -               if (FSE_isError(matchlengthHeaderSize))
130963 -                       return ERROR(dictionary_corrupted);
130964 -               if (matchlengthLog > MLFSELog)
130965 -                       return ERROR(dictionary_corrupted);
130966 -               CHECK_E(FSE_buildDTable_wksp(entropy->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, entropy->workspace, sizeof(entropy->workspace)), dictionary_corrupted);
130967 -               dictPtr += matchlengthHeaderSize;
130968 -       }
130970 -       {
130971 -               short litlengthNCount[MaxLL + 1];
130972 -               unsigned litlengthMaxValue = MaxLL, litlengthLog;
130973 -               size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd - dictPtr);
130974 -               if (FSE_isError(litlengthHeaderSize))
130975 -                       return ERROR(dictionary_corrupted);
130976 -               if (litlengthLog > LLFSELog)
130977 -                       return ERROR(dictionary_corrupted);
130978 -               CHECK_E(FSE_buildDTable_wksp(entropy->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog, entropy->workspace, sizeof(entropy->workspace)), dictionary_corrupted);
130979 -               dictPtr += litlengthHeaderSize;
130980 -       }
130982 -       if (dictPtr + 12 > dictEnd)
130983 -               return ERROR(dictionary_corrupted);
130984 -       {
130985 -               int i;
130986 -               size_t const dictContentSize = (size_t)(dictEnd - (dictPtr + 12));
130987 -               for (i = 0; i < 3; i++) {
130988 -                       U32 const rep = ZSTD_readLE32(dictPtr);
130989 -                       dictPtr += 4;
130990 -                       if (rep == 0 || rep >= dictContentSize)
130991 -                               return ERROR(dictionary_corrupted);
130992 -                       entropy->rep[i] = rep;
130993 -               }
130994 -       }
130996 -       return dictPtr - (const BYTE *)dict;
130999 -static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx *dctx, const void *dict, size_t dictSize)
131001 -       if (dictSize < 8)
131002 -               return ZSTD_refDictContent(dctx, dict, dictSize);
131003 -       {
131004 -               U32 const magic = ZSTD_readLE32(dict);
131005 -               if (magic != ZSTD_DICT_MAGIC) {
131006 -                       return ZSTD_refDictContent(dctx, dict, dictSize); /* pure content mode */
131007 -               }
131008 -       }
131009 -       dctx->dictID = ZSTD_readLE32((const char *)dict + 4);
131011 -       /* load entropy tables */
131012 -       {
131013 -               size_t const eSize = ZSTD_loadEntropy(&dctx->entropy, dict, dictSize);
131014 -               if (ZSTD_isError(eSize))
131015 -                       return ERROR(dictionary_corrupted);
131016 -               dict = (const char *)dict + eSize;
131017 -               dictSize -= eSize;
131018 -       }
131019 -       dctx->litEntropy = dctx->fseEntropy = 1;
131021 -       /* reference dictionary content */
131022 -       return ZSTD_refDictContent(dctx, dict, dictSize);
131025 -size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx *dctx, const void *dict, size_t dictSize)
131027 -       CHECK_F(ZSTD_decompressBegin(dctx));
131028 -       if (dict && dictSize)
131029 -               CHECK_E(ZSTD_decompress_insertDictionary(dctx, dict, dictSize), dictionary_corrupted);
131030 -       return 0;
131033 -/* ======   ZSTD_DDict   ====== */
131035 -struct ZSTD_DDict_s {
131036 -       void *dictBuffer;
131037 -       const void *dictContent;
131038 -       size_t dictSize;
131039 -       ZSTD_entropyTables_t entropy;
131040 -       U32 dictID;
131041 -       U32 entropyPresent;
131042 -       ZSTD_customMem cMem;
131043 -}; /* typedef'd to ZSTD_DDict within "zstd.h" */
131045 -size_t ZSTD_DDictWorkspaceBound(void) { return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_DDict)); }
131047 -static const void *ZSTD_DDictDictContent(const ZSTD_DDict *ddict) { return ddict->dictContent; }
131049 -static size_t ZSTD_DDictDictSize(const ZSTD_DDict *ddict) { return ddict->dictSize; }
131051 -static void ZSTD_refDDict(ZSTD_DCtx *dstDCtx, const ZSTD_DDict *ddict)
131053 -       ZSTD_decompressBegin(dstDCtx); /* init */
131054 -       if (ddict) {                   /* support refDDict on NULL */
131055 -               dstDCtx->dictID = ddict->dictID;
131056 -               dstDCtx->base = ddict->dictContent;
131057 -               dstDCtx->vBase = ddict->dictContent;
131058 -               dstDCtx->dictEnd = (const BYTE *)ddict->dictContent + ddict->dictSize;
131059 -               dstDCtx->previousDstEnd = dstDCtx->dictEnd;
131060 -               if (ddict->entropyPresent) {
131061 -                       dstDCtx->litEntropy = 1;
131062 -                       dstDCtx->fseEntropy = 1;
131063 -                       dstDCtx->LLTptr = ddict->entropy.LLTable;
131064 -                       dstDCtx->MLTptr = ddict->entropy.MLTable;
131065 -                       dstDCtx->OFTptr = ddict->entropy.OFTable;
131066 -                       dstDCtx->HUFptr = ddict->entropy.hufTable;
131067 -                       dstDCtx->entropy.rep[0] = ddict->entropy.rep[0];
131068 -                       dstDCtx->entropy.rep[1] = ddict->entropy.rep[1];
131069 -                       dstDCtx->entropy.rep[2] = ddict->entropy.rep[2];
131070 -               } else {
131071 -                       dstDCtx->litEntropy = 0;
131072 -                       dstDCtx->fseEntropy = 0;
131073 -               }
131074 -       }
131077 -static size_t ZSTD_loadEntropy_inDDict(ZSTD_DDict *ddict)
131079 -       ddict->dictID = 0;
131080 -       ddict->entropyPresent = 0;
131081 -       if (ddict->dictSize < 8)
131082 -               return 0;
131083 -       {
131084 -               U32 const magic = ZSTD_readLE32(ddict->dictContent);
131085 -               if (magic != ZSTD_DICT_MAGIC)
131086 -                       return 0; /* pure content mode */
131087 -       }
131088 -       ddict->dictID = ZSTD_readLE32((const char *)ddict->dictContent + 4);
131090 -       /* load entropy tables */
131091 -       CHECK_E(ZSTD_loadEntropy(&ddict->entropy, ddict->dictContent, ddict->dictSize), dictionary_corrupted);
131092 -       ddict->entropyPresent = 1;
131093 -       return 0;
131096 -static ZSTD_DDict *ZSTD_createDDict_advanced(const void *dict, size_t dictSize, unsigned byReference, ZSTD_customMem customMem)
131098 -       if (!customMem.customAlloc || !customMem.customFree)
131099 -               return NULL;
131101 -       {
131102 -               ZSTD_DDict *const ddict = (ZSTD_DDict *)ZSTD_malloc(sizeof(ZSTD_DDict), customMem);
131103 -               if (!ddict)
131104 -                       return NULL;
131105 -               ddict->cMem = customMem;
131107 -               if ((byReference) || (!dict) || (!dictSize)) {
131108 -                       ddict->dictBuffer = NULL;
131109 -                       ddict->dictContent = dict;
131110 -               } else {
131111 -                       void *const internalBuffer = ZSTD_malloc(dictSize, customMem);
131112 -                       if (!internalBuffer) {
131113 -                               ZSTD_freeDDict(ddict);
131114 -                               return NULL;
131115 -                       }
131116 -                       memcpy(internalBuffer, dict, dictSize);
131117 -                       ddict->dictBuffer = internalBuffer;
131118 -                       ddict->dictContent = internalBuffer;
131119 -               }
131120 -               ddict->dictSize = dictSize;
131121 -               ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
131122 -               /* parse dictionary content */
131123 -               {
131124 -                       size_t const errorCode = ZSTD_loadEntropy_inDDict(ddict);
131125 -                       if (ZSTD_isError(errorCode)) {
131126 -                               ZSTD_freeDDict(ddict);
131127 -                               return NULL;
131128 -                       }
131129 -               }
131131 -               return ddict;
131132 -       }
131135 -/*! ZSTD_initDDict() :
131136 -*   Create a digested dictionary, to start decompression without startup delay.
131137 -*   `dict` content is copied inside DDict.
131138 -*   Consequently, `dict` can be released after `ZSTD_DDict` creation */
131139 -ZSTD_DDict *ZSTD_initDDict(const void *dict, size_t dictSize, void *workspace, size_t workspaceSize)
131141 -       ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
131142 -       return ZSTD_createDDict_advanced(dict, dictSize, 1, stackMem);
131145 -size_t ZSTD_freeDDict(ZSTD_DDict *ddict)
131147 -       if (ddict == NULL)
131148 -               return 0; /* support free on NULL */
131149 -       {
131150 -               ZSTD_customMem const cMem = ddict->cMem;
131151 -               ZSTD_free(ddict->dictBuffer, cMem);
131152 -               ZSTD_free(ddict, cMem);
131153 -               return 0;
131154 -       }
131157 -/*! ZSTD_getDictID_fromDict() :
131158 - *  Provides the dictID stored within dictionary.
131159 - *  if @return == 0, the dictionary is not conformant with Zstandard specification.
131160 - *  It can still be loaded, but as a content-only dictionary. */
131161 -unsigned ZSTD_getDictID_fromDict(const void *dict, size_t dictSize)
131163 -       if (dictSize < 8)
131164 -               return 0;
131165 -       if (ZSTD_readLE32(dict) != ZSTD_DICT_MAGIC)
131166 -               return 0;
131167 -       return ZSTD_readLE32((const char *)dict + 4);
131170 -/*! ZSTD_getDictID_fromDDict() :
131171 - *  Provides the dictID of the dictionary loaded into `ddict`.
131172 - *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
131173 - *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
131174 -unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict *ddict)
131176 -       if (ddict == NULL)
131177 -               return 0;
131178 -       return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize);
131181 -/*! ZSTD_getDictID_fromFrame() :
131182 - *  Provides the dictID required to decompressed the frame stored within `src`.
131183 - *  If @return == 0, the dictID could not be decoded.
131184 - *  This could for one of the following reasons :
131185 - *  - The frame does not require a dictionary to be decoded (most common case).
131186 - *  - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden information.
131187 - *    Note : this use case also happens when using a non-conformant dictionary.
131188 - *  - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
131189 - *  - This is not a Zstandard frame.
131190 - *  When identifying the exact failure cause, it's possible to used ZSTD_getFrameParams(), which will provide a more precise error code. */
131191 -unsigned ZSTD_getDictID_fromFrame(const void *src, size_t srcSize)
131193 -       ZSTD_frameParams zfp = {0, 0, 0, 0};
131194 -       size_t const hError = ZSTD_getFrameParams(&zfp, src, srcSize);
131195 -       if (ZSTD_isError(hError))
131196 -               return 0;
131197 -       return zfp.dictID;
131200 -/*! ZSTD_decompress_usingDDict() :
131201 -*   Decompression using a pre-digested Dictionary
131202 -*   Use dictionary without significant overhead. */
131203 -size_t ZSTD_decompress_usingDDict(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const ZSTD_DDict *ddict)
131205 -       /* pass content and size in case legacy frames are encountered */
131206 -       return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, NULL, 0, ddict);
131209 -/*=====================================
131210 -*   Streaming decompression
131211 -*====================================*/
131213 -typedef enum { zdss_init, zdss_loadHeader, zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage;
131215 -/* *** Resource management *** */
131216 -struct ZSTD_DStream_s {
131217 -       ZSTD_DCtx *dctx;
131218 -       ZSTD_DDict *ddictLocal;
131219 -       const ZSTD_DDict *ddict;
131220 -       ZSTD_frameParams fParams;
131221 -       ZSTD_dStreamStage stage;
131222 -       char *inBuff;
131223 -       size_t inBuffSize;
131224 -       size_t inPos;
131225 -       size_t maxWindowSize;
131226 -       char *outBuff;
131227 -       size_t outBuffSize;
131228 -       size_t outStart;
131229 -       size_t outEnd;
131230 -       size_t blockSize;
131231 -       BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX]; /* tmp buffer to store frame header */
131232 -       size_t lhSize;
131233 -       ZSTD_customMem customMem;
131234 -       void *legacyContext;
131235 -       U32 previousLegacyVersion;
131236 -       U32 legacyVersion;
131237 -       U32 hostageByte;
131238 -}; /* typedef'd to ZSTD_DStream within "zstd.h" */
131240 -size_t ZSTD_DStreamWorkspaceBound(size_t maxWindowSize)
131242 -       size_t const blockSize = MIN(maxWindowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX);
131243 -       size_t const inBuffSize = blockSize;
131244 -       size_t const outBuffSize = maxWindowSize + blockSize + WILDCOPY_OVERLENGTH * 2;
131245 -       return ZSTD_DCtxWorkspaceBound() + ZSTD_ALIGN(sizeof(ZSTD_DStream)) + ZSTD_ALIGN(inBuffSize) + ZSTD_ALIGN(outBuffSize);
131248 -static ZSTD_DStream *ZSTD_createDStream_advanced(ZSTD_customMem customMem)
131250 -       ZSTD_DStream *zds;
131252 -       if (!customMem.customAlloc || !customMem.customFree)
131253 -               return NULL;
131255 -       zds = (ZSTD_DStream *)ZSTD_malloc(sizeof(ZSTD_DStream), customMem);
131256 -       if (zds == NULL)
131257 -               return NULL;
131258 -       memset(zds, 0, sizeof(ZSTD_DStream));
131259 -       memcpy(&zds->customMem, &customMem, sizeof(ZSTD_customMem));
131260 -       zds->dctx = ZSTD_createDCtx_advanced(customMem);
131261 -       if (zds->dctx == NULL) {
131262 -               ZSTD_freeDStream(zds);
131263 -               return NULL;
131264 -       }
131265 -       zds->stage = zdss_init;
131266 -       zds->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
131267 -       return zds;
131270 -ZSTD_DStream *ZSTD_initDStream(size_t maxWindowSize, void *workspace, size_t workspaceSize)
131272 -       ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
131273 -       ZSTD_DStream *zds = ZSTD_createDStream_advanced(stackMem);
131274 -       if (!zds) {
131275 -               return NULL;
131276 -       }
131278 -       zds->maxWindowSize = maxWindowSize;
131279 -       zds->stage = zdss_loadHeader;
131280 -       zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
131281 -       ZSTD_freeDDict(zds->ddictLocal);
131282 -       zds->ddictLocal = NULL;
131283 -       zds->ddict = zds->ddictLocal;
131284 -       zds->legacyVersion = 0;
131285 -       zds->hostageByte = 0;
131287 -       {
131288 -               size_t const blockSize = MIN(zds->maxWindowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX);
131289 -               size_t const neededOutSize = zds->maxWindowSize + blockSize + WILDCOPY_OVERLENGTH * 2;
131291 -               zds->inBuff = (char *)ZSTD_malloc(blockSize, zds->customMem);
131292 -               zds->inBuffSize = blockSize;
131293 -               zds->outBuff = (char *)ZSTD_malloc(neededOutSize, zds->customMem);
131294 -               zds->outBuffSize = neededOutSize;
131295 -               if (zds->inBuff == NULL || zds->outBuff == NULL) {
131296 -                       ZSTD_freeDStream(zds);
131297 -                       return NULL;
131298 -               }
131299 -       }
131300 -       return zds;
131303 -ZSTD_DStream *ZSTD_initDStream_usingDDict(size_t maxWindowSize, const ZSTD_DDict *ddict, void *workspace, size_t workspaceSize)
131305 -       ZSTD_DStream *zds = ZSTD_initDStream(maxWindowSize, workspace, workspaceSize);
131306 -       if (zds) {
131307 -               zds->ddict = ddict;
131308 -       }
131309 -       return zds;
131312 -size_t ZSTD_freeDStream(ZSTD_DStream *zds)
131314 -       if (zds == NULL)
131315 -               return 0; /* support free on null */
131316 -       {
131317 -               ZSTD_customMem const cMem = zds->customMem;
131318 -               ZSTD_freeDCtx(zds->dctx);
131319 -               zds->dctx = NULL;
131320 -               ZSTD_freeDDict(zds->ddictLocal);
131321 -               zds->ddictLocal = NULL;
131322 -               ZSTD_free(zds->inBuff, cMem);
131323 -               zds->inBuff = NULL;
131324 -               ZSTD_free(zds->outBuff, cMem);
131325 -               zds->outBuff = NULL;
131326 -               ZSTD_free(zds, cMem);
131327 -               return 0;
131328 -       }
131331 -/* *** Initialization *** */
131333 -size_t ZSTD_DStreamInSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX + ZSTD_blockHeaderSize; }
131334 -size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; }
131336 -size_t ZSTD_resetDStream(ZSTD_DStream *zds)
131338 -       zds->stage = zdss_loadHeader;
131339 -       zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
131340 -       zds->legacyVersion = 0;
131341 -       zds->hostageByte = 0;
131342 -       return ZSTD_frameHeaderSize_prefix;
131345 -/* *****   Decompression   ***** */
131347 -ZSTD_STATIC size_t ZSTD_limitCopy(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
131349 -       size_t const length = MIN(dstCapacity, srcSize);
131350 -       memcpy(dst, src, length);
131351 -       return length;
131354 -size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output, ZSTD_inBuffer *input)
131356 -       const char *const istart = (const char *)(input->src) + input->pos;
131357 -       const char *const iend = (const char *)(input->src) + input->size;
131358 -       const char *ip = istart;
131359 -       char *const ostart = (char *)(output->dst) + output->pos;
131360 -       char *const oend = (char *)(output->dst) + output->size;
131361 -       char *op = ostart;
131362 -       U32 someMoreWork = 1;
131364 -       while (someMoreWork) {
131365 -               switch (zds->stage) {
131366 -               case zdss_init:
131367 -                       ZSTD_resetDStream(zds); /* transparent reset on starting decoding a new frame */
131368 -                       fallthrough;
131370 -               case zdss_loadHeader: {
131371 -                       size_t const hSize = ZSTD_getFrameParams(&zds->fParams, zds->headerBuffer, zds->lhSize);
131372 -                       if (ZSTD_isError(hSize))
131373 -                               return hSize;
131374 -                       if (hSize != 0) {                                  /* need more input */
131375 -                               size_t const toLoad = hSize - zds->lhSize; /* if hSize!=0, hSize > zds->lhSize */
131376 -                               if (toLoad > (size_t)(iend - ip)) {     /* not enough input to load full header */
131377 -                                       memcpy(zds->headerBuffer + zds->lhSize, ip, iend - ip);
131378 -                                       zds->lhSize += iend - ip;
131379 -                                       input->pos = input->size;
131380 -                                       return (MAX(ZSTD_frameHeaderSize_min, hSize) - zds->lhSize) +
131381 -                                              ZSTD_blockHeaderSize; /* remaining header bytes + next block header */
131382 -                               }
131383 -                               memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad);
131384 -                               zds->lhSize = hSize;
131385 -                               ip += toLoad;
131386 -                               break;
131387 -                       }
131389 -                       /* check for single-pass mode opportunity */
131390 -                       if (zds->fParams.frameContentSize && zds->fParams.windowSize /* skippable frame if == 0 */
131391 -                           && (U64)(size_t)(oend - op) >= zds->fParams.frameContentSize) {
131392 -                               size_t const cSize = ZSTD_findFrameCompressedSize(istart, iend - istart);
131393 -                               if (cSize <= (size_t)(iend - istart)) {
131394 -                                       size_t const decompressedSize = ZSTD_decompress_usingDDict(zds->dctx, op, oend - op, istart, cSize, zds->ddict);
131395 -                                       if (ZSTD_isError(decompressedSize))
131396 -                                               return decompressedSize;
131397 -                                       ip = istart + cSize;
131398 -                                       op += decompressedSize;
131399 -                                       zds->dctx->expected = 0;
131400 -                                       zds->stage = zdss_init;
131401 -                                       someMoreWork = 0;
131402 -                                       break;
131403 -                               }
131404 -                       }
131406 -                       /* Consume header */
131407 -                       ZSTD_refDDict(zds->dctx, zds->ddict);
131408 -                       {
131409 -                               size_t const h1Size = ZSTD_nextSrcSizeToDecompress(zds->dctx); /* == ZSTD_frameHeaderSize_prefix */
131410 -                               CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 0, zds->headerBuffer, h1Size));
131411 -                               {
131412 -                                       size_t const h2Size = ZSTD_nextSrcSizeToDecompress(zds->dctx);
131413 -                                       CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 0, zds->headerBuffer + h1Size, h2Size));
131414 -                               }
131415 -                       }
131417 -                       zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
131418 -                       if (zds->fParams.windowSize > zds->maxWindowSize)
131419 -                               return ERROR(frameParameter_windowTooLarge);
131421 -                       /* Buffers are preallocated, but double check */
131422 -                       {
131423 -                               size_t const blockSize = MIN(zds->maxWindowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX);
131424 -                               size_t const neededOutSize = zds->maxWindowSize + blockSize + WILDCOPY_OVERLENGTH * 2;
131425 -                               if (zds->inBuffSize < blockSize) {
131426 -                                       return ERROR(GENERIC);
131427 -                               }
131428 -                               if (zds->outBuffSize < neededOutSize) {
131429 -                                       return ERROR(GENERIC);
131430 -                               }
131431 -                               zds->blockSize = blockSize;
131432 -                       }
131433 -                       zds->stage = zdss_read;
131434 -               }
131435 -                       fallthrough;
131437 -               case zdss_read: {
131438 -                       size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx);
131439 -                       if (neededInSize == 0) { /* end of frame */
131440 -                               zds->stage = zdss_init;
131441 -                               someMoreWork = 0;
131442 -                               break;
131443 -                       }
131444 -                       if ((size_t)(iend - ip) >= neededInSize) { /* decode directly from src */
131445 -                               const int isSkipFrame = ZSTD_isSkipFrame(zds->dctx);
131446 -                               size_t const decodedSize = ZSTD_decompressContinue(zds->dctx, zds->outBuff + zds->outStart,
131447 -                                                                                  (isSkipFrame ? 0 : zds->outBuffSize - zds->outStart), ip, neededInSize);
131448 -                               if (ZSTD_isError(decodedSize))
131449 -                                       return decodedSize;
131450 -                               ip += neededInSize;
131451 -                               if (!decodedSize && !isSkipFrame)
131452 -                                       break; /* this was just a header */
131453 -                               zds->outEnd = zds->outStart + decodedSize;
131454 -                               zds->stage = zdss_flush;
131455 -                               break;
131456 -                       }
131457 -                       if (ip == iend) {
131458 -                               someMoreWork = 0;
131459 -                               break;
131460 -                       } /* no more input */
131461 -                       zds->stage = zdss_load;
131462 -                       /* pass-through */
131463 -               }
131464 -                       fallthrough;
131466 -               case zdss_load: {
131467 -                       size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx);
131468 -                       size_t const toLoad = neededInSize - zds->inPos; /* should always be <= remaining space within inBuff */
131469 -                       size_t loadedSize;
131470 -                       if (toLoad > zds->inBuffSize - zds->inPos)
131471 -                               return ERROR(corruption_detected); /* should never happen */
131472 -                       loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, iend - ip);
131473 -                       ip += loadedSize;
131474 -                       zds->inPos += loadedSize;
131475 -                       if (loadedSize < toLoad) {
131476 -                               someMoreWork = 0;
131477 -                               break;
131478 -                       } /* not enough input, wait for more */
131480 -                       /* decode loaded input */
131481 -                       {
131482 -                               const int isSkipFrame = ZSTD_isSkipFrame(zds->dctx);
131483 -                               size_t const decodedSize = ZSTD_decompressContinue(zds->dctx, zds->outBuff + zds->outStart, zds->outBuffSize - zds->outStart,
131484 -                                                                                  zds->inBuff, neededInSize);
131485 -                               if (ZSTD_isError(decodedSize))
131486 -                                       return decodedSize;
131487 -                               zds->inPos = 0; /* input is consumed */
131488 -                               if (!decodedSize && !isSkipFrame) {
131489 -                                       zds->stage = zdss_read;
131490 -                                       break;
131491 -                               } /* this was just a header */
131492 -                               zds->outEnd = zds->outStart + decodedSize;
131493 -                               zds->stage = zdss_flush;
131494 -                               /* pass-through */
131495 -                       }
131496 -               }
131497 -                       fallthrough;
131499 -               case zdss_flush: {
131500 -                       size_t const toFlushSize = zds->outEnd - zds->outStart;
131501 -                       size_t const flushedSize = ZSTD_limitCopy(op, oend - op, zds->outBuff + zds->outStart, toFlushSize);
131502 -                       op += flushedSize;
131503 -                       zds->outStart += flushedSize;
131504 -                       if (flushedSize == toFlushSize) { /* flush completed */
131505 -                               zds->stage = zdss_read;
131506 -                               if (zds->outStart + zds->blockSize > zds->outBuffSize)
131507 -                                       zds->outStart = zds->outEnd = 0;
131508 -                               break;
131509 -                       }
131510 -                       /* cannot complete flush */
131511 -                       someMoreWork = 0;
131512 -                       break;
131513 -               }
131514 -               default:
131515 -                       return ERROR(GENERIC); /* impossible */
131516 -               }
131517 -       }
131519 -       /* result */
131520 -       input->pos += (size_t)(ip - istart);
131521 -       output->pos += (size_t)(op - ostart);
131522 -       {
131523 -               size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds->dctx);
131524 -               if (!nextSrcSizeHint) {                     /* frame fully decoded */
131525 -                       if (zds->outEnd == zds->outStart) { /* output fully flushed */
131526 -                               if (zds->hostageByte) {
131527 -                                       if (input->pos >= input->size) {
131528 -                                               zds->stage = zdss_read;
131529 -                                               return 1;
131530 -                                       }            /* can't release hostage (not present) */
131531 -                                       input->pos++; /* release hostage */
131532 -                               }
131533 -                               return 0;
131534 -                       }
131535 -                       if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */
131536 -                               input->pos--;    /* note : pos > 0, otherwise, impossible to finish reading last block */
131537 -                               zds->hostageByte = 1;
131538 -                       }
131539 -                       return 1;
131540 -               }
131541 -               nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds->dctx) == ZSTDnit_block); /* preload header of next block */
131542 -               if (zds->inPos > nextSrcSizeHint)
131543 -                       return ERROR(GENERIC); /* should never happen */
131544 -               nextSrcSizeHint -= zds->inPos; /* already loaded*/
131545 -               return nextSrcSizeHint;
131546 -       }
131549 -EXPORT_SYMBOL(ZSTD_DCtxWorkspaceBound);
131550 -EXPORT_SYMBOL(ZSTD_initDCtx);
131551 -EXPORT_SYMBOL(ZSTD_decompressDCtx);
131552 -EXPORT_SYMBOL(ZSTD_decompress_usingDict);
131554 -EXPORT_SYMBOL(ZSTD_DDictWorkspaceBound);
131555 -EXPORT_SYMBOL(ZSTD_initDDict);
131556 -EXPORT_SYMBOL(ZSTD_decompress_usingDDict);
131558 -EXPORT_SYMBOL(ZSTD_DStreamWorkspaceBound);
131559 -EXPORT_SYMBOL(ZSTD_initDStream);
131560 -EXPORT_SYMBOL(ZSTD_initDStream_usingDDict);
131561 -EXPORT_SYMBOL(ZSTD_resetDStream);
131562 -EXPORT_SYMBOL(ZSTD_decompressStream);
131563 -EXPORT_SYMBOL(ZSTD_DStreamInSize);
131564 -EXPORT_SYMBOL(ZSTD_DStreamOutSize);
131566 -EXPORT_SYMBOL(ZSTD_findFrameCompressedSize);
131567 -EXPORT_SYMBOL(ZSTD_getFrameContentSize);
131568 -EXPORT_SYMBOL(ZSTD_findDecompressedSize);
131570 -EXPORT_SYMBOL(ZSTD_isFrame);
131571 -EXPORT_SYMBOL(ZSTD_getDictID_fromDict);
131572 -EXPORT_SYMBOL(ZSTD_getDictID_fromDDict);
131573 -EXPORT_SYMBOL(ZSTD_getDictID_fromFrame);
131575 -EXPORT_SYMBOL(ZSTD_getFrameParams);
131576 -EXPORT_SYMBOL(ZSTD_decompressBegin);
131577 -EXPORT_SYMBOL(ZSTD_decompressBegin_usingDict);
131578 -EXPORT_SYMBOL(ZSTD_copyDCtx);
131579 -EXPORT_SYMBOL(ZSTD_nextSrcSizeToDecompress);
131580 -EXPORT_SYMBOL(ZSTD_decompressContinue);
131581 -EXPORT_SYMBOL(ZSTD_nextInputType);
131583 -EXPORT_SYMBOL(ZSTD_decompressBlock);
131584 -EXPORT_SYMBOL(ZSTD_insertBlock);
131586 -MODULE_LICENSE("Dual BSD/GPL");
131587 -MODULE_DESCRIPTION("Zstd Decompressor");
131588 diff --git a/lib/zstd/decompress/huf_decompress.c b/lib/zstd/decompress/huf_decompress.c
131589 new file mode 100644
131590 index 000000000000..dee939434873
131591 --- /dev/null
131592 +++ b/lib/zstd/decompress/huf_decompress.c
131593 @@ -0,0 +1,1205 @@
131594 +/* ******************************************************************
131595 + * huff0 huffman decoder,
131596 + * part of Finite State Entropy library
131597 + * Copyright (c) Yann Collet, Facebook, Inc.
131599 + *  You can contact the author at :
131600 + *  - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
131602 + * This source code is licensed under both the BSD-style license (found in the
131603 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
131604 + * in the COPYING file in the root directory of this source tree).
131605 + * You may select, at your option, one of the above-listed licenses.
131606 +****************************************************************** */
131608 +/* **************************************************************
131609 +*  Dependencies
131610 +****************************************************************/
131611 +#include "../common/zstd_deps.h"  /* ZSTD_memcpy, ZSTD_memset */
131612 +#include "../common/compiler.h"
131613 +#include "../common/bitstream.h"  /* BIT_* */
131614 +#include "../common/fse.h"        /* to compress headers */
131615 +#define HUF_STATIC_LINKING_ONLY
131616 +#include "../common/huf.h"
131617 +#include "../common/error_private.h"
131619 +/* **************************************************************
131620 +*  Macros
131621 +****************************************************************/
131623 +/* These two optional macros force the use one way or another of the two
131624 + * Huffman decompression implementations. You can't force in both directions
131625 + * at the same time.
131626 + */
131627 +#if defined(HUF_FORCE_DECOMPRESS_X1) && \
131628 +    defined(HUF_FORCE_DECOMPRESS_X2)
131629 +#error "Cannot force the use of the X1 and X2 decoders at the same time!"
131630 +#endif
131633 +/* **************************************************************
131634 +*  Error Management
131635 +****************************************************************/
131636 +#define HUF_isError ERR_isError
131639 +/* **************************************************************
131640 +*  Byte alignment for workSpace management
131641 +****************************************************************/
131642 +#define HUF_ALIGN(x, a)         HUF_ALIGN_MASK((x), (a) - 1)
131643 +#define HUF_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
131646 +/* **************************************************************
131647 +*  BMI2 Variant Wrappers
131648 +****************************************************************/
131649 +#if DYNAMIC_BMI2
131651 +#define HUF_DGEN(fn)                                                        \
131652 +                                                                            \
131653 +    static size_t fn##_default(                                             \
131654 +                  void* dst,  size_t dstSize,                               \
131655 +            const void* cSrc, size_t cSrcSize,                              \
131656 +            const HUF_DTable* DTable)                                       \
131657 +    {                                                                       \
131658 +        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable);             \
131659 +    }                                                                       \
131660 +                                                                            \
131661 +    static TARGET_ATTRIBUTE("bmi2") size_t fn##_bmi2(                       \
131662 +                  void* dst,  size_t dstSize,                               \
131663 +            const void* cSrc, size_t cSrcSize,                              \
131664 +            const HUF_DTable* DTable)                                       \
131665 +    {                                                                       \
131666 +        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable);             \
131667 +    }                                                                       \
131668 +                                                                            \
131669 +    static size_t fn(void* dst, size_t dstSize, void const* cSrc,           \
131670 +                     size_t cSrcSize, HUF_DTable const* DTable, int bmi2)   \
131671 +    {                                                                       \
131672 +        if (bmi2) {                                                         \
131673 +            return fn##_bmi2(dst, dstSize, cSrc, cSrcSize, DTable);         \
131674 +        }                                                                   \
131675 +        return fn##_default(dst, dstSize, cSrc, cSrcSize, DTable);          \
131676 +    }
131678 +#else
131680 +#define HUF_DGEN(fn)                                                        \
131681 +    static size_t fn(void* dst, size_t dstSize, void const* cSrc,           \
131682 +                     size_t cSrcSize, HUF_DTable const* DTable, int bmi2)   \
131683 +    {                                                                       \
131684 +        (void)bmi2;                                                         \
131685 +        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable);             \
131686 +    }
131688 +#endif
131691 +/*-***************************/
131692 +/*  generic DTableDesc       */
131693 +/*-***************************/
131694 +typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc;
131696 +static DTableDesc HUF_getDTableDesc(const HUF_DTable* table)
131698 +    DTableDesc dtd;
131699 +    ZSTD_memcpy(&dtd, table, sizeof(dtd));
131700 +    return dtd;
131704 +#ifndef HUF_FORCE_DECOMPRESS_X2
131706 +/*-***************************/
131707 +/*  single-symbol decoding   */
131708 +/*-***************************/
131709 +typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX1;   /* single-symbol decoding */
131712 + * Packs 4 HUF_DEltX1 structs into a U64. This is used to lay down 4 entries at
131713 + * a time.
131714 + */
131715 +static U64 HUF_DEltX1_set4(BYTE symbol, BYTE nbBits) {
131716 +    U64 D4;
131717 +    if (MEM_isLittleEndian()) {
131718 +        D4 = symbol + (nbBits << 8);
131719 +    } else {
131720 +        D4 = (symbol << 8) + nbBits;
131721 +    }
131722 +    D4 *= 0x0001000100010001ULL;
131723 +    return D4;
131726 +typedef struct {
131727 +        U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1];
131728 +        U32 rankStart[HUF_TABLELOG_ABSOLUTEMAX + 1];
131729 +        U32 statsWksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
131730 +        BYTE symbols[HUF_SYMBOLVALUE_MAX + 1];
131731 +        BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];
131732 +} HUF_ReadDTableX1_Workspace;
131735 +size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize)
131737 +    return HUF_readDTableX1_wksp_bmi2(DTable, src, srcSize, workSpace, wkspSize, /* bmi2 */ 0);
131740 +size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2)
131742 +    U32 tableLog = 0;
131743 +    U32 nbSymbols = 0;
131744 +    size_t iSize;
131745 +    void* const dtPtr = DTable + 1;
131746 +    HUF_DEltX1* const dt = (HUF_DEltX1*)dtPtr;
131747 +    HUF_ReadDTableX1_Workspace* wksp = (HUF_ReadDTableX1_Workspace*)workSpace;
131749 +    DEBUG_STATIC_ASSERT(HUF_DECOMPRESS_WORKSPACE_SIZE >= sizeof(*wksp));
131750 +    if (sizeof(*wksp) > wkspSize) return ERROR(tableLog_tooLarge);
131752 +    DEBUG_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
131753 +    /* ZSTD_memset(huffWeight, 0, sizeof(huffWeight)); */   /* is not necessary, even though some analyzer complain ... */
131755 +    iSize = HUF_readStats_wksp(wksp->huffWeight, HUF_SYMBOLVALUE_MAX + 1, wksp->rankVal, &nbSymbols, &tableLog, src, srcSize, wksp->statsWksp, sizeof(wksp->statsWksp), bmi2);
131756 +    if (HUF_isError(iSize)) return iSize;
131758 +    /* Table header */
131759 +    {   DTableDesc dtd = HUF_getDTableDesc(DTable);
131760 +        if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge);   /* DTable too small, Huffman tree cannot fit in */
131761 +        dtd.tableType = 0;
131762 +        dtd.tableLog = (BYTE)tableLog;
131763 +        ZSTD_memcpy(DTable, &dtd, sizeof(dtd));
131764 +    }
131766 +    /* Compute symbols and rankStart given rankVal:
131767 +     *
131768 +     * rankVal already contains the number of values of each weight.
131769 +     *
131770 +     * symbols contains the symbols ordered by weight. First are the rankVal[0]
131771 +     * weight 0 symbols, followed by the rankVal[1] weight 1 symbols, and so on.
131772 +     * symbols[0] is filled (but unused) to avoid a branch.
131773 +     *
131774 +     * rankStart contains the offset where each rank belongs in the DTable.
131775 +     * rankStart[0] is not filled because there are no entries in the table for
131776 +     * weight 0.
131777 +     */
131778 +    {
131779 +        int n;
131780 +        int nextRankStart = 0;
131781 +        int const unroll = 4;
131782 +        int const nLimit = (int)nbSymbols - unroll + 1;
131783 +        for (n=0; n<(int)tableLog+1; n++) {
131784 +            U32 const curr = nextRankStart;
131785 +            nextRankStart += wksp->rankVal[n];
131786 +            wksp->rankStart[n] = curr;
131787 +        }
131788 +        for (n=0; n < nLimit; n += unroll) {
131789 +            int u;
131790 +            for (u=0; u < unroll; ++u) {
131791 +                size_t const w = wksp->huffWeight[n+u];
131792 +                wksp->symbols[wksp->rankStart[w]++] = (BYTE)(n+u);
131793 +            }
131794 +        }
131795 +        for (; n < (int)nbSymbols; ++n) {
131796 +            size_t const w = wksp->huffWeight[n];
131797 +            wksp->symbols[wksp->rankStart[w]++] = (BYTE)n;
131798 +        }
131799 +    }
131801 +    /* fill DTable
131802 +     * We fill all entries of each weight in order.
131803 +     * That way length is a constant for each iteration of the outter loop.
131804 +     * We can switch based on the length to a different inner loop which is
131805 +     * optimized for that particular case.
131806 +     */
131807 +    {
131808 +        U32 w;
131809 +        int symbol=wksp->rankVal[0];
131810 +        int rankStart=0;
131811 +        for (w=1; w<tableLog+1; ++w) {
131812 +            int const symbolCount = wksp->rankVal[w];
131813 +            int const length = (1 << w) >> 1;
131814 +            int uStart = rankStart;
131815 +            BYTE const nbBits = (BYTE)(tableLog + 1 - w);
131816 +            int s;
131817 +            int u;
131818 +            switch (length) {
131819 +            case 1:
131820 +                for (s=0; s<symbolCount; ++s) {
131821 +                    HUF_DEltX1 D;
131822 +                    D.byte = wksp->symbols[symbol + s];
131823 +                    D.nbBits = nbBits;
131824 +                    dt[uStart] = D;
131825 +                    uStart += 1;
131826 +                }
131827 +                break;
131828 +            case 2:
131829 +                for (s=0; s<symbolCount; ++s) {
131830 +                    HUF_DEltX1 D;
131831 +                    D.byte = wksp->symbols[symbol + s];
131832 +                    D.nbBits = nbBits;
131833 +                    dt[uStart+0] = D;
131834 +                    dt[uStart+1] = D;
131835 +                    uStart += 2;
131836 +                }
131837 +                break;
131838 +            case 4:
131839 +                for (s=0; s<symbolCount; ++s) {
131840 +                    U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
131841 +                    MEM_write64(dt + uStart, D4);
131842 +                    uStart += 4;
131843 +                }
131844 +                break;
131845 +            case 8:
131846 +                for (s=0; s<symbolCount; ++s) {
131847 +                    U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
131848 +                    MEM_write64(dt + uStart, D4);
131849 +                    MEM_write64(dt + uStart + 4, D4);
131850 +                    uStart += 8;
131851 +                }
131852 +                break;
131853 +            default:
131854 +                for (s=0; s<symbolCount; ++s) {
131855 +                    U64 const D4 = HUF_DEltX1_set4(wksp->symbols[symbol + s], nbBits);
131856 +                    for (u=0; u < length; u += 16) {
131857 +                        MEM_write64(dt + uStart + u + 0, D4);
131858 +                        MEM_write64(dt + uStart + u + 4, D4);
131859 +                        MEM_write64(dt + uStart + u + 8, D4);
131860 +                        MEM_write64(dt + uStart + u + 12, D4);
131861 +                    }
131862 +                    assert(u == length);
131863 +                    uStart += length;
131864 +                }
131865 +                break;
131866 +            }
131867 +            symbol += symbolCount;
131868 +            rankStart += symbolCount * length;
131869 +        }
131870 +    }
131871 +    return iSize;
131874 +FORCE_INLINE_TEMPLATE BYTE
131875 +HUF_decodeSymbolX1(BIT_DStream_t* Dstream, const HUF_DEltX1* dt, const U32 dtLog)
131877 +    size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
131878 +    BYTE const c = dt[val].byte;
131879 +    BIT_skipBits(Dstream, dt[val].nbBits);
131880 +    return c;
131883 +#define HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) \
131884 +    *ptr++ = HUF_decodeSymbolX1(DStreamPtr, dt, dtLog)
131886 +#define HUF_DECODE_SYMBOLX1_1(ptr, DStreamPtr)  \
131887 +    if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
131888 +        HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)
131890 +#define HUF_DECODE_SYMBOLX1_2(ptr, DStreamPtr) \
131891 +    if (MEM_64bits()) \
131892 +        HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr)
131894 +HINT_INLINE size_t
131895 +HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX1* const dt, const U32 dtLog)
131897 +    BYTE* const pStart = p;
131899 +    /* up to 4 symbols at a time */
131900 +    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-3)) {
131901 +        HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
131902 +        HUF_DECODE_SYMBOLX1_1(p, bitDPtr);
131903 +        HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
131904 +        HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
131905 +    }
131907 +    /* [0-3] symbols remaining */
131908 +    if (MEM_32bits())
131909 +        while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd))
131910 +            HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
131912 +    /* no more data to retrieve from bitstream, no need to reload */
131913 +    while (p < pEnd)
131914 +        HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
131916 +    return pEnd-pStart;
131919 +FORCE_INLINE_TEMPLATE size_t
131920 +HUF_decompress1X1_usingDTable_internal_body(
131921 +          void* dst,  size_t dstSize,
131922 +    const void* cSrc, size_t cSrcSize,
131923 +    const HUF_DTable* DTable)
131925 +    BYTE* op = (BYTE*)dst;
131926 +    BYTE* const oend = op + dstSize;
131927 +    const void* dtPtr = DTable + 1;
131928 +    const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
131929 +    BIT_DStream_t bitD;
131930 +    DTableDesc const dtd = HUF_getDTableDesc(DTable);
131931 +    U32 const dtLog = dtd.tableLog;
131933 +    CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
131935 +    HUF_decodeStreamX1(op, &bitD, oend, dt, dtLog);
131937 +    if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
131939 +    return dstSize;
131942 +FORCE_INLINE_TEMPLATE size_t
131943 +HUF_decompress4X1_usingDTable_internal_body(
131944 +          void* dst,  size_t dstSize,
131945 +    const void* cSrc, size_t cSrcSize,
131946 +    const HUF_DTable* DTable)
131948 +    /* Check */
131949 +    if (cSrcSize < 10) return ERROR(corruption_detected);  /* strict minimum : jump table + 1 byte per stream */
131951 +    {   const BYTE* const istart = (const BYTE*) cSrc;
131952 +        BYTE* const ostart = (BYTE*) dst;
131953 +        BYTE* const oend = ostart + dstSize;
131954 +        BYTE* const olimit = oend - 3;
131955 +        const void* const dtPtr = DTable + 1;
131956 +        const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
131958 +        /* Init */
131959 +        BIT_DStream_t bitD1;
131960 +        BIT_DStream_t bitD2;
131961 +        BIT_DStream_t bitD3;
131962 +        BIT_DStream_t bitD4;
131963 +        size_t const length1 = MEM_readLE16(istart);
131964 +        size_t const length2 = MEM_readLE16(istart+2);
131965 +        size_t const length3 = MEM_readLE16(istart+4);
131966 +        size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
131967 +        const BYTE* const istart1 = istart + 6;  /* jumpTable */
131968 +        const BYTE* const istart2 = istart1 + length1;
131969 +        const BYTE* const istart3 = istart2 + length2;
131970 +        const BYTE* const istart4 = istart3 + length3;
131971 +        const size_t segmentSize = (dstSize+3) / 4;
131972 +        BYTE* const opStart2 = ostart + segmentSize;
131973 +        BYTE* const opStart3 = opStart2 + segmentSize;
131974 +        BYTE* const opStart4 = opStart3 + segmentSize;
131975 +        BYTE* op1 = ostart;
131976 +        BYTE* op2 = opStart2;
131977 +        BYTE* op3 = opStart3;
131978 +        BYTE* op4 = opStart4;
131979 +        DTableDesc const dtd = HUF_getDTableDesc(DTable);
131980 +        U32 const dtLog = dtd.tableLog;
131981 +        U32 endSignal = 1;
131983 +        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
131984 +        CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
131985 +        CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
131986 +        CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
131987 +        CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
131989 +        /* up to 16 symbols per loop (4 symbols per stream) in 64-bit mode */
131990 +        for ( ; (endSignal) & (op4 < olimit) ; ) {
131991 +            HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
131992 +            HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
131993 +            HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
131994 +            HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
131995 +            HUF_DECODE_SYMBOLX1_1(op1, &bitD1);
131996 +            HUF_DECODE_SYMBOLX1_1(op2, &bitD2);
131997 +            HUF_DECODE_SYMBOLX1_1(op3, &bitD3);
131998 +            HUF_DECODE_SYMBOLX1_1(op4, &bitD4);
131999 +            HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
132000 +            HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
132001 +            HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
132002 +            HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
132003 +            HUF_DECODE_SYMBOLX1_0(op1, &bitD1);
132004 +            HUF_DECODE_SYMBOLX1_0(op2, &bitD2);
132005 +            HUF_DECODE_SYMBOLX1_0(op3, &bitD3);
132006 +            HUF_DECODE_SYMBOLX1_0(op4, &bitD4);
132007 +            endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished;
132008 +            endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished;
132009 +            endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished;
132010 +            endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished;
132011 +        }
132013 +        /* check corruption */
132014 +        /* note : should not be necessary : op# advance in lock step, and we control op4.
132015 +         *        but curiously, binary generated by gcc 7.2 & 7.3 with -mbmi2 runs faster when >=1 test is present */
132016 +        if (op1 > opStart2) return ERROR(corruption_detected);
132017 +        if (op2 > opStart3) return ERROR(corruption_detected);
132018 +        if (op3 > opStart4) return ERROR(corruption_detected);
132019 +        /* note : op4 supposed already verified within main loop */
132021 +        /* finish bitStreams one by one */
132022 +        HUF_decodeStreamX1(op1, &bitD1, opStart2, dt, dtLog);
132023 +        HUF_decodeStreamX1(op2, &bitD2, opStart3, dt, dtLog);
132024 +        HUF_decodeStreamX1(op3, &bitD3, opStart4, dt, dtLog);
132025 +        HUF_decodeStreamX1(op4, &bitD4, oend,     dt, dtLog);
132027 +        /* check */
132028 +        { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
132029 +          if (!endCheck) return ERROR(corruption_detected); }
132031 +        /* decoded size */
132032 +        return dstSize;
132033 +    }
132037 +typedef size_t (*HUF_decompress_usingDTable_t)(void *dst, size_t dstSize,
132038 +                                               const void *cSrc,
132039 +                                               size_t cSrcSize,
132040 +                                               const HUF_DTable *DTable);
132042 +HUF_DGEN(HUF_decompress1X1_usingDTable_internal)
132043 +HUF_DGEN(HUF_decompress4X1_usingDTable_internal)
132047 +size_t HUF_decompress1X1_usingDTable(
132048 +          void* dst,  size_t dstSize,
132049 +    const void* cSrc, size_t cSrcSize,
132050 +    const HUF_DTable* DTable)
132052 +    DTableDesc dtd = HUF_getDTableDesc(DTable);
132053 +    if (dtd.tableType != 0) return ERROR(GENERIC);
132054 +    return HUF_decompress1X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
132057 +size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
132058 +                                   const void* cSrc, size_t cSrcSize,
132059 +                                   void* workSpace, size_t wkspSize)
132061 +    const BYTE* ip = (const BYTE*) cSrc;
132063 +    size_t const hSize = HUF_readDTableX1_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize);
132064 +    if (HUF_isError(hSize)) return hSize;
132065 +    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
132066 +    ip += hSize; cSrcSize -= hSize;
132068 +    return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
132072 +size_t HUF_decompress4X1_usingDTable(
132073 +          void* dst,  size_t dstSize,
132074 +    const void* cSrc, size_t cSrcSize,
132075 +    const HUF_DTable* DTable)
132077 +    DTableDesc dtd = HUF_getDTableDesc(DTable);
132078 +    if (dtd.tableType != 0) return ERROR(GENERIC);
132079 +    return HUF_decompress4X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
132082 +static size_t HUF_decompress4X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
132083 +                                   const void* cSrc, size_t cSrcSize,
132084 +                                   void* workSpace, size_t wkspSize, int bmi2)
132086 +    const BYTE* ip = (const BYTE*) cSrc;
132088 +    size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
132089 +    if (HUF_isError(hSize)) return hSize;
132090 +    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
132091 +    ip += hSize; cSrcSize -= hSize;
132093 +    return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
132096 +size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
132097 +                                   const void* cSrc, size_t cSrcSize,
132098 +                                   void* workSpace, size_t wkspSize)
132100 +    return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, 0);
132104 +#endif /* HUF_FORCE_DECOMPRESS_X2 */
132107 +#ifndef HUF_FORCE_DECOMPRESS_X1
132109 +/* *************************/
132110 +/* double-symbols decoding */
132111 +/* *************************/
132113 +typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX2;  /* double-symbols decoding */
132114 +typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
132115 +typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1];
132116 +typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX];
132119 +/* HUF_fillDTableX2Level2() :
132120 + * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */
132121 +static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 sizeLog, const U32 consumed,
132122 +                           const U32* rankValOrigin, const int minWeight,
132123 +                           const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
132124 +                           U32 nbBitsBaseline, U16 baseSeq, U32* wksp, size_t wkspSize)
132126 +    HUF_DEltX2 DElt;
132127 +    U32* rankVal = wksp;
132129 +    assert(wkspSize >= HUF_TABLELOG_MAX + 1);
132130 +    (void)wkspSize;
132131 +    /* get pre-calculated rankVal */
132132 +    ZSTD_memcpy(rankVal, rankValOrigin, sizeof(U32) * (HUF_TABLELOG_MAX + 1));
132134 +    /* fill skipped values */
132135 +    if (minWeight>1) {
132136 +        U32 i, skipSize = rankVal[minWeight];
132137 +        MEM_writeLE16(&(DElt.sequence), baseSeq);
132138 +        DElt.nbBits   = (BYTE)(consumed);
132139 +        DElt.length   = 1;
132140 +        for (i = 0; i < skipSize; i++)
132141 +            DTable[i] = DElt;
132142 +    }
132144 +    /* fill DTable */
132145 +    {   U32 s; for (s=0; s<sortedListSize; s++) {   /* note : sortedSymbols already skipped */
132146 +            const U32 symbol = sortedSymbols[s].symbol;
132147 +            const U32 weight = sortedSymbols[s].weight;
132148 +            const U32 nbBits = nbBitsBaseline - weight;
132149 +            const U32 length = 1 << (sizeLog-nbBits);
132150 +            const U32 start = rankVal[weight];
132151 +            U32 i = start;
132152 +            const U32 end = start + length;
132154 +            MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
132155 +            DElt.nbBits = (BYTE)(nbBits + consumed);
132156 +            DElt.length = 2;
132157 +            do { DTable[i++] = DElt; } while (i<end);   /* since length >= 1 */
132159 +            rankVal[weight] += length;
132160 +    }   }
132164 +static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog,
132165 +                           const sortedSymbol_t* sortedList, const U32 sortedListSize,
132166 +                           const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
132167 +                           const U32 nbBitsBaseline, U32* wksp, size_t wkspSize)
132169 +    U32* rankVal = wksp;
132170 +    const int scaleLog = nbBitsBaseline - targetLog;   /* note : targetLog >= srcLog, hence scaleLog <= 1 */
132171 +    const U32 minBits  = nbBitsBaseline - maxWeight;
132172 +    U32 s;
132174 +    assert(wkspSize >= HUF_TABLELOG_MAX + 1);
132175 +    wksp += HUF_TABLELOG_MAX + 1;
132176 +    wkspSize -= HUF_TABLELOG_MAX + 1;
132178 +    ZSTD_memcpy(rankVal, rankValOrigin, sizeof(U32) * (HUF_TABLELOG_MAX + 1));
132180 +    /* fill DTable */
132181 +    for (s=0; s<sortedListSize; s++) {
132182 +        const U16 symbol = sortedList[s].symbol;
132183 +        const U32 weight = sortedList[s].weight;
132184 +        const U32 nbBits = nbBitsBaseline - weight;
132185 +        const U32 start = rankVal[weight];
132186 +        const U32 length = 1 << (targetLog-nbBits);
132188 +        if (targetLog-nbBits >= minBits) {   /* enough room for a second symbol */
132189 +            U32 sortedRank;
132190 +            int minWeight = nbBits + scaleLog;
132191 +            if (minWeight < 1) minWeight = 1;
132192 +            sortedRank = rankStart[minWeight];
132193 +            HUF_fillDTableX2Level2(DTable+start, targetLog-nbBits, nbBits,
132194 +                           rankValOrigin[nbBits], minWeight,
132195 +                           sortedList+sortedRank, sortedListSize-sortedRank,
132196 +                           nbBitsBaseline, symbol, wksp, wkspSize);
132197 +        } else {
132198 +            HUF_DEltX2 DElt;
132199 +            MEM_writeLE16(&(DElt.sequence), symbol);
132200 +            DElt.nbBits = (BYTE)(nbBits);
132201 +            DElt.length = 1;
132202 +            {   U32 const end = start + length;
132203 +                U32 u;
132204 +                for (u = start; u < end; u++) DTable[u] = DElt;
132205 +        }   }
132206 +        rankVal[weight] += length;
132207 +    }
132210 +typedef struct {
132211 +    rankValCol_t rankVal[HUF_TABLELOG_MAX];
132212 +    U32 rankStats[HUF_TABLELOG_MAX + 1];
132213 +    U32 rankStart0[HUF_TABLELOG_MAX + 2];
132214 +    sortedSymbol_t sortedSymbol[HUF_SYMBOLVALUE_MAX + 1];
132215 +    BYTE weightList[HUF_SYMBOLVALUE_MAX + 1];
132216 +    U32 calleeWksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
132217 +} HUF_ReadDTableX2_Workspace;
132219 +size_t HUF_readDTableX2_wksp(HUF_DTable* DTable,
132220 +                       const void* src, size_t srcSize,
132221 +                             void* workSpace, size_t wkspSize)
132223 +    U32 tableLog, maxW, sizeOfSort, nbSymbols;
132224 +    DTableDesc dtd = HUF_getDTableDesc(DTable);
132225 +    U32 const maxTableLog = dtd.maxTableLog;
132226 +    size_t iSize;
132227 +    void* dtPtr = DTable+1;   /* force compiler to avoid strict-aliasing */
132228 +    HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;
132229 +    U32 *rankStart;
132231 +    HUF_ReadDTableX2_Workspace* const wksp = (HUF_ReadDTableX2_Workspace*)workSpace;
132233 +    if (sizeof(*wksp) > wkspSize) return ERROR(GENERIC);
132235 +    rankStart = wksp->rankStart0 + 1;
132236 +    ZSTD_memset(wksp->rankStats, 0, sizeof(wksp->rankStats));
132237 +    ZSTD_memset(wksp->rankStart0, 0, sizeof(wksp->rankStart0));
132239 +    DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(HUF_DTable));   /* if compiler fails here, assertion is wrong */
132240 +    if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
132241 +    /* ZSTD_memset(weightList, 0, sizeof(weightList)); */  /* is not necessary, even though some analyzer complain ... */
132243 +    iSize = HUF_readStats_wksp(wksp->weightList, HUF_SYMBOLVALUE_MAX + 1, wksp->rankStats, &nbSymbols, &tableLog, src, srcSize, wksp->calleeWksp, sizeof(wksp->calleeWksp), /* bmi2 */ 0);
132244 +    if (HUF_isError(iSize)) return iSize;
132246 +    /* check result */
132247 +    if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge);   /* DTable can't fit code depth */
132249 +    /* find maxWeight */
132250 +    for (maxW = tableLog; wksp->rankStats[maxW]==0; maxW--) {}  /* necessarily finds a solution before 0 */
132252 +    /* Get start index of each weight */
132253 +    {   U32 w, nextRankStart = 0;
132254 +        for (w=1; w<maxW+1; w++) {
132255 +            U32 curr = nextRankStart;
132256 +            nextRankStart += wksp->rankStats[w];
132257 +            rankStart[w] = curr;
132258 +        }
132259 +        rankStart[0] = nextRankStart;   /* put all 0w symbols at the end of sorted list*/
132260 +        sizeOfSort = nextRankStart;
132261 +    }
132263 +    /* sort symbols by weight */
132264 +    {   U32 s;
132265 +        for (s=0; s<nbSymbols; s++) {
132266 +            U32 const w = wksp->weightList[s];
132267 +            U32 const r = rankStart[w]++;
132268 +            wksp->sortedSymbol[r].symbol = (BYTE)s;
132269 +            wksp->sortedSymbol[r].weight = (BYTE)w;
132270 +        }
132271 +        rankStart[0] = 0;   /* forget 0w symbols; this is beginning of weight(1) */
132272 +    }
132274 +    /* Build rankVal */
132275 +    {   U32* const rankVal0 = wksp->rankVal[0];
132276 +        {   int const rescale = (maxTableLog-tableLog) - 1;   /* tableLog <= maxTableLog */
132277 +            U32 nextRankVal = 0;
132278 +            U32 w;
132279 +            for (w=1; w<maxW+1; w++) {
132280 +                U32 curr = nextRankVal;
132281 +                nextRankVal += wksp->rankStats[w] << (w+rescale);
132282 +                rankVal0[w] = curr;
132283 +        }   }
132284 +        {   U32 const minBits = tableLog+1 - maxW;
132285 +            U32 consumed;
132286 +            for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {
132287 +                U32* const rankValPtr = wksp->rankVal[consumed];
132288 +                U32 w;
132289 +                for (w = 1; w < maxW+1; w++) {
132290 +                    rankValPtr[w] = rankVal0[w] >> consumed;
132291 +    }   }   }   }
132293 +    HUF_fillDTableX2(dt, maxTableLog,
132294 +                   wksp->sortedSymbol, sizeOfSort,
132295 +                   wksp->rankStart0, wksp->rankVal, maxW,
132296 +                   tableLog+1,
132297 +                   wksp->calleeWksp, sizeof(wksp->calleeWksp) / sizeof(U32));
132299 +    dtd.tableLog = (BYTE)maxTableLog;
132300 +    dtd.tableType = 1;
132301 +    ZSTD_memcpy(DTable, &dtd, sizeof(dtd));
132302 +    return iSize;
132306 +FORCE_INLINE_TEMPLATE U32
132307 +HUF_decodeSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
132309 +    size_t const val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
132310 +    ZSTD_memcpy(op, dt+val, 2);
132311 +    BIT_skipBits(DStream, dt[val].nbBits);
132312 +    return dt[val].length;
132315 +FORCE_INLINE_TEMPLATE U32
132316 +HUF_decodeLastSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog)
132318 +    size_t const val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
132319 +    ZSTD_memcpy(op, dt+val, 1);
132320 +    if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
132321 +    else {
132322 +        if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
132323 +            BIT_skipBits(DStream, dt[val].nbBits);
132324 +            if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
132325 +                /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
132326 +                DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);
132327 +    }   }
132328 +    return 1;
132331 +#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
132332 +    ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
132334 +#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
132335 +    if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
132336 +        ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
132338 +#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
132339 +    if (MEM_64bits()) \
132340 +        ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog)
132342 +HINT_INLINE size_t
132343 +HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd,
132344 +                const HUF_DEltX2* const dt, const U32 dtLog)
132346 +    BYTE* const pStart = p;
132348 +    /* up to 8 symbols at a time */
132349 +    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) {
132350 +        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
132351 +        HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
132352 +        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
132353 +        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
132354 +    }
132356 +    /* closer to end : up to 2 symbols at a time */
132357 +    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2))
132358 +        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
132360 +    while (p <= pEnd-2)
132361 +        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */
132363 +    if (p < pEnd)
132364 +        p += HUF_decodeLastSymbolX2(p, bitDPtr, dt, dtLog);
132366 +    return p-pStart;
132369 +FORCE_INLINE_TEMPLATE size_t
132370 +HUF_decompress1X2_usingDTable_internal_body(
132371 +          void* dst,  size_t dstSize,
132372 +    const void* cSrc, size_t cSrcSize,
132373 +    const HUF_DTable* DTable)
132375 +    BIT_DStream_t bitD;
132377 +    /* Init */
132378 +    CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
132380 +    /* decode */
132381 +    {   BYTE* const ostart = (BYTE*) dst;
132382 +        BYTE* const oend = ostart + dstSize;
132383 +        const void* const dtPtr = DTable+1;   /* force compiler to not use strict-aliasing */
132384 +        const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
132385 +        DTableDesc const dtd = HUF_getDTableDesc(DTable);
132386 +        HUF_decodeStreamX2(ostart, &bitD, oend, dt, dtd.tableLog);
132387 +    }
132389 +    /* check */
132390 +    if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
132392 +    /* decoded size */
132393 +    return dstSize;
132396 +FORCE_INLINE_TEMPLATE size_t
132397 +HUF_decompress4X2_usingDTable_internal_body(
132398 +          void* dst,  size_t dstSize,
132399 +    const void* cSrc, size_t cSrcSize,
132400 +    const HUF_DTable* DTable)
132402 +    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
132404 +    {   const BYTE* const istart = (const BYTE*) cSrc;
132405 +        BYTE* const ostart = (BYTE*) dst;
132406 +        BYTE* const oend = ostart + dstSize;
132407 +        BYTE* const olimit = oend - (sizeof(size_t)-1);
132408 +        const void* const dtPtr = DTable+1;
132409 +        const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
132411 +        /* Init */
132412 +        BIT_DStream_t bitD1;
132413 +        BIT_DStream_t bitD2;
132414 +        BIT_DStream_t bitD3;
132415 +        BIT_DStream_t bitD4;
132416 +        size_t const length1 = MEM_readLE16(istart);
132417 +        size_t const length2 = MEM_readLE16(istart+2);
132418 +        size_t const length3 = MEM_readLE16(istart+4);
132419 +        size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
132420 +        const BYTE* const istart1 = istart + 6;  /* jumpTable */
132421 +        const BYTE* const istart2 = istart1 + length1;
132422 +        const BYTE* const istart3 = istart2 + length2;
132423 +        const BYTE* const istart4 = istart3 + length3;
132424 +        size_t const segmentSize = (dstSize+3) / 4;
132425 +        BYTE* const opStart2 = ostart + segmentSize;
132426 +        BYTE* const opStart3 = opStart2 + segmentSize;
132427 +        BYTE* const opStart4 = opStart3 + segmentSize;
132428 +        BYTE* op1 = ostart;
132429 +        BYTE* op2 = opStart2;
132430 +        BYTE* op3 = opStart3;
132431 +        BYTE* op4 = opStart4;
132432 +        U32 endSignal = 1;
132433 +        DTableDesc const dtd = HUF_getDTableDesc(DTable);
132434 +        U32 const dtLog = dtd.tableLog;
132436 +        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
132437 +        CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
132438 +        CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
132439 +        CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
132440 +        CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
132442 +        /* 16-32 symbols per loop (4-8 symbols per stream) */
132443 +        for ( ; (endSignal) & (op4 < olimit); ) {
132444 +#if defined(__clang__) && (defined(__x86_64__) || defined(__i386__))
132445 +            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
132446 +            HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
132447 +            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
132448 +            HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
132449 +            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
132450 +            HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
132451 +            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
132452 +            HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
132453 +            endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished;
132454 +            endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished;
132455 +            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
132456 +            HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
132457 +            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
132458 +            HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
132459 +            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
132460 +            HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
132461 +            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
132462 +            HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
132463 +            endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished;
132464 +            endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished;
132465 +#else
132466 +            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
132467 +            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
132468 +            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
132469 +            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
132470 +            HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
132471 +            HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
132472 +            HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
132473 +            HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
132474 +            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
132475 +            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
132476 +            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
132477 +            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
132478 +            HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
132479 +            HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
132480 +            HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
132481 +            HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
132482 +            endSignal = (U32)LIKELY(
132483 +                        (BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished)
132484 +                      & (BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished)
132485 +                      & (BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished)
132486 +                      & (BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished));
132487 +#endif
132488 +        }
132490 +        /* check corruption */
132491 +        if (op1 > opStart2) return ERROR(corruption_detected);
132492 +        if (op2 > opStart3) return ERROR(corruption_detected);
132493 +        if (op3 > opStart4) return ERROR(corruption_detected);
132494 +        /* note : op4 already verified within main loop */
132496 +        /* finish bitStreams one by one */
132497 +        HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
132498 +        HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
132499 +        HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
132500 +        HUF_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);
132502 +        /* check */
132503 +        { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
132504 +          if (!endCheck) return ERROR(corruption_detected); }
132506 +        /* decoded size */
132507 +        return dstSize;
132508 +    }
132511 +HUF_DGEN(HUF_decompress1X2_usingDTable_internal)
132512 +HUF_DGEN(HUF_decompress4X2_usingDTable_internal)
132514 +size_t HUF_decompress1X2_usingDTable(
132515 +          void* dst,  size_t dstSize,
132516 +    const void* cSrc, size_t cSrcSize,
132517 +    const HUF_DTable* DTable)
132519 +    DTableDesc dtd = HUF_getDTableDesc(DTable);
132520 +    if (dtd.tableType != 1) return ERROR(GENERIC);
132521 +    return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
132524 +size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
132525 +                                   const void* cSrc, size_t cSrcSize,
132526 +                                   void* workSpace, size_t wkspSize)
132528 +    const BYTE* ip = (const BYTE*) cSrc;
132530 +    size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize,
132531 +                                               workSpace, wkspSize);
132532 +    if (HUF_isError(hSize)) return hSize;
132533 +    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
132534 +    ip += hSize; cSrcSize -= hSize;
132536 +    return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
132540 +size_t HUF_decompress4X2_usingDTable(
132541 +          void* dst,  size_t dstSize,
132542 +    const void* cSrc, size_t cSrcSize,
132543 +    const HUF_DTable* DTable)
132545 +    DTableDesc dtd = HUF_getDTableDesc(DTable);
132546 +    if (dtd.tableType != 1) return ERROR(GENERIC);
132547 +    return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
132550 +static size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
132551 +                                   const void* cSrc, size_t cSrcSize,
132552 +                                   void* workSpace, size_t wkspSize, int bmi2)
132554 +    const BYTE* ip = (const BYTE*) cSrc;
132556 +    size_t hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize,
132557 +                                         workSpace, wkspSize);
132558 +    if (HUF_isError(hSize)) return hSize;
132559 +    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
132560 +    ip += hSize; cSrcSize -= hSize;
132562 +    return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
132565 +size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
132566 +                                   const void* cSrc, size_t cSrcSize,
132567 +                                   void* workSpace, size_t wkspSize)
132569 +    return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, /* bmi2 */ 0);
132573 +#endif /* HUF_FORCE_DECOMPRESS_X1 */
132576 +/* ***********************************/
132577 +/* Universal decompression selectors */
132578 +/* ***********************************/
132580 +size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize,
132581 +                                    const void* cSrc, size_t cSrcSize,
132582 +                                    const HUF_DTable* DTable)
132584 +    DTableDesc const dtd = HUF_getDTableDesc(DTable);
132585 +#if defined(HUF_FORCE_DECOMPRESS_X1)
132586 +    (void)dtd;
132587 +    assert(dtd.tableType == 0);
132588 +    return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
132589 +#elif defined(HUF_FORCE_DECOMPRESS_X2)
132590 +    (void)dtd;
132591 +    assert(dtd.tableType == 1);
132592 +    return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
132593 +#else
132594 +    return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
132595 +                           HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
132596 +#endif
132599 +size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize,
132600 +                                    const void* cSrc, size_t cSrcSize,
132601 +                                    const HUF_DTable* DTable)
132603 +    DTableDesc const dtd = HUF_getDTableDesc(DTable);
132604 +#if defined(HUF_FORCE_DECOMPRESS_X1)
132605 +    (void)dtd;
132606 +    assert(dtd.tableType == 0);
132607 +    return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
132608 +#elif defined(HUF_FORCE_DECOMPRESS_X2)
132609 +    (void)dtd;
132610 +    assert(dtd.tableType == 1);
132611 +    return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
132612 +#else
132613 +    return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
132614 +                           HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
132615 +#endif
132619 +#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2)
132620 +typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
132621 +static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
132623 +    /* single, double, quad */
132624 +    {{0,0}, {1,1}, {2,2}},  /* Q==0 : impossible */
132625 +    {{0,0}, {1,1}, {2,2}},  /* Q==1 : impossible */
132626 +    {{  38,130}, {1313, 74}, {2151, 38}},   /* Q == 2 : 12-18% */
132627 +    {{ 448,128}, {1353, 74}, {2238, 41}},   /* Q == 3 : 18-25% */
132628 +    {{ 556,128}, {1353, 74}, {2238, 47}},   /* Q == 4 : 25-32% */
132629 +    {{ 714,128}, {1418, 74}, {2436, 53}},   /* Q == 5 : 32-38% */
132630 +    {{ 883,128}, {1437, 74}, {2464, 61}},   /* Q == 6 : 38-44% */
132631 +    {{ 897,128}, {1515, 75}, {2622, 68}},   /* Q == 7 : 44-50% */
132632 +    {{ 926,128}, {1613, 75}, {2730, 75}},   /* Q == 8 : 50-56% */
132633 +    {{ 947,128}, {1729, 77}, {3359, 77}},   /* Q == 9 : 56-62% */
132634 +    {{1107,128}, {2083, 81}, {4006, 84}},   /* Q ==10 : 62-69% */
132635 +    {{1177,128}, {2379, 87}, {4785, 88}},   /* Q ==11 : 69-75% */
132636 +    {{1242,128}, {2415, 93}, {5155, 84}},   /* Q ==12 : 75-81% */
132637 +    {{1349,128}, {2644,106}, {5260,106}},   /* Q ==13 : 81-87% */
132638 +    {{1455,128}, {2422,124}, {4174,124}},   /* Q ==14 : 87-93% */
132639 +    {{ 722,128}, {1891,145}, {1936,146}},   /* Q ==15 : 93-99% */
132641 +#endif
132643 +/** HUF_selectDecoder() :
132644 + *  Tells which decoder is likely to decode faster,
132645 + *  based on a set of pre-computed metrics.
132646 + * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .
132647 + *  Assumption : 0 < dstSize <= 128 KB */
132648 +U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize)
132650 +    assert(dstSize > 0);
132651 +    assert(dstSize <= 128*1024);
132652 +#if defined(HUF_FORCE_DECOMPRESS_X1)
132653 +    (void)dstSize;
132654 +    (void)cSrcSize;
132655 +    return 0;
132656 +#elif defined(HUF_FORCE_DECOMPRESS_X2)
132657 +    (void)dstSize;
132658 +    (void)cSrcSize;
132659 +    return 1;
132660 +#else
132661 +    /* decoder timing evaluation */
132662 +    {   U32 const Q = (cSrcSize >= dstSize) ? 15 : (U32)(cSrcSize * 16 / dstSize);   /* Q < 16 */
132663 +        U32 const D256 = (U32)(dstSize >> 8);
132664 +        U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
132665 +        U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
132666 +        DTime1 += DTime1 >> 3;  /* advantage to algorithm using less memory, to reduce cache eviction */
132667 +        return DTime1 < DTime0;
132668 +    }
132669 +#endif
132673 +size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst,
132674 +                                     size_t dstSize, const void* cSrc,
132675 +                                     size_t cSrcSize, void* workSpace,
132676 +                                     size_t wkspSize)
132678 +    /* validation checks */
132679 +    if (dstSize == 0) return ERROR(dstSize_tooSmall);
132680 +    if (cSrcSize == 0) return ERROR(corruption_detected);
132682 +    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
132683 +#if defined(HUF_FORCE_DECOMPRESS_X1)
132684 +        (void)algoNb;
132685 +        assert(algoNb == 0);
132686 +        return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
132687 +#elif defined(HUF_FORCE_DECOMPRESS_X2)
132688 +        (void)algoNb;
132689 +        assert(algoNb == 1);
132690 +        return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
132691 +#else
132692 +        return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
132693 +                            cSrcSize, workSpace, wkspSize):
132694 +                        HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
132695 +#endif
132696 +    }
132699 +size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
132700 +                                  const void* cSrc, size_t cSrcSize,
132701 +                                  void* workSpace, size_t wkspSize)
132703 +    /* validation checks */
132704 +    if (dstSize == 0) return ERROR(dstSize_tooSmall);
132705 +    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
132706 +    if (cSrcSize == dstSize) { ZSTD_memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
132707 +    if (cSrcSize == 1) { ZSTD_memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
132709 +    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
132710 +#if defined(HUF_FORCE_DECOMPRESS_X1)
132711 +        (void)algoNb;
132712 +        assert(algoNb == 0);
132713 +        return HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
132714 +                                cSrcSize, workSpace, wkspSize);
132715 +#elif defined(HUF_FORCE_DECOMPRESS_X2)
132716 +        (void)algoNb;
132717 +        assert(algoNb == 1);
132718 +        return HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
132719 +                                cSrcSize, workSpace, wkspSize);
132720 +#else
132721 +        return algoNb ? HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
132722 +                                cSrcSize, workSpace, wkspSize):
132723 +                        HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc,
132724 +                                cSrcSize, workSpace, wkspSize);
132725 +#endif
132726 +    }
132730 +size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
132732 +    DTableDesc const dtd = HUF_getDTableDesc(DTable);
132733 +#if defined(HUF_FORCE_DECOMPRESS_X1)
132734 +    (void)dtd;
132735 +    assert(dtd.tableType == 0);
132736 +    return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
132737 +#elif defined(HUF_FORCE_DECOMPRESS_X2)
132738 +    (void)dtd;
132739 +    assert(dtd.tableType == 1);
132740 +    return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
132741 +#else
132742 +    return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
132743 +                           HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
132744 +#endif
132747 +#ifndef HUF_FORCE_DECOMPRESS_X2
132748 +size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
132750 +    const BYTE* ip = (const BYTE*) cSrc;
132752 +    size_t const hSize = HUF_readDTableX1_wksp_bmi2(dctx, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
132753 +    if (HUF_isError(hSize)) return hSize;
132754 +    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
132755 +    ip += hSize; cSrcSize -= hSize;
132757 +    return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
132759 +#endif
132761 +size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
132763 +    DTableDesc const dtd = HUF_getDTableDesc(DTable);
132764 +#if defined(HUF_FORCE_DECOMPRESS_X1)
132765 +    (void)dtd;
132766 +    assert(dtd.tableType == 0);
132767 +    return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
132768 +#elif defined(HUF_FORCE_DECOMPRESS_X2)
132769 +    (void)dtd;
132770 +    assert(dtd.tableType == 1);
132771 +    return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
132772 +#else
132773 +    return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
132774 +                           HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
132775 +#endif
132778 +size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
132780 +    /* validation checks */
132781 +    if (dstSize == 0) return ERROR(dstSize_tooSmall);
132782 +    if (cSrcSize == 0) return ERROR(corruption_detected);
132784 +    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
132785 +#if defined(HUF_FORCE_DECOMPRESS_X1)
132786 +        (void)algoNb;
132787 +        assert(algoNb == 0);
132788 +        return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
132789 +#elif defined(HUF_FORCE_DECOMPRESS_X2)
132790 +        (void)algoNb;
132791 +        assert(algoNb == 1);
132792 +        return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
132793 +#else
132794 +        return algoNb ? HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2) :
132795 +                        HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
132796 +#endif
132797 +    }
132799 diff --git a/lib/zstd/decompress/zstd_ddict.c b/lib/zstd/decompress/zstd_ddict.c
132800 new file mode 100644
132801 index 000000000000..dbbc7919de53
132802 --- /dev/null
132803 +++ b/lib/zstd/decompress/zstd_ddict.c
132804 @@ -0,0 +1,241 @@
132806 + * Copyright (c) Yann Collet, Facebook, Inc.
132807 + * All rights reserved.
132809 + * This source code is licensed under both the BSD-style license (found in the
132810 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
132811 + * in the COPYING file in the root directory of this source tree).
132812 + * You may select, at your option, one of the above-listed licenses.
132813 + */
132815 +/* zstd_ddict.c :
132816 + * concentrates all logic that needs to know the internals of ZSTD_DDict object */
132818 +/*-*******************************************************
132819 +*  Dependencies
132820 +*********************************************************/
132821 +#include "../common/zstd_deps.h"   /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
132822 +#include "../common/cpu.h"         /* bmi2 */
132823 +#include "../common/mem.h"         /* low level memory routines */
132824 +#define FSE_STATIC_LINKING_ONLY
132825 +#include "../common/fse.h"
132826 +#define HUF_STATIC_LINKING_ONLY
132827 +#include "../common/huf.h"
132828 +#include "zstd_decompress_internal.h"
132829 +#include "zstd_ddict.h"
132834 +/*-*******************************************************
132835 +*  Types
132836 +*********************************************************/
132837 +struct ZSTD_DDict_s {
132838 +    void* dictBuffer;
132839 +    const void* dictContent;
132840 +    size_t dictSize;
132841 +    ZSTD_entropyDTables_t entropy;
132842 +    U32 dictID;
132843 +    U32 entropyPresent;
132844 +    ZSTD_customMem cMem;
132845 +};  /* typedef'd to ZSTD_DDict within "zstd.h" */
132847 +const void* ZSTD_DDict_dictContent(const ZSTD_DDict* ddict)
132849 +    assert(ddict != NULL);
132850 +    return ddict->dictContent;
132853 +size_t ZSTD_DDict_dictSize(const ZSTD_DDict* ddict)
132855 +    assert(ddict != NULL);
132856 +    return ddict->dictSize;
132859 +void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
132861 +    DEBUGLOG(4, "ZSTD_copyDDictParameters");
132862 +    assert(dctx != NULL);
132863 +    assert(ddict != NULL);
132864 +    dctx->dictID = ddict->dictID;
132865 +    dctx->prefixStart = ddict->dictContent;
132866 +    dctx->virtualStart = ddict->dictContent;
132867 +    dctx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize;
132868 +    dctx->previousDstEnd = dctx->dictEnd;
132869 +#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
132870 +    dctx->dictContentBeginForFuzzing = dctx->prefixStart;
132871 +    dctx->dictContentEndForFuzzing = dctx->previousDstEnd;
132872 +#endif
132873 +    if (ddict->entropyPresent) {
132874 +        dctx->litEntropy = 1;
132875 +        dctx->fseEntropy = 1;
132876 +        dctx->LLTptr = ddict->entropy.LLTable;
132877 +        dctx->MLTptr = ddict->entropy.MLTable;
132878 +        dctx->OFTptr = ddict->entropy.OFTable;
132879 +        dctx->HUFptr = ddict->entropy.hufTable;
132880 +        dctx->entropy.rep[0] = ddict->entropy.rep[0];
132881 +        dctx->entropy.rep[1] = ddict->entropy.rep[1];
132882 +        dctx->entropy.rep[2] = ddict->entropy.rep[2];
132883 +    } else {
132884 +        dctx->litEntropy = 0;
132885 +        dctx->fseEntropy = 0;
132886 +    }
132890 +static size_t
132891 +ZSTD_loadEntropy_intoDDict(ZSTD_DDict* ddict,
132892 +                           ZSTD_dictContentType_e dictContentType)
132894 +    ddict->dictID = 0;
132895 +    ddict->entropyPresent = 0;
132896 +    if (dictContentType == ZSTD_dct_rawContent) return 0;
132898 +    if (ddict->dictSize < 8) {
132899 +        if (dictContentType == ZSTD_dct_fullDict)
132900 +            return ERROR(dictionary_corrupted);   /* only accept specified dictionaries */
132901 +        return 0;   /* pure content mode */
132902 +    }
132903 +    {   U32 const magic = MEM_readLE32(ddict->dictContent);
132904 +        if (magic != ZSTD_MAGIC_DICTIONARY) {
132905 +            if (dictContentType == ZSTD_dct_fullDict)
132906 +                return ERROR(dictionary_corrupted);   /* only accept specified dictionaries */
132907 +            return 0;   /* pure content mode */
132908 +        }
132909 +    }
132910 +    ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_FRAMEIDSIZE);
132912 +    /* load entropy tables */
132913 +    RETURN_ERROR_IF(ZSTD_isError(ZSTD_loadDEntropy(
132914 +            &ddict->entropy, ddict->dictContent, ddict->dictSize)),
132915 +        dictionary_corrupted, "");
132916 +    ddict->entropyPresent = 1;
132917 +    return 0;
132921 +static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict,
132922 +                                      const void* dict, size_t dictSize,
132923 +                                      ZSTD_dictLoadMethod_e dictLoadMethod,
132924 +                                      ZSTD_dictContentType_e dictContentType)
132926 +    if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dict) || (!dictSize)) {
132927 +        ddict->dictBuffer = NULL;
132928 +        ddict->dictContent = dict;
132929 +        if (!dict) dictSize = 0;
132930 +    } else {
132931 +        void* const internalBuffer = ZSTD_customMalloc(dictSize, ddict->cMem);
132932 +        ddict->dictBuffer = internalBuffer;
132933 +        ddict->dictContent = internalBuffer;
132934 +        if (!internalBuffer) return ERROR(memory_allocation);
132935 +        ZSTD_memcpy(internalBuffer, dict, dictSize);
132936 +    }
132937 +    ddict->dictSize = dictSize;
132938 +    ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001);  /* cover both little and big endian */
132940 +    /* parse dictionary content */
132941 +    FORWARD_IF_ERROR( ZSTD_loadEntropy_intoDDict(ddict, dictContentType) , "");
132943 +    return 0;
132946 +ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
132947 +                                      ZSTD_dictLoadMethod_e dictLoadMethod,
132948 +                                      ZSTD_dictContentType_e dictContentType,
132949 +                                      ZSTD_customMem customMem)
132951 +    if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
132953 +    {   ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_customMalloc(sizeof(ZSTD_DDict), customMem);
132954 +        if (ddict == NULL) return NULL;
132955 +        ddict->cMem = customMem;
132956 +        {   size_t const initResult = ZSTD_initDDict_internal(ddict,
132957 +                                            dict, dictSize,
132958 +                                            dictLoadMethod, dictContentType);
132959 +            if (ZSTD_isError(initResult)) {
132960 +                ZSTD_freeDDict(ddict);
132961 +                return NULL;
132962 +        }   }
132963 +        return ddict;
132964 +    }
132967 +/*! ZSTD_createDDict() :
132968 +*   Create a digested dictionary, to start decompression without startup delay.
132969 +*   `dict` content is copied inside DDict.
132970 +*   Consequently, `dict` can be released after `ZSTD_DDict` creation */
132971 +ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize)
132973 +    ZSTD_customMem const allocator = { NULL, NULL, NULL };
132974 +    return ZSTD_createDDict_advanced(dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto, allocator);
132977 +/*! ZSTD_createDDict_byReference() :
132978 + *  Create a digested dictionary, to start decompression without startup delay.
132979 + *  Dictionary content is simply referenced, it will be accessed during decompression.
132980 + *  Warning : dictBuffer must outlive DDict (DDict must be freed before dictBuffer) */
132981 +ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize)
132983 +    ZSTD_customMem const allocator = { NULL, NULL, NULL };
132984 +    return ZSTD_createDDict_advanced(dictBuffer, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto, allocator);
132988 +const ZSTD_DDict* ZSTD_initStaticDDict(
132989 +                                void* sBuffer, size_t sBufferSize,
132990 +                                const void* dict, size_t dictSize,
132991 +                                ZSTD_dictLoadMethod_e dictLoadMethod,
132992 +                                ZSTD_dictContentType_e dictContentType)
132994 +    size_t const neededSpace = sizeof(ZSTD_DDict)
132995 +                             + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
132996 +    ZSTD_DDict* const ddict = (ZSTD_DDict*)sBuffer;
132997 +    assert(sBuffer != NULL);
132998 +    assert(dict != NULL);
132999 +    if ((size_t)sBuffer & 7) return NULL;   /* 8-aligned */
133000 +    if (sBufferSize < neededSpace) return NULL;
133001 +    if (dictLoadMethod == ZSTD_dlm_byCopy) {
133002 +        ZSTD_memcpy(ddict+1, dict, dictSize);  /* local copy */
133003 +        dict = ddict+1;
133004 +    }
133005 +    if (ZSTD_isError( ZSTD_initDDict_internal(ddict,
133006 +                                              dict, dictSize,
133007 +                                              ZSTD_dlm_byRef, dictContentType) ))
133008 +        return NULL;
133009 +    return ddict;
133013 +size_t ZSTD_freeDDict(ZSTD_DDict* ddict)
133015 +    if (ddict==NULL) return 0;   /* support free on NULL */
133016 +    {   ZSTD_customMem const cMem = ddict->cMem;
133017 +        ZSTD_customFree(ddict->dictBuffer, cMem);
133018 +        ZSTD_customFree(ddict, cMem);
133019 +        return 0;
133020 +    }
133023 +/*! ZSTD_estimateDDictSize() :
133024 + *  Estimate amount of memory that will be needed to create a dictionary for decompression.
133025 + *  Note : dictionary created by reference using ZSTD_dlm_byRef are smaller */
133026 +size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod)
133028 +    return sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
133031 +size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict)
133033 +    if (ddict==NULL) return 0;   /* support sizeof on NULL */
133034 +    return sizeof(*ddict) + (ddict->dictBuffer ? ddict->dictSize : 0) ;
133037 +/*! ZSTD_getDictID_fromDDict() :
133038 + *  Provides the dictID of the dictionary loaded into `ddict`.
133039 + *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
133040 + *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
133041 +unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict)
133043 +    if (ddict==NULL) return 0;
133044 +    return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize);
133046 diff --git a/lib/zstd/decompress/zstd_ddict.h b/lib/zstd/decompress/zstd_ddict.h
133047 new file mode 100644
133048 index 000000000000..8c1a79d666f8
133049 --- /dev/null
133050 +++ b/lib/zstd/decompress/zstd_ddict.h
133051 @@ -0,0 +1,44 @@
133053 + * Copyright (c) Yann Collet, Facebook, Inc.
133054 + * All rights reserved.
133056 + * This source code is licensed under both the BSD-style license (found in the
133057 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
133058 + * in the COPYING file in the root directory of this source tree).
133059 + * You may select, at your option, one of the above-listed licenses.
133060 + */
133063 +#ifndef ZSTD_DDICT_H
133064 +#define ZSTD_DDICT_H
133066 +/*-*******************************************************
133067 + *  Dependencies
133068 + *********************************************************/
133069 +#include "../common/zstd_deps.h"   /* size_t */
133070 +#include <linux/zstd.h>     /* ZSTD_DDict, and several public functions */
133073 +/*-*******************************************************
133074 + *  Interface
133075 + *********************************************************/
133077 +/* note: several prototypes are already published in `zstd.h` :
133078 + * ZSTD_createDDict()
133079 + * ZSTD_createDDict_byReference()
133080 + * ZSTD_createDDict_advanced()
133081 + * ZSTD_freeDDict()
133082 + * ZSTD_initStaticDDict()
133083 + * ZSTD_sizeof_DDict()
133084 + * ZSTD_estimateDDictSize()
133085 + * ZSTD_getDictID_fromDict()
133086 + */
133088 +const void* ZSTD_DDict_dictContent(const ZSTD_DDict* ddict);
133089 +size_t ZSTD_DDict_dictSize(const ZSTD_DDict* ddict);
133091 +void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
133095 +#endif /* ZSTD_DDICT_H */
133096 diff --git a/lib/zstd/decompress/zstd_decompress.c b/lib/zstd/decompress/zstd_decompress.c
133097 new file mode 100644
133098 index 000000000000..16b4ea795a7e
133099 --- /dev/null
133100 +++ b/lib/zstd/decompress/zstd_decompress.c
133101 @@ -0,0 +1,2075 @@
133103 + * Copyright (c) Yann Collet, Facebook, Inc.
133104 + * All rights reserved.
133106 + * This source code is licensed under both the BSD-style license (found in the
133107 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
133108 + * in the COPYING file in the root directory of this source tree).
133109 + * You may select, at your option, one of the above-listed licenses.
133110 + */
133113 +/* ***************************************************************
133114 +*  Tuning parameters
133115 +*****************************************************************/
133117 + * HEAPMODE :
133118 + * Select how default decompression function ZSTD_decompress() allocates its context,
133119 + * on stack (0), or into heap (1, default; requires malloc()).
133120 + * Note that functions with explicit context such as ZSTD_decompressDCtx() are unaffected.
133121 + */
133122 +#ifndef ZSTD_HEAPMODE
133123 +#  define ZSTD_HEAPMODE 1
133124 +#endif
133127 +*  LEGACY_SUPPORT :
133128 +*  if set to 1+, ZSTD_decompress() can decode older formats (v0.1+)
133132 + *  MAXWINDOWSIZE_DEFAULT :
133133 + *  maximum window size accepted by DStream __by default__.
133134 + *  Frames requiring more memory will be rejected.
133135 + *  It's possible to set a different limit using ZSTD_DCtx_setMaxWindowSize().
133136 + */
133137 +#ifndef ZSTD_MAXWINDOWSIZE_DEFAULT
133138 +#  define ZSTD_MAXWINDOWSIZE_DEFAULT (((U32)1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT) + 1)
133139 +#endif
133142 + *  NO_FORWARD_PROGRESS_MAX :
133143 + *  maximum allowed nb of calls to ZSTD_decompressStream()
133144 + *  without any forward progress
133145 + *  (defined as: no byte read from input, and no byte flushed to output)
133146 + *  before triggering an error.
133147 + */
133148 +#ifndef ZSTD_NO_FORWARD_PROGRESS_MAX
133149 +#  define ZSTD_NO_FORWARD_PROGRESS_MAX 16
133150 +#endif
133153 +/*-*******************************************************
133154 +*  Dependencies
133155 +*********************************************************/
133156 +#include "../common/zstd_deps.h"   /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
133157 +#include "../common/cpu.h"         /* bmi2 */
133158 +#include "../common/mem.h"         /* low level memory routines */
133159 +#define FSE_STATIC_LINKING_ONLY
133160 +#include "../common/fse.h"
133161 +#define HUF_STATIC_LINKING_ONLY
133162 +#include "../common/huf.h"
133163 +#include <linux/xxhash.h> /* xxh64_reset, xxh64_update, xxh64_digest, XXH64 */
133164 +#include "../common/zstd_internal.h"  /* blockProperties_t */
133165 +#include "zstd_decompress_internal.h"   /* ZSTD_DCtx */
133166 +#include "zstd_ddict.h"  /* ZSTD_DDictDictContent */
133167 +#include "zstd_decompress_block.h"   /* ZSTD_decompressBlock_internal */
133172 +/*************************************
133173 + * Multiple DDicts Hashset internals *
133174 + *************************************/
133176 +#define DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT 4
133177 +#define DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT 3   /* These two constants represent SIZE_MULT/COUNT_MULT load factor without using a float.
133178 +                                                     * Currently, that means a 0.75 load factor.
133179 +                                                     * So, if count * COUNT_MULT / size * SIZE_MULT != 0, then we've exceeded
133180 +                                                     * the load factor of the ddict hash set.
133181 +                                                     */
133183 +#define DDICT_HASHSET_TABLE_BASE_SIZE 64
133184 +#define DDICT_HASHSET_RESIZE_FACTOR 2
133186 +/* Hash function to determine starting position of dict insertion within the table
133187 + * Returns an index between [0, hashSet->ddictPtrTableSize]
133188 + */
133189 +static size_t ZSTD_DDictHashSet_getIndex(const ZSTD_DDictHashSet* hashSet, U32 dictID) {
133190 +    const U64 hash = xxh64(&dictID, sizeof(U32), 0);
133191 +    /* DDict ptr table size is a multiple of 2, use size - 1 as mask to get index within [0, hashSet->ddictPtrTableSize) */
133192 +    return hash & (hashSet->ddictPtrTableSize - 1);
133195 +/* Adds DDict to a hashset without resizing it.
133196 + * If inserting a DDict with a dictID that already exists in the set, replaces the one in the set.
133197 + * Returns 0 if successful, or a zstd error code if something went wrong.
133198 + */
133199 +static size_t ZSTD_DDictHashSet_emplaceDDict(ZSTD_DDictHashSet* hashSet, const ZSTD_DDict* ddict) {
133200 +    const U32 dictID = ZSTD_getDictID_fromDDict(ddict);
133201 +    size_t idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID);
133202 +    const size_t idxRangeMask = hashSet->ddictPtrTableSize - 1;
133203 +    RETURN_ERROR_IF(hashSet->ddictPtrCount == hashSet->ddictPtrTableSize, GENERIC, "Hash set is full!");
133204 +    DEBUGLOG(4, "Hashed index: for dictID: %u is %zu", dictID, idx);
133205 +    while (hashSet->ddictPtrTable[idx] != NULL) {
133206 +        /* Replace existing ddict if inserting ddict with same dictID */
133207 +        if (ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]) == dictID) {
133208 +            DEBUGLOG(4, "DictID already exists, replacing rather than adding");
133209 +            hashSet->ddictPtrTable[idx] = ddict;
133210 +            return 0;
133211 +        }
133212 +        idx &= idxRangeMask;
133213 +        idx++;
133214 +    }
133215 +    DEBUGLOG(4, "Final idx after probing for dictID %u is: %zu", dictID, idx);
133216 +    hashSet->ddictPtrTable[idx] = ddict;
133217 +    hashSet->ddictPtrCount++;
133218 +    return 0;
133221 +/* Expands hash table by factor of DDICT_HASHSET_RESIZE_FACTOR and
133222 + * rehashes all values, allocates new table, frees old table.
133223 + * Returns 0 on success, otherwise a zstd error code.
133224 + */
133225 +static size_t ZSTD_DDictHashSet_expand(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem) {
133226 +    size_t newTableSize = hashSet->ddictPtrTableSize * DDICT_HASHSET_RESIZE_FACTOR;
133227 +    const ZSTD_DDict** newTable = (const ZSTD_DDict**)ZSTD_customCalloc(sizeof(ZSTD_DDict*) * newTableSize, customMem);
133228 +    const ZSTD_DDict** oldTable = hashSet->ddictPtrTable;
133229 +    size_t oldTableSize = hashSet->ddictPtrTableSize;
133230 +    size_t i;
133232 +    DEBUGLOG(4, "Expanding DDict hash table! Old size: %zu new size: %zu", oldTableSize, newTableSize);
133233 +    RETURN_ERROR_IF(!newTable, memory_allocation, "Expanded hashset allocation failed!");
133234 +    hashSet->ddictPtrTable = newTable;
133235 +    hashSet->ddictPtrTableSize = newTableSize;
133236 +    hashSet->ddictPtrCount = 0;
133237 +    for (i = 0; i < oldTableSize; ++i) {
133238 +        if (oldTable[i] != NULL) {
133239 +            FORWARD_IF_ERROR(ZSTD_DDictHashSet_emplaceDDict(hashSet, oldTable[i]), "");
133240 +        }
133241 +    }
133242 +    ZSTD_customFree((void*)oldTable, customMem);
133243 +    DEBUGLOG(4, "Finished re-hash");
133244 +    return 0;
133247 +/* Fetches a DDict with the given dictID
133248 + * Returns the ZSTD_DDict* with the requested dictID. If it doesn't exist, then returns NULL.
133249 + */
133250 +static const ZSTD_DDict* ZSTD_DDictHashSet_getDDict(ZSTD_DDictHashSet* hashSet, U32 dictID) {
133251 +    size_t idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID);
133252 +    const size_t idxRangeMask = hashSet->ddictPtrTableSize - 1;
133253 +    DEBUGLOG(4, "Hashed index: for dictID: %u is %zu", dictID, idx);
133254 +    for (;;) {
133255 +        size_t currDictID = ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]);
133256 +        if (currDictID == dictID || currDictID == 0) {
133257 +            /* currDictID == 0 implies a NULL ddict entry */
133258 +            break;
133259 +        } else {
133260 +            idx &= idxRangeMask;    /* Goes to start of table when we reach the end */
133261 +            idx++;
133262 +        }
133263 +    }
133264 +    DEBUGLOG(4, "Final idx after probing for dictID %u is: %zu", dictID, idx);
133265 +    return hashSet->ddictPtrTable[idx];
133268 +/* Allocates space for and returns a ddict hash set
133269 + * The hash set's ZSTD_DDict* table has all values automatically set to NULL to begin with.
133270 + * Returns NULL if allocation failed.
133271 + */
133272 +static ZSTD_DDictHashSet* ZSTD_createDDictHashSet(ZSTD_customMem customMem) {
133273 +    ZSTD_DDictHashSet* ret = (ZSTD_DDictHashSet*)ZSTD_customMalloc(sizeof(ZSTD_DDictHashSet), customMem);
133274 +    DEBUGLOG(4, "Allocating new hash set");
133275 +    ret->ddictPtrTable = (const ZSTD_DDict**)ZSTD_customCalloc(DDICT_HASHSET_TABLE_BASE_SIZE * sizeof(ZSTD_DDict*), customMem);
133276 +    ret->ddictPtrTableSize = DDICT_HASHSET_TABLE_BASE_SIZE;
133277 +    ret->ddictPtrCount = 0;
133278 +    if (!ret || !ret->ddictPtrTable) {
133279 +        return NULL;
133280 +    }
133281 +    return ret;
133284 +/* Frees the table of ZSTD_DDict* within a hashset, then frees the hashset itself.
133285 + * Note: The ZSTD_DDict* within the table are NOT freed.
133286 + */
133287 +static void ZSTD_freeDDictHashSet(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem) {
133288 +    DEBUGLOG(4, "Freeing ddict hash set");
133289 +    if (hashSet && hashSet->ddictPtrTable) {
133290 +        ZSTD_customFree((void*)hashSet->ddictPtrTable, customMem);
133291 +    }
133292 +    if (hashSet) {
133293 +        ZSTD_customFree(hashSet, customMem);
133294 +    }
133297 +/* Public function: Adds a DDict into the ZSTD_DDictHashSet, possibly triggering a resize of the hash set.
133298 + * Returns 0 on success, or a ZSTD error.
133299 + */
133300 +static size_t ZSTD_DDictHashSet_addDDict(ZSTD_DDictHashSet* hashSet, const ZSTD_DDict* ddict, ZSTD_customMem customMem) {
133301 +    DEBUGLOG(4, "Adding dict ID: %u to hashset with - Count: %zu Tablesize: %zu", ZSTD_getDictID_fromDDict(ddict), hashSet->ddictPtrCount, hashSet->ddictPtrTableSize);
133302 +    if (hashSet->ddictPtrCount * DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT / hashSet->ddictPtrTableSize * DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT != 0) {
133303 +        FORWARD_IF_ERROR(ZSTD_DDictHashSet_expand(hashSet, customMem), "");
133304 +    }
133305 +    FORWARD_IF_ERROR(ZSTD_DDictHashSet_emplaceDDict(hashSet, ddict), "");
133306 +    return 0;
133309 +/*-*************************************************************
133310 +*   Context management
133311 +***************************************************************/
133312 +size_t ZSTD_sizeof_DCtx (const ZSTD_DCtx* dctx)
133314 +    if (dctx==NULL) return 0;   /* support sizeof NULL */
133315 +    return sizeof(*dctx)
133316 +           + ZSTD_sizeof_DDict(dctx->ddictLocal)
133317 +           + dctx->inBuffSize + dctx->outBuffSize;
133320 +size_t ZSTD_estimateDCtxSize(void) { return sizeof(ZSTD_DCtx); }
133323 +static size_t ZSTD_startingInputLength(ZSTD_format_e format)
133325 +    size_t const startingInputLength = ZSTD_FRAMEHEADERSIZE_PREFIX(format);
133326 +    /* only supports formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless */
133327 +    assert( (format == ZSTD_f_zstd1) || (format == ZSTD_f_zstd1_magicless) );
133328 +    return startingInputLength;
133331 +static void ZSTD_DCtx_resetParameters(ZSTD_DCtx* dctx)
133333 +    assert(dctx->streamStage == zdss_init);
133334 +    dctx->format = ZSTD_f_zstd1;
133335 +    dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
133336 +    dctx->outBufferMode = ZSTD_bm_buffered;
133337 +    dctx->forceIgnoreChecksum = ZSTD_d_validateChecksum;
133338 +    dctx->refMultipleDDicts = ZSTD_rmd_refSingleDDict;
133341 +static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx)
133343 +    dctx->staticSize  = 0;
133344 +    dctx->ddict       = NULL;
133345 +    dctx->ddictLocal  = NULL;
133346 +    dctx->dictEnd     = NULL;
133347 +    dctx->ddictIsCold = 0;
133348 +    dctx->dictUses = ZSTD_dont_use;
133349 +    dctx->inBuff      = NULL;
133350 +    dctx->inBuffSize  = 0;
133351 +    dctx->outBuffSize = 0;
133352 +    dctx->streamStage = zdss_init;
133353 +    dctx->legacyContext = NULL;
133354 +    dctx->previousLegacyVersion = 0;
133355 +    dctx->noForwardProgress = 0;
133356 +    dctx->oversizedDuration = 0;
133357 +    dctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
133358 +    dctx->ddictSet = NULL;
133359 +    ZSTD_DCtx_resetParameters(dctx);
133360 +#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
133361 +    dctx->dictContentEndForFuzzing = NULL;
133362 +#endif
133365 +ZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize)
133367 +    ZSTD_DCtx* const dctx = (ZSTD_DCtx*) workspace;
133369 +    if ((size_t)workspace & 7) return NULL;  /* 8-aligned */
133370 +    if (workspaceSize < sizeof(ZSTD_DCtx)) return NULL;  /* minimum size */
133372 +    ZSTD_initDCtx_internal(dctx);
133373 +    dctx->staticSize = workspaceSize;
133374 +    dctx->inBuff = (char*)(dctx+1);
133375 +    return dctx;
133378 +ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
133380 +    if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
133382 +    {   ZSTD_DCtx* const dctx = (ZSTD_DCtx*)ZSTD_customMalloc(sizeof(*dctx), customMem);
133383 +        if (!dctx) return NULL;
133384 +        dctx->customMem = customMem;
133385 +        ZSTD_initDCtx_internal(dctx);
133386 +        return dctx;
133387 +    }
133390 +ZSTD_DCtx* ZSTD_createDCtx(void)
133392 +    DEBUGLOG(3, "ZSTD_createDCtx");
133393 +    return ZSTD_createDCtx_advanced(ZSTD_defaultCMem);
133396 +static void ZSTD_clearDict(ZSTD_DCtx* dctx)
133398 +    ZSTD_freeDDict(dctx->ddictLocal);
133399 +    dctx->ddictLocal = NULL;
133400 +    dctx->ddict = NULL;
133401 +    dctx->dictUses = ZSTD_dont_use;
133404 +size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
133406 +    if (dctx==NULL) return 0;   /* support free on NULL */
133407 +    RETURN_ERROR_IF(dctx->staticSize, memory_allocation, "not compatible with static DCtx");
133408 +    {   ZSTD_customMem const cMem = dctx->customMem;
133409 +        ZSTD_clearDict(dctx);
133410 +        ZSTD_customFree(dctx->inBuff, cMem);
133411 +        dctx->inBuff = NULL;
133412 +        if (dctx->ddictSet) {
133413 +            ZSTD_freeDDictHashSet(dctx->ddictSet, cMem);
133414 +            dctx->ddictSet = NULL;
133415 +        }
133416 +        ZSTD_customFree(dctx, cMem);
133417 +        return 0;
133418 +    }
133421 +/* no longer useful */
133422 +void ZSTD_copyDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx)
133424 +    size_t const toCopy = (size_t)((char*)(&dstDCtx->inBuff) - (char*)dstDCtx);
133425 +    ZSTD_memcpy(dstDCtx, srcDCtx, toCopy);  /* no need to copy workspace */
133428 +/* Given a dctx with a digested frame params, re-selects the correct ZSTD_DDict based on
133429 + * the requested dict ID from the frame. If there exists a reference to the correct ZSTD_DDict, then
133430 + * accordingly sets the ddict to be used to decompress the frame.
133432 + * If no DDict is found, then no action is taken, and the ZSTD_DCtx::ddict remains as-is.
133434 + * ZSTD_d_refMultipleDDicts must be enabled for this function to be called.
133435 + */
133436 +static void ZSTD_DCtx_selectFrameDDict(ZSTD_DCtx* dctx) {
133437 +    assert(dctx->refMultipleDDicts && dctx->ddictSet);
133438 +    DEBUGLOG(4, "Adjusting DDict based on requested dict ID from frame");
133439 +    if (dctx->ddict) {
133440 +        const ZSTD_DDict* frameDDict = ZSTD_DDictHashSet_getDDict(dctx->ddictSet, dctx->fParams.dictID);
133441 +        if (frameDDict) {
133442 +            DEBUGLOG(4, "DDict found!");
133443 +            ZSTD_clearDict(dctx);
133444 +            dctx->dictID = dctx->fParams.dictID;
133445 +            dctx->ddict = frameDDict;
133446 +            dctx->dictUses = ZSTD_use_indefinitely;
133447 +        }
133448 +    }
133452 +/*-*************************************************************
133453 + *   Frame header decoding
133454 + ***************************************************************/
133456 +/*! ZSTD_isFrame() :
133457 + *  Tells if the content of `buffer` starts with a valid Frame Identifier.
133458 + *  Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
133459 + *  Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
133460 + *  Note 3 : Skippable Frame Identifiers are considered valid. */
133461 +unsigned ZSTD_isFrame(const void* buffer, size_t size)
133463 +    if (size < ZSTD_FRAMEIDSIZE) return 0;
133464 +    {   U32 const magic = MEM_readLE32(buffer);
133465 +        if (magic == ZSTD_MAGICNUMBER) return 1;
133466 +        if ((magic & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) return 1;
133467 +    }
133468 +    return 0;
133471 +/** ZSTD_frameHeaderSize_internal() :
133472 + *  srcSize must be large enough to reach header size fields.
133473 + *  note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless.
133474 + * @return : size of the Frame Header
133475 + *           or an error code, which can be tested with ZSTD_isError() */
133476 +static size_t ZSTD_frameHeaderSize_internal(const void* src, size_t srcSize, ZSTD_format_e format)
133478 +    size_t const minInputSize = ZSTD_startingInputLength(format);
133479 +    RETURN_ERROR_IF(srcSize < minInputSize, srcSize_wrong, "");
133481 +    {   BYTE const fhd = ((const BYTE*)src)[minInputSize-1];
133482 +        U32 const dictID= fhd & 3;
133483 +        U32 const singleSegment = (fhd >> 5) & 1;
133484 +        U32 const fcsId = fhd >> 6;
133485 +        return minInputSize + !singleSegment
133486 +             + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId]
133487 +             + (singleSegment && !fcsId);
133488 +    }
133491 +/** ZSTD_frameHeaderSize() :
133492 + *  srcSize must be >= ZSTD_frameHeaderSize_prefix.
133493 + * @return : size of the Frame Header,
133494 + *           or an error code (if srcSize is too small) */
133495 +size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize)
133497 +    return ZSTD_frameHeaderSize_internal(src, srcSize, ZSTD_f_zstd1);
133501 +/** ZSTD_getFrameHeader_advanced() :
133502 + *  decode Frame Header, or require larger `srcSize`.
133503 + *  note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless
133504 + * @return : 0, `zfhPtr` is correctly filled,
133505 + *          >0, `srcSize` is too small, value is wanted `srcSize` amount,
133506 + *           or an error code, which can be tested using ZSTD_isError() */
133507 +size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format)
133509 +    const BYTE* ip = (const BYTE*)src;
133510 +    size_t const minInputSize = ZSTD_startingInputLength(format);
133512 +    ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr));   /* not strictly necessary, but static analyzer do not understand that zfhPtr is only going to be read only if return value is zero, since they are 2 different signals */
133513 +    if (srcSize < minInputSize) return minInputSize;
133514 +    RETURN_ERROR_IF(src==NULL, GENERIC, "invalid parameter");
133516 +    if ( (format != ZSTD_f_zstd1_magicless)
133517 +      && (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) {
133518 +        if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
133519 +            /* skippable frame */
133520 +            if (srcSize < ZSTD_SKIPPABLEHEADERSIZE)
133521 +                return ZSTD_SKIPPABLEHEADERSIZE; /* magic number + frame length */
133522 +            ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr));
133523 +            zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE);
133524 +            zfhPtr->frameType = ZSTD_skippableFrame;
133525 +            return 0;
133526 +        }
133527 +        RETURN_ERROR(prefix_unknown, "");
133528 +    }
133530 +    /* ensure there is enough `srcSize` to fully read/decode frame header */
133531 +    {   size_t const fhsize = ZSTD_frameHeaderSize_internal(src, srcSize, format);
133532 +        if (srcSize < fhsize) return fhsize;
133533 +        zfhPtr->headerSize = (U32)fhsize;
133534 +    }
133536 +    {   BYTE const fhdByte = ip[minInputSize-1];
133537 +        size_t pos = minInputSize;
133538 +        U32 const dictIDSizeCode = fhdByte&3;
133539 +        U32 const checksumFlag = (fhdByte>>2)&1;
133540 +        U32 const singleSegment = (fhdByte>>5)&1;
133541 +        U32 const fcsID = fhdByte>>6;
133542 +        U64 windowSize = 0;
133543 +        U32 dictID = 0;
133544 +        U64 frameContentSize = ZSTD_CONTENTSIZE_UNKNOWN;
133545 +        RETURN_ERROR_IF((fhdByte & 0x08) != 0, frameParameter_unsupported,
133546 +                        "reserved bits, must be zero");
133548 +        if (!singleSegment) {
133549 +            BYTE const wlByte = ip[pos++];
133550 +            U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
133551 +            RETURN_ERROR_IF(windowLog > ZSTD_WINDOWLOG_MAX, frameParameter_windowTooLarge, "");
133552 +            windowSize = (1ULL << windowLog);
133553 +            windowSize += (windowSize >> 3) * (wlByte&7);
133554 +        }
133555 +        switch(dictIDSizeCode)
133556 +        {
133557 +            default: assert(0);  /* impossible */
133558 +            case 0 : break;
133559 +            case 1 : dictID = ip[pos]; pos++; break;
133560 +            case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break;
133561 +            case 3 : dictID = MEM_readLE32(ip+pos); pos+=4; break;
133562 +        }
133563 +        switch(fcsID)
133564 +        {
133565 +            default: assert(0);  /* impossible */
133566 +            case 0 : if (singleSegment) frameContentSize = ip[pos]; break;
133567 +            case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break;
133568 +            case 2 : frameContentSize = MEM_readLE32(ip+pos); break;
133569 +            case 3 : frameContentSize = MEM_readLE64(ip+pos); break;
133570 +        }
133571 +        if (singleSegment) windowSize = frameContentSize;
133573 +        zfhPtr->frameType = ZSTD_frame;
133574 +        zfhPtr->frameContentSize = frameContentSize;
133575 +        zfhPtr->windowSize = windowSize;
133576 +        zfhPtr->blockSizeMax = (unsigned) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
133577 +        zfhPtr->dictID = dictID;
133578 +        zfhPtr->checksumFlag = checksumFlag;
133579 +    }
133580 +    return 0;
133583 +/** ZSTD_getFrameHeader() :
133584 + *  decode Frame Header, or require larger `srcSize`.
133585 + *  note : this function does not consume input, it only reads it.
133586 + * @return : 0, `zfhPtr` is correctly filled,
133587 + *          >0, `srcSize` is too small, value is wanted `srcSize` amount,
133588 + *           or an error code, which can be tested using ZSTD_isError() */
133589 +size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize)
133591 +    return ZSTD_getFrameHeader_advanced(zfhPtr, src, srcSize, ZSTD_f_zstd1);
133595 +/** ZSTD_getFrameContentSize() :
133596 + *  compatible with legacy mode
133597 + * @return : decompressed size of the single frame pointed to be `src` if known, otherwise
133598 + *         - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
133599 + *         - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
133600 +unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize)
133602 +    {   ZSTD_frameHeader zfh;
133603 +        if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0)
133604 +            return ZSTD_CONTENTSIZE_ERROR;
133605 +        if (zfh.frameType == ZSTD_skippableFrame) {
133606 +            return 0;
133607 +        } else {
133608 +            return zfh.frameContentSize;
133609 +    }   }
133612 +static size_t readSkippableFrameSize(void const* src, size_t srcSize)
133614 +    size_t const skippableHeaderSize = ZSTD_SKIPPABLEHEADERSIZE;
133615 +    U32 sizeU32;
133617 +    RETURN_ERROR_IF(srcSize < ZSTD_SKIPPABLEHEADERSIZE, srcSize_wrong, "");
133619 +    sizeU32 = MEM_readLE32((BYTE const*)src + ZSTD_FRAMEIDSIZE);
133620 +    RETURN_ERROR_IF((U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32,
133621 +                    frameParameter_unsupported, "");
133622 +    {
133623 +        size_t const skippableSize = skippableHeaderSize + sizeU32;
133624 +        RETURN_ERROR_IF(skippableSize > srcSize, srcSize_wrong, "");
133625 +        return skippableSize;
133626 +    }
133629 +/** ZSTD_findDecompressedSize() :
133630 + *  compatible with legacy mode
133631 + *  `srcSize` must be the exact length of some number of ZSTD compressed and/or
133632 + *      skippable frames
133633 + *  @return : decompressed size of the frames contained */
133634 +unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
133636 +    unsigned long long totalDstSize = 0;
133638 +    while (srcSize >= ZSTD_startingInputLength(ZSTD_f_zstd1)) {
133639 +        U32 const magicNumber = MEM_readLE32(src);
133641 +        if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
133642 +            size_t const skippableSize = readSkippableFrameSize(src, srcSize);
133643 +            if (ZSTD_isError(skippableSize)) {
133644 +                return ZSTD_CONTENTSIZE_ERROR;
133645 +            }
133646 +            assert(skippableSize <= srcSize);
133648 +            src = (const BYTE *)src + skippableSize;
133649 +            srcSize -= skippableSize;
133650 +            continue;
133651 +        }
133653 +        {   unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
133654 +            if (ret >= ZSTD_CONTENTSIZE_ERROR) return ret;
133656 +            /* check for overflow */
133657 +            if (totalDstSize + ret < totalDstSize) return ZSTD_CONTENTSIZE_ERROR;
133658 +            totalDstSize += ret;
133659 +        }
133660 +        {   size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize);
133661 +            if (ZSTD_isError(frameSrcSize)) {
133662 +                return ZSTD_CONTENTSIZE_ERROR;
133663 +            }
133665 +            src = (const BYTE *)src + frameSrcSize;
133666 +            srcSize -= frameSrcSize;
133667 +        }
133668 +    }  /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
133670 +    if (srcSize) return ZSTD_CONTENTSIZE_ERROR;
133672 +    return totalDstSize;
133675 +/** ZSTD_getDecompressedSize() :
133676 + *  compatible with legacy mode
133677 + * @return : decompressed size if known, 0 otherwise
133678 +             note : 0 can mean any of the following :
133679 +                   - frame content is empty
133680 +                   - decompressed size field is not present in frame header
133681 +                   - frame header unknown / not supported
133682 +                   - frame header not complete (`srcSize` too small) */
133683 +unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize)
133685 +    unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
133686 +    ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_ERROR < ZSTD_CONTENTSIZE_UNKNOWN);
133687 +    return (ret >= ZSTD_CONTENTSIZE_ERROR) ? 0 : ret;
133691 +/** ZSTD_decodeFrameHeader() :
133692 + * `headerSize` must be the size provided by ZSTD_frameHeaderSize().
133693 + * If multiple DDict references are enabled, also will choose the correct DDict to use.
133694 + * @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */
133695 +static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t headerSize)
133697 +    size_t const result = ZSTD_getFrameHeader_advanced(&(dctx->fParams), src, headerSize, dctx->format);
133698 +    if (ZSTD_isError(result)) return result;    /* invalid header */
133699 +    RETURN_ERROR_IF(result>0, srcSize_wrong, "headerSize too small");
133701 +    /* Reference DDict requested by frame if dctx references multiple ddicts */
133702 +    if (dctx->refMultipleDDicts == ZSTD_rmd_refMultipleDDicts && dctx->ddictSet) {
133703 +        ZSTD_DCtx_selectFrameDDict(dctx);
133704 +    }
133706 +#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
133707 +    /* Skip the dictID check in fuzzing mode, because it makes the search
133708 +     * harder.
133709 +     */
133710 +    RETURN_ERROR_IF(dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID),
133711 +                    dictionary_wrong, "");
133712 +#endif
133713 +    dctx->validateChecksum = (dctx->fParams.checksumFlag && !dctx->forceIgnoreChecksum) ? 1 : 0;
133714 +    if (dctx->validateChecksum) xxh64_reset(&dctx->xxhState, 0);
133715 +    dctx->processedCSize += headerSize;
133716 +    return 0;
133719 +static ZSTD_frameSizeInfo ZSTD_errorFrameSizeInfo(size_t ret)
133721 +    ZSTD_frameSizeInfo frameSizeInfo;
133722 +    frameSizeInfo.compressedSize = ret;
133723 +    frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR;
133724 +    return frameSizeInfo;
133727 +static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize)
133729 +    ZSTD_frameSizeInfo frameSizeInfo;
133730 +    ZSTD_memset(&frameSizeInfo, 0, sizeof(ZSTD_frameSizeInfo));
133733 +    if ((srcSize >= ZSTD_SKIPPABLEHEADERSIZE)
133734 +        && (MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
133735 +        frameSizeInfo.compressedSize = readSkippableFrameSize(src, srcSize);
133736 +        assert(ZSTD_isError(frameSizeInfo.compressedSize) ||
133737 +               frameSizeInfo.compressedSize <= srcSize);
133738 +        return frameSizeInfo;
133739 +    } else {
133740 +        const BYTE* ip = (const BYTE*)src;
133741 +        const BYTE* const ipstart = ip;
133742 +        size_t remainingSize = srcSize;
133743 +        size_t nbBlocks = 0;
133744 +        ZSTD_frameHeader zfh;
133746 +        /* Extract Frame Header */
133747 +        {   size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize);
133748 +            if (ZSTD_isError(ret))
133749 +                return ZSTD_errorFrameSizeInfo(ret);
133750 +            if (ret > 0)
133751 +                return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
133752 +        }
133754 +        ip += zfh.headerSize;
133755 +        remainingSize -= zfh.headerSize;
133757 +        /* Iterate over each block */
133758 +        while (1) {
133759 +            blockProperties_t blockProperties;
133760 +            size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
133761 +            if (ZSTD_isError(cBlockSize))
133762 +                return ZSTD_errorFrameSizeInfo(cBlockSize);
133764 +            if (ZSTD_blockHeaderSize + cBlockSize > remainingSize)
133765 +                return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
133767 +            ip += ZSTD_blockHeaderSize + cBlockSize;
133768 +            remainingSize -= ZSTD_blockHeaderSize + cBlockSize;
133769 +            nbBlocks++;
133771 +            if (blockProperties.lastBlock) break;
133772 +        }
133774 +        /* Final frame content checksum */
133775 +        if (zfh.checksumFlag) {
133776 +            if (remainingSize < 4)
133777 +                return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong));
133778 +            ip += 4;
133779 +        }
133781 +        frameSizeInfo.compressedSize = (size_t)(ip - ipstart);
133782 +        frameSizeInfo.decompressedBound = (zfh.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN)
133783 +                                        ? zfh.frameContentSize
133784 +                                        : nbBlocks * zfh.blockSizeMax;
133785 +        return frameSizeInfo;
133786 +    }
133789 +/** ZSTD_findFrameCompressedSize() :
133790 + *  compatible with legacy mode
133791 + *  `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame
133792 + *  `srcSize` must be at least as large as the frame contained
133793 + *  @return : the compressed size of the frame starting at `src` */
133794 +size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
133796 +    ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
133797 +    return frameSizeInfo.compressedSize;
133800 +/** ZSTD_decompressBound() :
133801 + *  compatible with legacy mode
133802 + *  `src` must point to the start of a ZSTD frame or a skippeable frame
133803 + *  `srcSize` must be at least as large as the frame contained
133804 + *  @return : the maximum decompressed size of the compressed source
133805 + */
133806 +unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize)
133808 +    unsigned long long bound = 0;
133809 +    /* Iterate over each frame */
133810 +    while (srcSize > 0) {
133811 +        ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
133812 +        size_t const compressedSize = frameSizeInfo.compressedSize;
133813 +        unsigned long long const decompressedBound = frameSizeInfo.decompressedBound;
133814 +        if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR)
133815 +            return ZSTD_CONTENTSIZE_ERROR;
133816 +        assert(srcSize >= compressedSize);
133817 +        src = (const BYTE*)src + compressedSize;
133818 +        srcSize -= compressedSize;
133819 +        bound += decompressedBound;
133820 +    }
133821 +    return bound;
133825 +/*-*************************************************************
133826 + *   Frame decoding
133827 + ***************************************************************/
133829 +/** ZSTD_insertBlock() :
133830 + *  insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
133831 +size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize)
133833 +    DEBUGLOG(5, "ZSTD_insertBlock: %u bytes", (unsigned)blockSize);
133834 +    ZSTD_checkContinuity(dctx, blockStart, blockSize);
133835 +    dctx->previousDstEnd = (const char*)blockStart + blockSize;
133836 +    return blockSize;
133840 +static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity,
133841 +                          const void* src, size_t srcSize)
133843 +    DEBUGLOG(5, "ZSTD_copyRawBlock");
133844 +    RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall, "");
133845 +    if (dst == NULL) {
133846 +        if (srcSize == 0) return 0;
133847 +        RETURN_ERROR(dstBuffer_null, "");
133848 +    }
133849 +    ZSTD_memcpy(dst, src, srcSize);
133850 +    return srcSize;
133853 +static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity,
133854 +                               BYTE b,
133855 +                               size_t regenSize)
133857 +    RETURN_ERROR_IF(regenSize > dstCapacity, dstSize_tooSmall, "");
133858 +    if (dst == NULL) {
133859 +        if (regenSize == 0) return 0;
133860 +        RETURN_ERROR(dstBuffer_null, "");
133861 +    }
133862 +    ZSTD_memset(dst, b, regenSize);
133863 +    return regenSize;
133866 +static void ZSTD_DCtx_trace_end(ZSTD_DCtx const* dctx, U64 uncompressedSize, U64 compressedSize, unsigned streaming)
133868 +    (void)dctx;
133869 +    (void)uncompressedSize;
133870 +    (void)compressedSize;
133871 +    (void)streaming;
133875 +/*! ZSTD_decompressFrame() :
133876 + * @dctx must be properly initialized
133877 + *  will update *srcPtr and *srcSizePtr,
133878 + *  to make *srcPtr progress by one frame. */
133879 +static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
133880 +                                   void* dst, size_t dstCapacity,
133881 +                             const void** srcPtr, size_t *srcSizePtr)
133883 +    const BYTE* const istart = (const BYTE*)(*srcPtr);
133884 +    const BYTE* ip = istart;
133885 +    BYTE* const ostart = (BYTE*)dst;
133886 +    BYTE* const oend = dstCapacity != 0 ? ostart + dstCapacity : ostart;
133887 +    BYTE* op = ostart;
133888 +    size_t remainingSrcSize = *srcSizePtr;
133890 +    DEBUGLOG(4, "ZSTD_decompressFrame (srcSize:%i)", (int)*srcSizePtr);
133892 +    /* check */
133893 +    RETURN_ERROR_IF(
133894 +        remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN(dctx->format)+ZSTD_blockHeaderSize,
133895 +        srcSize_wrong, "");
133897 +    /* Frame Header */
133898 +    {   size_t const frameHeaderSize = ZSTD_frameHeaderSize_internal(
133899 +                ip, ZSTD_FRAMEHEADERSIZE_PREFIX(dctx->format), dctx->format);
133900 +        if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
133901 +        RETURN_ERROR_IF(remainingSrcSize < frameHeaderSize+ZSTD_blockHeaderSize,
133902 +                        srcSize_wrong, "");
133903 +        FORWARD_IF_ERROR( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) , "");
133904 +        ip += frameHeaderSize; remainingSrcSize -= frameHeaderSize;
133905 +    }
133907 +    /* Loop on each block */
133908 +    while (1) {
133909 +        size_t decodedSize;
133910 +        blockProperties_t blockProperties;
133911 +        size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSrcSize, &blockProperties);
133912 +        if (ZSTD_isError(cBlockSize)) return cBlockSize;
133914 +        ip += ZSTD_blockHeaderSize;
133915 +        remainingSrcSize -= ZSTD_blockHeaderSize;
133916 +        RETURN_ERROR_IF(cBlockSize > remainingSrcSize, srcSize_wrong, "");
133918 +        switch(blockProperties.blockType)
133919 +        {
133920 +        case bt_compressed:
133921 +            decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oend-op), ip, cBlockSize, /* frame */ 1);
133922 +            break;
133923 +        case bt_raw :
133924 +            decodedSize = ZSTD_copyRawBlock(op, (size_t)(oend-op), ip, cBlockSize);
133925 +            break;
133926 +        case bt_rle :
133927 +            decodedSize = ZSTD_setRleBlock(op, (size_t)(oend-op), *ip, blockProperties.origSize);
133928 +            break;
133929 +        case bt_reserved :
133930 +        default:
133931 +            RETURN_ERROR(corruption_detected, "invalid block type");
133932 +        }
133934 +        if (ZSTD_isError(decodedSize)) return decodedSize;
133935 +        if (dctx->validateChecksum)
133936 +            xxh64_update(&dctx->xxhState, op, decodedSize);
133937 +        if (decodedSize != 0)
133938 +            op += decodedSize;
133939 +        assert(ip != NULL);
133940 +        ip += cBlockSize;
133941 +        remainingSrcSize -= cBlockSize;
133942 +        if (blockProperties.lastBlock) break;
133943 +    }
133945 +    if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
133946 +        RETURN_ERROR_IF((U64)(op-ostart) != dctx->fParams.frameContentSize,
133947 +                        corruption_detected, "");
133948 +    }
133949 +    if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */
133950 +        RETURN_ERROR_IF(remainingSrcSize<4, checksum_wrong, "");
133951 +        if (!dctx->forceIgnoreChecksum) {
133952 +            U32 const checkCalc = (U32)xxh64_digest(&dctx->xxhState);
133953 +            U32 checkRead;
133954 +            checkRead = MEM_readLE32(ip);
133955 +            RETURN_ERROR_IF(checkRead != checkCalc, checksum_wrong, "");
133956 +        }
133957 +        ip += 4;
133958 +        remainingSrcSize -= 4;
133959 +    }
133960 +    ZSTD_DCtx_trace_end(dctx, (U64)(op-ostart), (U64)(ip-istart), /* streaming */ 0);
133961 +    /* Allow caller to get size read */
133962 +    *srcPtr = ip;
133963 +    *srcSizePtr = remainingSrcSize;
133964 +    return (size_t)(op-ostart);
133967 +static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
133968 +                                        void* dst, size_t dstCapacity,
133969 +                                  const void* src, size_t srcSize,
133970 +                                  const void* dict, size_t dictSize,
133971 +                                  const ZSTD_DDict* ddict)
133973 +    void* const dststart = dst;
133974 +    int moreThan1Frame = 0;
133976 +    DEBUGLOG(5, "ZSTD_decompressMultiFrame");
133977 +    assert(dict==NULL || ddict==NULL);  /* either dict or ddict set, not both */
133979 +    if (ddict) {
133980 +        dict = ZSTD_DDict_dictContent(ddict);
133981 +        dictSize = ZSTD_DDict_dictSize(ddict);
133982 +    }
133984 +    while (srcSize >= ZSTD_startingInputLength(dctx->format)) {
133987 +        {   U32 const magicNumber = MEM_readLE32(src);
133988 +            DEBUGLOG(4, "reading magic number %08X (expecting %08X)",
133989 +                        (unsigned)magicNumber, ZSTD_MAGICNUMBER);
133990 +            if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {
133991 +                size_t const skippableSize = readSkippableFrameSize(src, srcSize);
133992 +                FORWARD_IF_ERROR(skippableSize, "readSkippableFrameSize failed");
133993 +                assert(skippableSize <= srcSize);
133995 +                src = (const BYTE *)src + skippableSize;
133996 +                srcSize -= skippableSize;
133997 +                continue;
133998 +        }   }
134000 +        if (ddict) {
134001 +            /* we were called from ZSTD_decompress_usingDDict */
134002 +            FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(dctx, ddict), "");
134003 +        } else {
134004 +            /* this will initialize correctly with no dict if dict == NULL, so
134005 +             * use this in all cases but ddict */
134006 +            FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize), "");
134007 +        }
134008 +        ZSTD_checkContinuity(dctx, dst, dstCapacity);
134010 +        {   const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity,
134011 +                                                    &src, &srcSize);
134012 +            RETURN_ERROR_IF(
134013 +                (ZSTD_getErrorCode(res) == ZSTD_error_prefix_unknown)
134014 +             && (moreThan1Frame==1),
134015 +                srcSize_wrong,
134016 +                "At least one frame successfully completed, "
134017 +                "but following bytes are garbage: "
134018 +                "it's more likely to be a srcSize error, "
134019 +                "specifying more input bytes than size of frame(s). "
134020 +                "Note: one could be unlucky, it might be a corruption error instead, "
134021 +                "happening right at the place where we expect zstd magic bytes. "
134022 +                "But this is _much_ less likely than a srcSize field error.");
134023 +            if (ZSTD_isError(res)) return res;
134024 +            assert(res <= dstCapacity);
134025 +            if (res != 0)
134026 +                dst = (BYTE*)dst + res;
134027 +            dstCapacity -= res;
134028 +        }
134029 +        moreThan1Frame = 1;
134030 +    }  /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
134032 +    RETURN_ERROR_IF(srcSize, srcSize_wrong, "input not entirely consumed");
134034 +    return (size_t)((BYTE*)dst - (BYTE*)dststart);
134037 +size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
134038 +                                 void* dst, size_t dstCapacity,
134039 +                           const void* src, size_t srcSize,
134040 +                           const void* dict, size_t dictSize)
134042 +    return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL);
134046 +static ZSTD_DDict const* ZSTD_getDDict(ZSTD_DCtx* dctx)
134048 +    switch (dctx->dictUses) {
134049 +    default:
134050 +        assert(0 /* Impossible */);
134051 +        /* fall-through */
134052 +    case ZSTD_dont_use:
134053 +        ZSTD_clearDict(dctx);
134054 +        return NULL;
134055 +    case ZSTD_use_indefinitely:
134056 +        return dctx->ddict;
134057 +    case ZSTD_use_once:
134058 +        dctx->dictUses = ZSTD_dont_use;
134059 +        return dctx->ddict;
134060 +    }
134063 +size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
134065 +    return ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ZSTD_getDDict(dctx));
134069 +size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
134071 +#if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE>=1)
134072 +    size_t regenSize;
134073 +    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
134074 +    RETURN_ERROR_IF(dctx==NULL, memory_allocation, "NULL pointer!");
134075 +    regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
134076 +    ZSTD_freeDCtx(dctx);
134077 +    return regenSize;
134078 +#else   /* stack mode */
134079 +    ZSTD_DCtx dctx;
134080 +    ZSTD_initDCtx_internal(&dctx);
134081 +    return ZSTD_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize);
134082 +#endif
134086 +/*-**************************************
134087 +*   Advanced Streaming Decompression API
134088 +*   Bufferless and synchronous
134089 +****************************************/
134090 +size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; }
134093 + * Similar to ZSTD_nextSrcSizeToDecompress(), but when when a block input can be streamed,
134094 + * we allow taking a partial block as the input. Currently only raw uncompressed blocks can
134095 + * be streamed.
134097 + * For blocks that can be streamed, this allows us to reduce the latency until we produce
134098 + * output, and avoid copying the input.
134100 + * @param inputSize - The total amount of input that the caller currently has.
134101 + */
134102 +static size_t ZSTD_nextSrcSizeToDecompressWithInputSize(ZSTD_DCtx* dctx, size_t inputSize) {
134103 +    if (!(dctx->stage == ZSTDds_decompressBlock || dctx->stage == ZSTDds_decompressLastBlock))
134104 +        return dctx->expected;
134105 +    if (dctx->bType != bt_raw)
134106 +        return dctx->expected;
134107 +    return MIN(MAX(inputSize, 1), dctx->expected);
134110 +ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) {
134111 +    switch(dctx->stage)
134112 +    {
134113 +    default:   /* should not happen */
134114 +        assert(0);
134115 +    case ZSTDds_getFrameHeaderSize:
134116 +    case ZSTDds_decodeFrameHeader:
134117 +        return ZSTDnit_frameHeader;
134118 +    case ZSTDds_decodeBlockHeader:
134119 +        return ZSTDnit_blockHeader;
134120 +    case ZSTDds_decompressBlock:
134121 +        return ZSTDnit_block;
134122 +    case ZSTDds_decompressLastBlock:
134123 +        return ZSTDnit_lastBlock;
134124 +    case ZSTDds_checkChecksum:
134125 +        return ZSTDnit_checksum;
134126 +    case ZSTDds_decodeSkippableHeader:
134127 +    case ZSTDds_skipFrame:
134128 +        return ZSTDnit_skippableFrame;
134129 +    }
134132 +static int ZSTD_isSkipFrame(ZSTD_DCtx* dctx) { return dctx->stage == ZSTDds_skipFrame; }
134134 +/** ZSTD_decompressContinue() :
134135 + *  srcSize : must be the exact nb of bytes expected (see ZSTD_nextSrcSizeToDecompress())
134136 + *  @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity)
134137 + *            or an error code, which can be tested using ZSTD_isError() */
134138 +size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
134140 +    DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (unsigned)srcSize);
134141 +    /* Sanity check */
134142 +    RETURN_ERROR_IF(srcSize != ZSTD_nextSrcSizeToDecompressWithInputSize(dctx, srcSize), srcSize_wrong, "not allowed");
134143 +    ZSTD_checkContinuity(dctx, dst, dstCapacity);
134145 +    dctx->processedCSize += srcSize;
134147 +    switch (dctx->stage)
134148 +    {
134149 +    case ZSTDds_getFrameHeaderSize :
134150 +        assert(src != NULL);
134151 +        if (dctx->format == ZSTD_f_zstd1) {  /* allows header */
134152 +            assert(srcSize >= ZSTD_FRAMEIDSIZE);  /* to read skippable magic number */
134153 +            if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {        /* skippable frame */
134154 +                ZSTD_memcpy(dctx->headerBuffer, src, srcSize);
134155 +                dctx->expected = ZSTD_SKIPPABLEHEADERSIZE - srcSize;  /* remaining to load to get full skippable frame header */
134156 +                dctx->stage = ZSTDds_decodeSkippableHeader;
134157 +                return 0;
134158 +        }   }
134159 +        dctx->headerSize = ZSTD_frameHeaderSize_internal(src, srcSize, dctx->format);
134160 +        if (ZSTD_isError(dctx->headerSize)) return dctx->headerSize;
134161 +        ZSTD_memcpy(dctx->headerBuffer, src, srcSize);
134162 +        dctx->expected = dctx->headerSize - srcSize;
134163 +        dctx->stage = ZSTDds_decodeFrameHeader;
134164 +        return 0;
134166 +    case ZSTDds_decodeFrameHeader:
134167 +        assert(src != NULL);
134168 +        ZSTD_memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, srcSize);
134169 +        FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize), "");
134170 +        dctx->expected = ZSTD_blockHeaderSize;
134171 +        dctx->stage = ZSTDds_decodeBlockHeader;
134172 +        return 0;
134174 +    case ZSTDds_decodeBlockHeader:
134175 +        {   blockProperties_t bp;
134176 +            size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
134177 +            if (ZSTD_isError(cBlockSize)) return cBlockSize;
134178 +            RETURN_ERROR_IF(cBlockSize > dctx->fParams.blockSizeMax, corruption_detected, "Block Size Exceeds Maximum");
134179 +            dctx->expected = cBlockSize;
134180 +            dctx->bType = bp.blockType;
134181 +            dctx->rleSize = bp.origSize;
134182 +            if (cBlockSize) {
134183 +                dctx->stage = bp.lastBlock ? ZSTDds_decompressLastBlock : ZSTDds_decompressBlock;
134184 +                return 0;
134185 +            }
134186 +            /* empty block */
134187 +            if (bp.lastBlock) {
134188 +                if (dctx->fParams.checksumFlag) {
134189 +                    dctx->expected = 4;
134190 +                    dctx->stage = ZSTDds_checkChecksum;
134191 +                } else {
134192 +                    dctx->expected = 0; /* end of frame */
134193 +                    dctx->stage = ZSTDds_getFrameHeaderSize;
134194 +                }
134195 +            } else {
134196 +                dctx->expected = ZSTD_blockHeaderSize;  /* jump to next header */
134197 +                dctx->stage = ZSTDds_decodeBlockHeader;
134198 +            }
134199 +            return 0;
134200 +        }
134202 +    case ZSTDds_decompressLastBlock:
134203 +    case ZSTDds_decompressBlock:
134204 +        DEBUGLOG(5, "ZSTD_decompressContinue: case ZSTDds_decompressBlock");
134205 +        {   size_t rSize;
134206 +            switch(dctx->bType)
134207 +            {
134208 +            case bt_compressed:
134209 +                DEBUGLOG(5, "ZSTD_decompressContinue: case bt_compressed");
134210 +                rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1);
134211 +                dctx->expected = 0;  /* Streaming not supported */
134212 +                break;
134213 +            case bt_raw :
134214 +                assert(srcSize <= dctx->expected);
134215 +                rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize);
134216 +                FORWARD_IF_ERROR(rSize, "ZSTD_copyRawBlock failed");
134217 +                assert(rSize == srcSize);
134218 +                dctx->expected -= rSize;
134219 +                break;
134220 +            case bt_rle :
134221 +                rSize = ZSTD_setRleBlock(dst, dstCapacity, *(const BYTE*)src, dctx->rleSize);
134222 +                dctx->expected = 0;  /* Streaming not supported */
134223 +                break;
134224 +            case bt_reserved :   /* should never happen */
134225 +            default:
134226 +                RETURN_ERROR(corruption_detected, "invalid block type");
134227 +            }
134228 +            FORWARD_IF_ERROR(rSize, "");
134229 +            RETURN_ERROR_IF(rSize > dctx->fParams.blockSizeMax, corruption_detected, "Decompressed Block Size Exceeds Maximum");
134230 +            DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (unsigned)rSize);
134231 +            dctx->decodedSize += rSize;
134232 +            if (dctx->validateChecksum) xxh64_update(&dctx->xxhState, dst, rSize);
134233 +            dctx->previousDstEnd = (char*)dst + rSize;
134235 +            /* Stay on the same stage until we are finished streaming the block. */
134236 +            if (dctx->expected > 0) {
134237 +                return rSize;
134238 +            }
134240 +            if (dctx->stage == ZSTDds_decompressLastBlock) {   /* end of frame */
134241 +                DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (unsigned)dctx->decodedSize);
134242 +                RETURN_ERROR_IF(
134243 +                    dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
134244 +                 && dctx->decodedSize != dctx->fParams.frameContentSize,
134245 +                    corruption_detected, "");
134246 +                if (dctx->fParams.checksumFlag) {  /* another round for frame checksum */
134247 +                    dctx->expected = 4;
134248 +                    dctx->stage = ZSTDds_checkChecksum;
134249 +                } else {
134250 +                    ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, /* streaming */ 1);
134251 +                    dctx->expected = 0;   /* ends here */
134252 +                    dctx->stage = ZSTDds_getFrameHeaderSize;
134253 +                }
134254 +            } else {
134255 +                dctx->stage = ZSTDds_decodeBlockHeader;
134256 +                dctx->expected = ZSTD_blockHeaderSize;
134257 +            }
134258 +            return rSize;
134259 +        }
134261 +    case ZSTDds_checkChecksum:
134262 +        assert(srcSize == 4);  /* guaranteed by dctx->expected */
134263 +        {
134264 +            if (dctx->validateChecksum) {
134265 +                U32 const h32 = (U32)xxh64_digest(&dctx->xxhState);
134266 +                U32 const check32 = MEM_readLE32(src);
134267 +                DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", (unsigned)h32, (unsigned)check32);
134268 +                RETURN_ERROR_IF(check32 != h32, checksum_wrong, "");
134269 +            }
134270 +            ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, /* streaming */ 1);
134271 +            dctx->expected = 0;
134272 +            dctx->stage = ZSTDds_getFrameHeaderSize;
134273 +            return 0;
134274 +        }
134276 +    case ZSTDds_decodeSkippableHeader:
134277 +        assert(src != NULL);
134278 +        assert(srcSize <= ZSTD_SKIPPABLEHEADERSIZE);
134279 +        ZSTD_memcpy(dctx->headerBuffer + (ZSTD_SKIPPABLEHEADERSIZE - srcSize), src, srcSize);   /* complete skippable header */
134280 +        dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_FRAMEIDSIZE);   /* note : dctx->expected can grow seriously large, beyond local buffer size */
134281 +        dctx->stage = ZSTDds_skipFrame;
134282 +        return 0;
134284 +    case ZSTDds_skipFrame:
134285 +        dctx->expected = 0;
134286 +        dctx->stage = ZSTDds_getFrameHeaderSize;
134287 +        return 0;
134289 +    default:
134290 +        assert(0);   /* impossible */
134291 +        RETURN_ERROR(GENERIC, "impossible to reach");   /* some compiler require default to do something */
134292 +    }
134296 +static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
134298 +    dctx->dictEnd = dctx->previousDstEnd;
134299 +    dctx->virtualStart = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
134300 +    dctx->prefixStart = dict;
134301 +    dctx->previousDstEnd = (const char*)dict + dictSize;
134302 +#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
134303 +    dctx->dictContentBeginForFuzzing = dctx->prefixStart;
134304 +    dctx->dictContentEndForFuzzing = dctx->previousDstEnd;
134305 +#endif
134306 +    return 0;
134309 +/*! ZSTD_loadDEntropy() :
134310 + *  dict : must point at beginning of a valid zstd dictionary.
134311 + * @return : size of entropy tables read */
134312 +size_t
134313 +ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
134314 +                  const void* const dict, size_t const dictSize)
134316 +    const BYTE* dictPtr = (const BYTE*)dict;
134317 +    const BYTE* const dictEnd = dictPtr + dictSize;
134319 +    RETURN_ERROR_IF(dictSize <= 8, dictionary_corrupted, "dict is too small");
134320 +    assert(MEM_readLE32(dict) == ZSTD_MAGIC_DICTIONARY);   /* dict must be valid */
134321 +    dictPtr += 8;   /* skip header = magic + dictID */
134323 +    ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, OFTable) == offsetof(ZSTD_entropyDTables_t, LLTable) + sizeof(entropy->LLTable));
134324 +    ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, MLTable) == offsetof(ZSTD_entropyDTables_t, OFTable) + sizeof(entropy->OFTable));
134325 +    ZSTD_STATIC_ASSERT(sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable) >= HUF_DECOMPRESS_WORKSPACE_SIZE);
134326 +    {   void* const workspace = &entropy->LLTable;   /* use fse tables as temporary workspace; implies fse tables are grouped together */
134327 +        size_t const workspaceSize = sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable);
134328 +#ifdef HUF_FORCE_DECOMPRESS_X1
134329 +        /* in minimal huffman, we always use X1 variants */
134330 +        size_t const hSize = HUF_readDTableX1_wksp(entropy->hufTable,
134331 +                                                dictPtr, dictEnd - dictPtr,
134332 +                                                workspace, workspaceSize);
134333 +#else
134334 +        size_t const hSize = HUF_readDTableX2_wksp(entropy->hufTable,
134335 +                                                dictPtr, (size_t)(dictEnd - dictPtr),
134336 +                                                workspace, workspaceSize);
134337 +#endif
134338 +        RETURN_ERROR_IF(HUF_isError(hSize), dictionary_corrupted, "");
134339 +        dictPtr += hSize;
134340 +    }
134342 +    {   short offcodeNCount[MaxOff+1];
134343 +        unsigned offcodeMaxValue = MaxOff, offcodeLog;
134344 +        size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, (size_t)(dictEnd-dictPtr));
134345 +        RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, "");
134346 +        RETURN_ERROR_IF(offcodeMaxValue > MaxOff, dictionary_corrupted, "");
134347 +        RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, "");
134348 +        ZSTD_buildFSETable( entropy->OFTable,
134349 +                            offcodeNCount, offcodeMaxValue,
134350 +                            OF_base, OF_bits,
134351 +                            offcodeLog,
134352 +                            entropy->workspace, sizeof(entropy->workspace),
134353 +                            /* bmi2 */0);
134354 +        dictPtr += offcodeHeaderSize;
134355 +    }
134357 +    {   short matchlengthNCount[MaxML+1];
134358 +        unsigned matchlengthMaxValue = MaxML, matchlengthLog;
134359 +        size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, (size_t)(dictEnd-dictPtr));
134360 +        RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, "");
134361 +        RETURN_ERROR_IF(matchlengthMaxValue > MaxML, dictionary_corrupted, "");
134362 +        RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, "");
134363 +        ZSTD_buildFSETable( entropy->MLTable,
134364 +                            matchlengthNCount, matchlengthMaxValue,
134365 +                            ML_base, ML_bits,
134366 +                            matchlengthLog,
134367 +                            entropy->workspace, sizeof(entropy->workspace),
134368 +                            /* bmi2 */ 0);
134369 +        dictPtr += matchlengthHeaderSize;
134370 +    }
134372 +    {   short litlengthNCount[MaxLL+1];
134373 +        unsigned litlengthMaxValue = MaxLL, litlengthLog;
134374 +        size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, (size_t)(dictEnd-dictPtr));
134375 +        RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, "");
134376 +        RETURN_ERROR_IF(litlengthMaxValue > MaxLL, dictionary_corrupted, "");
134377 +        RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, "");
134378 +        ZSTD_buildFSETable( entropy->LLTable,
134379 +                            litlengthNCount, litlengthMaxValue,
134380 +                            LL_base, LL_bits,
134381 +                            litlengthLog,
134382 +                            entropy->workspace, sizeof(entropy->workspace),
134383 +                            /* bmi2 */ 0);
134384 +        dictPtr += litlengthHeaderSize;
134385 +    }
134387 +    RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, "");
134388 +    {   int i;
134389 +        size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12));
134390 +        for (i=0; i<3; i++) {
134391 +            U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4;
134392 +            RETURN_ERROR_IF(rep==0 || rep > dictContentSize,
134393 +                            dictionary_corrupted, "");
134394 +            entropy->rep[i] = rep;
134395 +    }   }
134397 +    return (size_t)(dictPtr - (const BYTE*)dict);
134400 +static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
134402 +    if (dictSize < 8) return ZSTD_refDictContent(dctx, dict, dictSize);
134403 +    {   U32 const magic = MEM_readLE32(dict);
134404 +        if (magic != ZSTD_MAGIC_DICTIONARY) {
134405 +            return ZSTD_refDictContent(dctx, dict, dictSize);   /* pure content mode */
134406 +    }   }
134407 +    dctx->dictID = MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);
134409 +    /* load entropy tables */
134410 +    {   size_t const eSize = ZSTD_loadDEntropy(&dctx->entropy, dict, dictSize);
134411 +        RETURN_ERROR_IF(ZSTD_isError(eSize), dictionary_corrupted, "");
134412 +        dict = (const char*)dict + eSize;
134413 +        dictSize -= eSize;
134414 +    }
134415 +    dctx->litEntropy = dctx->fseEntropy = 1;
134417 +    /* reference dictionary content */
134418 +    return ZSTD_refDictContent(dctx, dict, dictSize);
134421 +size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
134423 +    assert(dctx != NULL);
134424 +    dctx->expected = ZSTD_startingInputLength(dctx->format);  /* dctx->format must be properly set */
134425 +    dctx->stage = ZSTDds_getFrameHeaderSize;
134426 +    dctx->processedCSize = 0;
134427 +    dctx->decodedSize = 0;
134428 +    dctx->previousDstEnd = NULL;
134429 +    dctx->prefixStart = NULL;
134430 +    dctx->virtualStart = NULL;
134431 +    dctx->dictEnd = NULL;
134432 +    dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001);  /* cover both little and big endian */
134433 +    dctx->litEntropy = dctx->fseEntropy = 0;
134434 +    dctx->dictID = 0;
134435 +    dctx->bType = bt_reserved;
134436 +    ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue));
134437 +    ZSTD_memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue));  /* initial repcodes */
134438 +    dctx->LLTptr = dctx->entropy.LLTable;
134439 +    dctx->MLTptr = dctx->entropy.MLTable;
134440 +    dctx->OFTptr = dctx->entropy.OFTable;
134441 +    dctx->HUFptr = dctx->entropy.hufTable;
134442 +    return 0;
134445 +size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
134447 +    FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) , "");
134448 +    if (dict && dictSize)
134449 +        RETURN_ERROR_IF(
134450 +            ZSTD_isError(ZSTD_decompress_insertDictionary(dctx, dict, dictSize)),
134451 +            dictionary_corrupted, "");
134452 +    return 0;
134456 +/* ======   ZSTD_DDict   ====== */
134458 +size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
134460 +    DEBUGLOG(4, "ZSTD_decompressBegin_usingDDict");
134461 +    assert(dctx != NULL);
134462 +    if (ddict) {
134463 +        const char* const dictStart = (const char*)ZSTD_DDict_dictContent(ddict);
134464 +        size_t const dictSize = ZSTD_DDict_dictSize(ddict);
134465 +        const void* const dictEnd = dictStart + dictSize;
134466 +        dctx->ddictIsCold = (dctx->dictEnd != dictEnd);
134467 +        DEBUGLOG(4, "DDict is %s",
134468 +                    dctx->ddictIsCold ? "~cold~" : "hot!");
134469 +    }
134470 +    FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) , "");
134471 +    if (ddict) {   /* NULL ddict is equivalent to no dictionary */
134472 +        ZSTD_copyDDictParameters(dctx, ddict);
134473 +    }
134474 +    return 0;
134477 +/*! ZSTD_getDictID_fromDict() :
134478 + *  Provides the dictID stored within dictionary.
134479 + *  if @return == 0, the dictionary is not conformant with Zstandard specification.
134480 + *  It can still be loaded, but as a content-only dictionary. */
134481 +unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize)
134483 +    if (dictSize < 8) return 0;
134484 +    if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) return 0;
134485 +    return MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE);
134488 +/*! ZSTD_getDictID_fromFrame() :
134489 + *  Provides the dictID required to decompress frame stored within `src`.
134490 + *  If @return == 0, the dictID could not be decoded.
134491 + *  This could for one of the following reasons :
134492 + *  - The frame does not require a dictionary (most common case).
134493 + *  - The frame was built with dictID intentionally removed.
134494 + *    Needed dictionary is a hidden information.
134495 + *    Note : this use case also happens when using a non-conformant dictionary.
134496 + *  - `srcSize` is too small, and as a result, frame header could not be decoded.
134497 + *    Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`.
134498 + *  - This is not a Zstandard frame.
134499 + *  When identifying the exact failure cause, it's possible to use
134500 + *  ZSTD_getFrameHeader(), which will provide a more precise error code. */
134501 +unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize)
134503 +    ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0 };
134504 +    size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize);
134505 +    if (ZSTD_isError(hError)) return 0;
134506 +    return zfp.dictID;
134510 +/*! ZSTD_decompress_usingDDict() :
134511 +*   Decompression using a pre-digested Dictionary
134512 +*   Use dictionary without significant overhead. */
134513 +size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
134514 +                                  void* dst, size_t dstCapacity,
134515 +                            const void* src, size_t srcSize,
134516 +                            const ZSTD_DDict* ddict)
134518 +    /* pass content and size in case legacy frames are encountered */
134519 +    return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize,
134520 +                                     NULL, 0,
134521 +                                     ddict);
134525 +/*=====================================
134526 +*   Streaming decompression
134527 +*====================================*/
134529 +ZSTD_DStream* ZSTD_createDStream(void)
134531 +    DEBUGLOG(3, "ZSTD_createDStream");
134532 +    return ZSTD_createDStream_advanced(ZSTD_defaultCMem);
134535 +ZSTD_DStream* ZSTD_initStaticDStream(void *workspace, size_t workspaceSize)
134537 +    return ZSTD_initStaticDCtx(workspace, workspaceSize);
134540 +ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem)
134542 +    return ZSTD_createDCtx_advanced(customMem);
134545 +size_t ZSTD_freeDStream(ZSTD_DStream* zds)
134547 +    return ZSTD_freeDCtx(zds);
134551 +/* ***  Initialization  *** */
134553 +size_t ZSTD_DStreamInSize(void)  { return ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize; }
134554 +size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_MAX; }
134556 +size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx,
134557 +                                   const void* dict, size_t dictSize,
134558 +                                         ZSTD_dictLoadMethod_e dictLoadMethod,
134559 +                                         ZSTD_dictContentType_e dictContentType)
134561 +    RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
134562 +    ZSTD_clearDict(dctx);
134563 +    if (dict && dictSize != 0) {
134564 +        dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem);
134565 +        RETURN_ERROR_IF(dctx->ddictLocal == NULL, memory_allocation, "NULL pointer!");
134566 +        dctx->ddict = dctx->ddictLocal;
134567 +        dctx->dictUses = ZSTD_use_indefinitely;
134568 +    }
134569 +    return 0;
134572 +size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
134574 +    return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
134577 +size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
134579 +    return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
134582 +size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
134584 +    FORWARD_IF_ERROR(ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dlm_byRef, dictContentType), "");
134585 +    dctx->dictUses = ZSTD_use_once;
134586 +    return 0;
134589 +size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize)
134591 +    return ZSTD_DCtx_refPrefix_advanced(dctx, prefix, prefixSize, ZSTD_dct_rawContent);
134595 +/* ZSTD_initDStream_usingDict() :
134596 + * return : expected size, aka ZSTD_startingInputLength().
134597 + * this function cannot fail */
134598 +size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize)
134600 +    DEBUGLOG(4, "ZSTD_initDStream_usingDict");
134601 +    FORWARD_IF_ERROR( ZSTD_DCtx_reset(zds, ZSTD_reset_session_only) , "");
134602 +    FORWARD_IF_ERROR( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) , "");
134603 +    return ZSTD_startingInputLength(zds->format);
134606 +/* note : this variant can't fail */
134607 +size_t ZSTD_initDStream(ZSTD_DStream* zds)
134609 +    DEBUGLOG(4, "ZSTD_initDStream");
134610 +    return ZSTD_initDStream_usingDDict(zds, NULL);
134613 +/* ZSTD_initDStream_usingDDict() :
134614 + * ddict will just be referenced, and must outlive decompression session
134615 + * this function cannot fail */
134616 +size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict)
134618 +    FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) , "");
134619 +    FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) , "");
134620 +    return ZSTD_startingInputLength(dctx->format);
134623 +/* ZSTD_resetDStream() :
134624 + * return : expected size, aka ZSTD_startingInputLength().
134625 + * this function cannot fail */
134626 +size_t ZSTD_resetDStream(ZSTD_DStream* dctx)
134628 +    FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only), "");
134629 +    return ZSTD_startingInputLength(dctx->format);
134633 +size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
134635 +    RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
134636 +    ZSTD_clearDict(dctx);
134637 +    if (ddict) {
134638 +        dctx->ddict = ddict;
134639 +        dctx->dictUses = ZSTD_use_indefinitely;
134640 +        if (dctx->refMultipleDDicts == ZSTD_rmd_refMultipleDDicts) {
134641 +            if (dctx->ddictSet == NULL) {
134642 +                dctx->ddictSet = ZSTD_createDDictHashSet(dctx->customMem);
134643 +                if (!dctx->ddictSet) {
134644 +                    RETURN_ERROR(memory_allocation, "Failed to allocate memory for hash set!");
134645 +                }
134646 +            }
134647 +            assert(!dctx->staticSize);  /* Impossible: ddictSet cannot have been allocated if static dctx */
134648 +            FORWARD_IF_ERROR(ZSTD_DDictHashSet_addDDict(dctx->ddictSet, ddict, dctx->customMem), "");
134649 +        }
134650 +    }
134651 +    return 0;
134654 +/* ZSTD_DCtx_setMaxWindowSize() :
134655 + * note : no direct equivalence in ZSTD_DCtx_setParameter,
134656 + * since this version sets windowSize, and the other sets windowLog */
134657 +size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize)
134659 +    ZSTD_bounds const bounds = ZSTD_dParam_getBounds(ZSTD_d_windowLogMax);
134660 +    size_t const min = (size_t)1 << bounds.lowerBound;
134661 +    size_t const max = (size_t)1 << bounds.upperBound;
134662 +    RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
134663 +    RETURN_ERROR_IF(maxWindowSize < min, parameter_outOfBound, "");
134664 +    RETURN_ERROR_IF(maxWindowSize > max, parameter_outOfBound, "");
134665 +    dctx->maxWindowSize = maxWindowSize;
134666 +    return 0;
134669 +size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format)
134671 +    return ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, (int)format);
134674 +ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam)
134676 +    ZSTD_bounds bounds = { 0, 0, 0 };
134677 +    switch(dParam) {
134678 +        case ZSTD_d_windowLogMax:
134679 +            bounds.lowerBound = ZSTD_WINDOWLOG_ABSOLUTEMIN;
134680 +            bounds.upperBound = ZSTD_WINDOWLOG_MAX;
134681 +            return bounds;
134682 +        case ZSTD_d_format:
134683 +            bounds.lowerBound = (int)ZSTD_f_zstd1;
134684 +            bounds.upperBound = (int)ZSTD_f_zstd1_magicless;
134685 +            ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
134686 +            return bounds;
134687 +        case ZSTD_d_stableOutBuffer:
134688 +            bounds.lowerBound = (int)ZSTD_bm_buffered;
134689 +            bounds.upperBound = (int)ZSTD_bm_stable;
134690 +            return bounds;
134691 +        case ZSTD_d_forceIgnoreChecksum:
134692 +            bounds.lowerBound = (int)ZSTD_d_validateChecksum;
134693 +            bounds.upperBound = (int)ZSTD_d_ignoreChecksum;
134694 +            return bounds;
134695 +        case ZSTD_d_refMultipleDDicts:
134696 +            bounds.lowerBound = (int)ZSTD_rmd_refSingleDDict;
134697 +            bounds.upperBound = (int)ZSTD_rmd_refMultipleDDicts;
134698 +            return bounds;
134699 +        default:;
134700 +    }
134701 +    bounds.error = ERROR(parameter_unsupported);
134702 +    return bounds;
134705 +/* ZSTD_dParam_withinBounds:
134706 + * @return 1 if value is within dParam bounds,
134707 + * 0 otherwise */
134708 +static int ZSTD_dParam_withinBounds(ZSTD_dParameter dParam, int value)
134710 +    ZSTD_bounds const bounds = ZSTD_dParam_getBounds(dParam);
134711 +    if (ZSTD_isError(bounds.error)) return 0;
134712 +    if (value < bounds.lowerBound) return 0;
134713 +    if (value > bounds.upperBound) return 0;
134714 +    return 1;
134717 +#define CHECK_DBOUNDS(p,v) {                \
134718 +    RETURN_ERROR_IF(!ZSTD_dParam_withinBounds(p, v), parameter_outOfBound, ""); \
134721 +size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value)
134723 +    switch (param) {
134724 +        case ZSTD_d_windowLogMax:
134725 +            *value = (int)ZSTD_highbit32((U32)dctx->maxWindowSize);
134726 +            return 0;
134727 +        case ZSTD_d_format:
134728 +            *value = (int)dctx->format;
134729 +            return 0;
134730 +        case ZSTD_d_stableOutBuffer:
134731 +            *value = (int)dctx->outBufferMode;
134732 +            return 0;
134733 +        case ZSTD_d_forceIgnoreChecksum:
134734 +            *value = (int)dctx->forceIgnoreChecksum;
134735 +            return 0;
134736 +        case ZSTD_d_refMultipleDDicts:
134737 +            *value = (int)dctx->refMultipleDDicts;
134738 +            return 0;
134739 +        default:;
134740 +    }
134741 +    RETURN_ERROR(parameter_unsupported, "");
134744 +size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value)
134746 +    RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
134747 +    switch(dParam) {
134748 +        case ZSTD_d_windowLogMax:
134749 +            if (value == 0) value = ZSTD_WINDOWLOG_LIMIT_DEFAULT;
134750 +            CHECK_DBOUNDS(ZSTD_d_windowLogMax, value);
134751 +            dctx->maxWindowSize = ((size_t)1) << value;
134752 +            return 0;
134753 +        case ZSTD_d_format:
134754 +            CHECK_DBOUNDS(ZSTD_d_format, value);
134755 +            dctx->format = (ZSTD_format_e)value;
134756 +            return 0;
134757 +        case ZSTD_d_stableOutBuffer:
134758 +            CHECK_DBOUNDS(ZSTD_d_stableOutBuffer, value);
134759 +            dctx->outBufferMode = (ZSTD_bufferMode_e)value;
134760 +            return 0;
134761 +        case ZSTD_d_forceIgnoreChecksum:
134762 +            CHECK_DBOUNDS(ZSTD_d_forceIgnoreChecksum, value);
134763 +            dctx->forceIgnoreChecksum = (ZSTD_forceIgnoreChecksum_e)value;
134764 +            return 0;
134765 +        case ZSTD_d_refMultipleDDicts:
134766 +            CHECK_DBOUNDS(ZSTD_d_refMultipleDDicts, value);
134767 +            if (dctx->staticSize != 0) {
134768 +                RETURN_ERROR(parameter_unsupported, "Static dctx does not support multiple DDicts!");
134769 +            }
134770 +            dctx->refMultipleDDicts = (ZSTD_refMultipleDDicts_e)value;
134771 +            return 0;
134772 +        default:;
134773 +    }
134774 +    RETURN_ERROR(parameter_unsupported, "");
134777 +size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset)
134779 +    if ( (reset == ZSTD_reset_session_only)
134780 +      || (reset == ZSTD_reset_session_and_parameters) ) {
134781 +        dctx->streamStage = zdss_init;
134782 +        dctx->noForwardProgress = 0;
134783 +    }
134784 +    if ( (reset == ZSTD_reset_parameters)
134785 +      || (reset == ZSTD_reset_session_and_parameters) ) {
134786 +        RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, "");
134787 +        ZSTD_clearDict(dctx);
134788 +        ZSTD_DCtx_resetParameters(dctx);
134789 +    }
134790 +    return 0;
134794 +size_t ZSTD_sizeof_DStream(const ZSTD_DStream* dctx)
134796 +    return ZSTD_sizeof_DCtx(dctx);
134799 +size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize)
134801 +    size_t const blockSize = (size_t) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
134802 +    unsigned long long const neededRBSize = windowSize + blockSize + (WILDCOPY_OVERLENGTH * 2);
134803 +    unsigned long long const neededSize = MIN(frameContentSize, neededRBSize);
134804 +    size_t const minRBSize = (size_t) neededSize;
134805 +    RETURN_ERROR_IF((unsigned long long)minRBSize != neededSize,
134806 +                    frameParameter_windowTooLarge, "");
134807 +    return minRBSize;
134810 +size_t ZSTD_estimateDStreamSize(size_t windowSize)
134812 +    size_t const blockSize = MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
134813 +    size_t const inBuffSize = blockSize;  /* no block can be larger */
134814 +    size_t const outBuffSize = ZSTD_decodingBufferSize_min(windowSize, ZSTD_CONTENTSIZE_UNKNOWN);
134815 +    return ZSTD_estimateDCtxSize() + inBuffSize + outBuffSize;
134818 +size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize)
134820 +    U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX;   /* note : should be user-selectable, but requires an additional parameter (or a dctx) */
134821 +    ZSTD_frameHeader zfh;
134822 +    size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize);
134823 +    if (ZSTD_isError(err)) return err;
134824 +    RETURN_ERROR_IF(err>0, srcSize_wrong, "");
134825 +    RETURN_ERROR_IF(zfh.windowSize > windowSizeMax,
134826 +                    frameParameter_windowTooLarge, "");
134827 +    return ZSTD_estimateDStreamSize((size_t)zfh.windowSize);
134831 +/* *****   Decompression   ***** */
134833 +static int ZSTD_DCtx_isOverflow(ZSTD_DStream* zds, size_t const neededInBuffSize, size_t const neededOutBuffSize)
134835 +    return (zds->inBuffSize + zds->outBuffSize) >= (neededInBuffSize + neededOutBuffSize) * ZSTD_WORKSPACETOOLARGE_FACTOR;
134838 +static void ZSTD_DCtx_updateOversizedDuration(ZSTD_DStream* zds, size_t const neededInBuffSize, size_t const neededOutBuffSize)
134840 +    if (ZSTD_DCtx_isOverflow(zds, neededInBuffSize, neededOutBuffSize))
134841 +        zds->oversizedDuration++;
134842 +    else
134843 +        zds->oversizedDuration = 0;
134846 +static int ZSTD_DCtx_isOversizedTooLong(ZSTD_DStream* zds)
134848 +    return zds->oversizedDuration >= ZSTD_WORKSPACETOOLARGE_MAXDURATION;
134851 +/* Checks that the output buffer hasn't changed if ZSTD_obm_stable is used. */
134852 +static size_t ZSTD_checkOutBuffer(ZSTD_DStream const* zds, ZSTD_outBuffer const* output)
134854 +    ZSTD_outBuffer const expect = zds->expectedOutBuffer;
134855 +    /* No requirement when ZSTD_obm_stable is not enabled. */
134856 +    if (zds->outBufferMode != ZSTD_bm_stable)
134857 +        return 0;
134858 +    /* Any buffer is allowed in zdss_init, this must be the same for every other call until
134859 +     * the context is reset.
134860 +     */
134861 +    if (zds->streamStage == zdss_init)
134862 +        return 0;
134863 +    /* The buffer must match our expectation exactly. */
134864 +    if (expect.dst == output->dst && expect.pos == output->pos && expect.size == output->size)
134865 +        return 0;
134866 +    RETURN_ERROR(dstBuffer_wrong, "ZSTD_d_stableOutBuffer enabled but output differs!");
134869 +/* Calls ZSTD_decompressContinue() with the right parameters for ZSTD_decompressStream()
134870 + * and updates the stage and the output buffer state. This call is extracted so it can be
134871 + * used both when reading directly from the ZSTD_inBuffer, and in buffered input mode.
134872 + * NOTE: You must break after calling this function since the streamStage is modified.
134873 + */
134874 +static size_t ZSTD_decompressContinueStream(
134875 +            ZSTD_DStream* zds, char** op, char* oend,
134876 +            void const* src, size_t srcSize) {
134877 +    int const isSkipFrame = ZSTD_isSkipFrame(zds);
134878 +    if (zds->outBufferMode == ZSTD_bm_buffered) {
134879 +        size_t const dstSize = isSkipFrame ? 0 : zds->outBuffSize - zds->outStart;
134880 +        size_t const decodedSize = ZSTD_decompressContinue(zds,
134881 +                zds->outBuff + zds->outStart, dstSize, src, srcSize);
134882 +        FORWARD_IF_ERROR(decodedSize, "");
134883 +        if (!decodedSize && !isSkipFrame) {
134884 +            zds->streamStage = zdss_read;
134885 +        } else {
134886 +            zds->outEnd = zds->outStart + decodedSize;
134887 +            zds->streamStage = zdss_flush;
134888 +        }
134889 +    } else {
134890 +        /* Write directly into the output buffer */
134891 +        size_t const dstSize = isSkipFrame ? 0 : (size_t)(oend - *op);
134892 +        size_t const decodedSize = ZSTD_decompressContinue(zds, *op, dstSize, src, srcSize);
134893 +        FORWARD_IF_ERROR(decodedSize, "");
134894 +        *op += decodedSize;
134895 +        /* Flushing is not needed. */
134896 +        zds->streamStage = zdss_read;
134897 +        assert(*op <= oend);
134898 +        assert(zds->outBufferMode == ZSTD_bm_stable);
134899 +    }
134900 +    return 0;
134903 +size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
134905 +    const char* const src = (const char*)input->src;
134906 +    const char* const istart = input->pos != 0 ? src + input->pos : src;
134907 +    const char* const iend = input->size != 0 ? src + input->size : src;
134908 +    const char* ip = istart;
134909 +    char* const dst = (char*)output->dst;
134910 +    char* const ostart = output->pos != 0 ? dst + output->pos : dst;
134911 +    char* const oend = output->size != 0 ? dst + output->size : dst;
134912 +    char* op = ostart;
134913 +    U32 someMoreWork = 1;
134915 +    DEBUGLOG(5, "ZSTD_decompressStream");
134916 +    RETURN_ERROR_IF(
134917 +        input->pos > input->size,
134918 +        srcSize_wrong,
134919 +        "forbidden. in: pos: %u   vs size: %u",
134920 +        (U32)input->pos, (U32)input->size);
134921 +    RETURN_ERROR_IF(
134922 +        output->pos > output->size,
134923 +        dstSize_tooSmall,
134924 +        "forbidden. out: pos: %u   vs size: %u",
134925 +        (U32)output->pos, (U32)output->size);
134926 +    DEBUGLOG(5, "input size : %u", (U32)(input->size - input->pos));
134927 +    FORWARD_IF_ERROR(ZSTD_checkOutBuffer(zds, output), "");
134929 +    while (someMoreWork) {
134930 +        switch(zds->streamStage)
134931 +        {
134932 +        case zdss_init :
134933 +            DEBUGLOG(5, "stage zdss_init => transparent reset ");
134934 +            zds->streamStage = zdss_loadHeader;
134935 +            zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0;
134936 +            zds->legacyVersion = 0;
134937 +            zds->hostageByte = 0;
134938 +            zds->expectedOutBuffer = *output;
134939 +            /* fall-through */
134941 +        case zdss_loadHeader :
134942 +            DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip));
134943 +            {   size_t const hSize = ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format);
134944 +                if (zds->refMultipleDDicts && zds->ddictSet) {
134945 +                    ZSTD_DCtx_selectFrameDDict(zds);
134946 +                }
134947 +                DEBUGLOG(5, "header size : %u", (U32)hSize);
134948 +                if (ZSTD_isError(hSize)) {
134949 +                    return hSize;   /* error */
134950 +                }
134951 +                if (hSize != 0) {   /* need more input */
134952 +                    size_t const toLoad = hSize - zds->lhSize;   /* if hSize!=0, hSize > zds->lhSize */
134953 +                    size_t const remainingInput = (size_t)(iend-ip);
134954 +                    assert(iend >= ip);
134955 +                    if (toLoad > remainingInput) {   /* not enough input to load full header */
134956 +                        if (remainingInput > 0) {
134957 +                            ZSTD_memcpy(zds->headerBuffer + zds->lhSize, ip, remainingInput);
134958 +                            zds->lhSize += remainingInput;
134959 +                        }
134960 +                        input->pos = input->size;
134961 +                        return (MAX((size_t)ZSTD_FRAMEHEADERSIZE_MIN(zds->format), hSize) - zds->lhSize) + ZSTD_blockHeaderSize;   /* remaining header bytes + next block header */
134962 +                    }
134963 +                    assert(ip != NULL);
134964 +                    ZSTD_memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad;
134965 +                    break;
134966 +            }   }
134968 +            /* check for single-pass mode opportunity */
134969 +            if (zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
134970 +                && zds->fParams.frameType != ZSTD_skippableFrame
134971 +                && (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) {
134972 +                size_t const cSize = ZSTD_findFrameCompressedSize(istart, (size_t)(iend-istart));
134973 +                if (cSize <= (size_t)(iend-istart)) {
134974 +                    /* shortcut : using single-pass mode */
134975 +                    size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, (size_t)(oend-op), istart, cSize, ZSTD_getDDict(zds));
134976 +                    if (ZSTD_isError(decompressedSize)) return decompressedSize;
134977 +                    DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()")
134978 +                    ip = istart + cSize;
134979 +                    op += decompressedSize;
134980 +                    zds->expected = 0;
134981 +                    zds->streamStage = zdss_init;
134982 +                    someMoreWork = 0;
134983 +                    break;
134984 +            }   }
134986 +            /* Check output buffer is large enough for ZSTD_odm_stable. */
134987 +            if (zds->outBufferMode == ZSTD_bm_stable
134988 +                && zds->fParams.frameType != ZSTD_skippableFrame
134989 +                && zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN
134990 +                && (U64)(size_t)(oend-op) < zds->fParams.frameContentSize) {
134991 +                RETURN_ERROR(dstSize_tooSmall, "ZSTD_obm_stable passed but ZSTD_outBuffer is too small");
134992 +            }
134994 +            /* Consume header (see ZSTDds_decodeFrameHeader) */
134995 +            DEBUGLOG(4, "Consume header");
134996 +            FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(zds, ZSTD_getDDict(zds)), "");
134998 +            if ((MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) {  /* skippable frame */
134999 +                zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_FRAMEIDSIZE);
135000 +                zds->stage = ZSTDds_skipFrame;
135001 +            } else {
135002 +                FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize), "");
135003 +                zds->expected = ZSTD_blockHeaderSize;
135004 +                zds->stage = ZSTDds_decodeBlockHeader;
135005 +            }
135007 +            /* control buffer memory usage */
135008 +            DEBUGLOG(4, "Control max memory usage (%u KB <= max %u KB)",
135009 +                        (U32)(zds->fParams.windowSize >>10),
135010 +                        (U32)(zds->maxWindowSize >> 10) );
135011 +            zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
135012 +            RETURN_ERROR_IF(zds->fParams.windowSize > zds->maxWindowSize,
135013 +                            frameParameter_windowTooLarge, "");
135015 +            /* Adapt buffer sizes to frame header instructions */
135016 +            {   size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */);
135017 +                size_t const neededOutBuffSize = zds->outBufferMode == ZSTD_bm_buffered
135018 +                        ? ZSTD_decodingBufferSize_min(zds->fParams.windowSize, zds->fParams.frameContentSize)
135019 +                        : 0;
135021 +                ZSTD_DCtx_updateOversizedDuration(zds, neededInBuffSize, neededOutBuffSize);
135023 +                {   int const tooSmall = (zds->inBuffSize < neededInBuffSize) || (zds->outBuffSize < neededOutBuffSize);
135024 +                    int const tooLarge = ZSTD_DCtx_isOversizedTooLong(zds);
135026 +                    if (tooSmall || tooLarge) {
135027 +                        size_t const bufferSize = neededInBuffSize + neededOutBuffSize;
135028 +                        DEBUGLOG(4, "inBuff  : from %u to %u",
135029 +                                    (U32)zds->inBuffSize, (U32)neededInBuffSize);
135030 +                        DEBUGLOG(4, "outBuff : from %u to %u",
135031 +                                    (U32)zds->outBuffSize, (U32)neededOutBuffSize);
135032 +                        if (zds->staticSize) {  /* static DCtx */
135033 +                            DEBUGLOG(4, "staticSize : %u", (U32)zds->staticSize);
135034 +                            assert(zds->staticSize >= sizeof(ZSTD_DCtx));  /* controlled at init */
135035 +                            RETURN_ERROR_IF(
135036 +                                bufferSize > zds->staticSize - sizeof(ZSTD_DCtx),
135037 +                                memory_allocation, "");
135038 +                        } else {
135039 +                            ZSTD_customFree(zds->inBuff, zds->customMem);
135040 +                            zds->inBuffSize = 0;
135041 +                            zds->outBuffSize = 0;
135042 +                            zds->inBuff = (char*)ZSTD_customMalloc(bufferSize, zds->customMem);
135043 +                            RETURN_ERROR_IF(zds->inBuff == NULL, memory_allocation, "");
135044 +                        }
135045 +                        zds->inBuffSize = neededInBuffSize;
135046 +                        zds->outBuff = zds->inBuff + zds->inBuffSize;
135047 +                        zds->outBuffSize = neededOutBuffSize;
135048 +            }   }   }
135049 +            zds->streamStage = zdss_read;
135050 +            /* fall-through */
135052 +        case zdss_read:
135053 +            DEBUGLOG(5, "stage zdss_read");
135054 +            {   size_t const neededInSize = ZSTD_nextSrcSizeToDecompressWithInputSize(zds, (size_t)(iend - ip));
135055 +                DEBUGLOG(5, "neededInSize = %u", (U32)neededInSize);
135056 +                if (neededInSize==0) {  /* end of frame */
135057 +                    zds->streamStage = zdss_init;
135058 +                    someMoreWork = 0;
135059 +                    break;
135060 +                }
135061 +                if ((size_t)(iend-ip) >= neededInSize) {  /* decode directly from src */
135062 +                    FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, ip, neededInSize), "");
135063 +                    ip += neededInSize;
135064 +                    /* Function modifies the stage so we must break */
135065 +                    break;
135066 +            }   }
135067 +            if (ip==iend) { someMoreWork = 0; break; }   /* no more input */
135068 +            zds->streamStage = zdss_load;
135069 +            /* fall-through */
135071 +        case zdss_load:
135072 +            {   size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds);
135073 +                size_t const toLoad = neededInSize - zds->inPos;
135074 +                int const isSkipFrame = ZSTD_isSkipFrame(zds);
135075 +                size_t loadedSize;
135076 +                /* At this point we shouldn't be decompressing a block that we can stream. */
135077 +                assert(neededInSize == ZSTD_nextSrcSizeToDecompressWithInputSize(zds, iend - ip));
135078 +                if (isSkipFrame) {
135079 +                    loadedSize = MIN(toLoad, (size_t)(iend-ip));
135080 +                } else {
135081 +                    RETURN_ERROR_IF(toLoad > zds->inBuffSize - zds->inPos,
135082 +                                    corruption_detected,
135083 +                                    "should never happen");
135084 +                    loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, (size_t)(iend-ip));
135085 +                }
135086 +                ip += loadedSize;
135087 +                zds->inPos += loadedSize;
135088 +                if (loadedSize < toLoad) { someMoreWork = 0; break; }   /* not enough input, wait for more */
135090 +                /* decode loaded input */
135091 +                zds->inPos = 0;   /* input is consumed */
135092 +                FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, zds->inBuff, neededInSize), "");
135093 +                /* Function modifies the stage so we must break */
135094 +                break;
135095 +            }
135096 +        case zdss_flush:
135097 +            {   size_t const toFlushSize = zds->outEnd - zds->outStart;
135098 +                size_t const flushedSize = ZSTD_limitCopy(op, (size_t)(oend-op), zds->outBuff + zds->outStart, toFlushSize);
135099 +                op += flushedSize;
135100 +                zds->outStart += flushedSize;
135101 +                if (flushedSize == toFlushSize) {  /* flush completed */
135102 +                    zds->streamStage = zdss_read;
135103 +                    if ( (zds->outBuffSize < zds->fParams.frameContentSize)
135104 +                      && (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) {
135105 +                        DEBUGLOG(5, "restart filling outBuff from beginning (left:%i, needed:%u)",
135106 +                                (int)(zds->outBuffSize - zds->outStart),
135107 +                                (U32)zds->fParams.blockSizeMax);
135108 +                        zds->outStart = zds->outEnd = 0;
135109 +                    }
135110 +                    break;
135111 +            }   }
135112 +            /* cannot complete flush */
135113 +            someMoreWork = 0;
135114 +            break;
135116 +        default:
135117 +            assert(0);    /* impossible */
135118 +            RETURN_ERROR(GENERIC, "impossible to reach");   /* some compiler require default to do something */
135119 +    }   }
135121 +    /* result */
135122 +    input->pos = (size_t)(ip - (const char*)(input->src));
135123 +    output->pos = (size_t)(op - (char*)(output->dst));
135125 +    /* Update the expected output buffer for ZSTD_obm_stable. */
135126 +    zds->expectedOutBuffer = *output;
135128 +    if ((ip==istart) && (op==ostart)) {  /* no forward progress */
135129 +        zds->noForwardProgress ++;
135130 +        if (zds->noForwardProgress >= ZSTD_NO_FORWARD_PROGRESS_MAX) {
135131 +            RETURN_ERROR_IF(op==oend, dstSize_tooSmall, "");
135132 +            RETURN_ERROR_IF(ip==iend, srcSize_wrong, "");
135133 +            assert(0);
135134 +        }
135135 +    } else {
135136 +        zds->noForwardProgress = 0;
135137 +    }
135138 +    {   size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds);
135139 +        if (!nextSrcSizeHint) {   /* frame fully decoded */
135140 +            if (zds->outEnd == zds->outStart) {  /* output fully flushed */
135141 +                if (zds->hostageByte) {
135142 +                    if (input->pos >= input->size) {
135143 +                        /* can't release hostage (not present) */
135144 +                        zds->streamStage = zdss_read;
135145 +                        return 1;
135146 +                    }
135147 +                    input->pos++;  /* release hostage */
135148 +                }   /* zds->hostageByte */
135149 +                return 0;
135150 +            }  /* zds->outEnd == zds->outStart */
135151 +            if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */
135152 +                input->pos--;   /* note : pos > 0, otherwise, impossible to finish reading last block */
135153 +                zds->hostageByte=1;
135154 +            }
135155 +            return 1;
135156 +        }  /* nextSrcSizeHint==0 */
135157 +        nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds) == ZSTDnit_block);   /* preload header of next block */
135158 +        assert(zds->inPos <= nextSrcSizeHint);
135159 +        nextSrcSizeHint -= zds->inPos;   /* part already loaded*/
135160 +        return nextSrcSizeHint;
135161 +    }
135164 +size_t ZSTD_decompressStream_simpleArgs (
135165 +                            ZSTD_DCtx* dctx,
135166 +                            void* dst, size_t dstCapacity, size_t* dstPos,
135167 +                      const void* src, size_t srcSize, size_t* srcPos)
135169 +    ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
135170 +    ZSTD_inBuffer  input  = { src, srcSize, *srcPos };
135171 +    /* ZSTD_compress_generic() will check validity of dstPos and srcPos */
135172 +    size_t const cErr = ZSTD_decompressStream(dctx, &output, &input);
135173 +    *dstPos = output.pos;
135174 +    *srcPos = input.pos;
135175 +    return cErr;
135177 diff --git a/lib/zstd/decompress/zstd_decompress_block.c b/lib/zstd/decompress/zstd_decompress_block.c
135178 new file mode 100644
135179 index 000000000000..cd6eba55a21c
135180 --- /dev/null
135181 +++ b/lib/zstd/decompress/zstd_decompress_block.c
135182 @@ -0,0 +1,1540 @@
135184 + * Copyright (c) Yann Collet, Facebook, Inc.
135185 + * All rights reserved.
135187 + * This source code is licensed under both the BSD-style license (found in the
135188 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
135189 + * in the COPYING file in the root directory of this source tree).
135190 + * You may select, at your option, one of the above-listed licenses.
135191 + */
135193 +/* zstd_decompress_block :
135194 + * this module takes care of decompressing _compressed_ block */
135196 +/*-*******************************************************
135197 +*  Dependencies
135198 +*********************************************************/
135199 +#include "../common/zstd_deps.h"   /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
135200 +#include "../common/compiler.h"    /* prefetch */
135201 +#include "../common/cpu.h"         /* bmi2 */
135202 +#include "../common/mem.h"         /* low level memory routines */
135203 +#define FSE_STATIC_LINKING_ONLY
135204 +#include "../common/fse.h"
135205 +#define HUF_STATIC_LINKING_ONLY
135206 +#include "../common/huf.h"
135207 +#include "../common/zstd_internal.h"
135208 +#include "zstd_decompress_internal.h"   /* ZSTD_DCtx */
135209 +#include "zstd_ddict.h"  /* ZSTD_DDictDictContent */
135210 +#include "zstd_decompress_block.h"
135212 +/*_*******************************************************
135213 +*  Macros
135214 +**********************************************************/
135216 +/* These two optional macros force the use one way or another of the two
135217 + * ZSTD_decompressSequences implementations. You can't force in both directions
135218 + * at the same time.
135219 + */
135220 +#if defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
135221 +    defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
135222 +#error "Cannot force the use of the short and the long ZSTD_decompressSequences variants!"
135223 +#endif
135226 +/*_*******************************************************
135227 +*  Memory operations
135228 +**********************************************************/
135229 +static void ZSTD_copy4(void* dst, const void* src) { ZSTD_memcpy(dst, src, 4); }
135232 +/*-*************************************************************
135233 + *   Block decoding
135234 + ***************************************************************/
135236 +/*! ZSTD_getcBlockSize() :
135237 + *  Provides the size of compressed block from block header `src` */
135238 +size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
135239 +                          blockProperties_t* bpPtr)
135241 +    RETURN_ERROR_IF(srcSize < ZSTD_blockHeaderSize, srcSize_wrong, "");
135243 +    {   U32 const cBlockHeader = MEM_readLE24(src);
135244 +        U32 const cSize = cBlockHeader >> 3;
135245 +        bpPtr->lastBlock = cBlockHeader & 1;
135246 +        bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
135247 +        bpPtr->origSize = cSize;   /* only useful for RLE */
135248 +        if (bpPtr->blockType == bt_rle) return 1;
135249 +        RETURN_ERROR_IF(bpPtr->blockType == bt_reserved, corruption_detected, "");
135250 +        return cSize;
135251 +    }
135255 +/* Hidden declaration for fullbench */
135256 +size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
135257 +                          const void* src, size_t srcSize);
135258 +/*! ZSTD_decodeLiteralsBlock() :
135259 + * @return : nb of bytes read from src (< srcSize )
135260 + *  note : symbol not declared but exposed for fullbench */
135261 +size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
135262 +                          const void* src, size_t srcSize)   /* note : srcSize < BLOCKSIZE */
135264 +    DEBUGLOG(5, "ZSTD_decodeLiteralsBlock");
135265 +    RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected, "");
135267 +    {   const BYTE* const istart = (const BYTE*) src;
135268 +        symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
135270 +        switch(litEncType)
135271 +        {
135272 +        case set_repeat:
135273 +            DEBUGLOG(5, "set_repeat flag : re-using stats from previous compressed literals block");
135274 +            RETURN_ERROR_IF(dctx->litEntropy==0, dictionary_corrupted, "");
135275 +            /* fall-through */
135277 +        case set_compressed:
135278 +            RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3");
135279 +            {   size_t lhSize, litSize, litCSize;
135280 +                U32 singleStream=0;
135281 +                U32 const lhlCode = (istart[0] >> 2) & 3;
135282 +                U32 const lhc = MEM_readLE32(istart);
135283 +                size_t hufSuccess;
135284 +                switch(lhlCode)
135285 +                {
135286 +                case 0: case 1: default:   /* note : default is impossible, since lhlCode into [0..3] */
135287 +                    /* 2 - 2 - 10 - 10 */
135288 +                    singleStream = !lhlCode;
135289 +                    lhSize = 3;
135290 +                    litSize  = (lhc >> 4) & 0x3FF;
135291 +                    litCSize = (lhc >> 14) & 0x3FF;
135292 +                    break;
135293 +                case 2:
135294 +                    /* 2 - 2 - 14 - 14 */
135295 +                    lhSize = 4;
135296 +                    litSize  = (lhc >> 4) & 0x3FFF;
135297 +                    litCSize = lhc >> 18;
135298 +                    break;
135299 +                case 3:
135300 +                    /* 2 - 2 - 18 - 18 */
135301 +                    lhSize = 5;
135302 +                    litSize  = (lhc >> 4) & 0x3FFFF;
135303 +                    litCSize = (lhc >> 22) + ((size_t)istart[4] << 10);
135304 +                    break;
135305 +                }
135306 +                RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, "");
135307 +                RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected, "");
135309 +                /* prefetch huffman table if cold */
135310 +                if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) {
135311 +                    PREFETCH_AREA(dctx->HUFptr, sizeof(dctx->entropy.hufTable));
135312 +                }
135314 +                if (litEncType==set_repeat) {
135315 +                    if (singleStream) {
135316 +                        hufSuccess = HUF_decompress1X_usingDTable_bmi2(
135317 +                            dctx->litBuffer, litSize, istart+lhSize, litCSize,
135318 +                            dctx->HUFptr, dctx->bmi2);
135319 +                    } else {
135320 +                        hufSuccess = HUF_decompress4X_usingDTable_bmi2(
135321 +                            dctx->litBuffer, litSize, istart+lhSize, litCSize,
135322 +                            dctx->HUFptr, dctx->bmi2);
135323 +                    }
135324 +                } else {
135325 +                    if (singleStream) {
135326 +#if defined(HUF_FORCE_DECOMPRESS_X2)
135327 +                        hufSuccess = HUF_decompress1X_DCtx_wksp(
135328 +                            dctx->entropy.hufTable, dctx->litBuffer, litSize,
135329 +                            istart+lhSize, litCSize, dctx->workspace,
135330 +                            sizeof(dctx->workspace));
135331 +#else
135332 +                        hufSuccess = HUF_decompress1X1_DCtx_wksp_bmi2(
135333 +                            dctx->entropy.hufTable, dctx->litBuffer, litSize,
135334 +                            istart+lhSize, litCSize, dctx->workspace,
135335 +                            sizeof(dctx->workspace), dctx->bmi2);
135336 +#endif
135337 +                    } else {
135338 +                        hufSuccess = HUF_decompress4X_hufOnly_wksp_bmi2(
135339 +                            dctx->entropy.hufTable, dctx->litBuffer, litSize,
135340 +                            istart+lhSize, litCSize, dctx->workspace,
135341 +                            sizeof(dctx->workspace), dctx->bmi2);
135342 +                    }
135343 +                }
135345 +                RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected, "");
135347 +                dctx->litPtr = dctx->litBuffer;
135348 +                dctx->litSize = litSize;
135349 +                dctx->litEntropy = 1;
135350 +                if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable;
135351 +                ZSTD_memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
135352 +                return litCSize + lhSize;
135353 +            }
135355 +        case set_basic:
135356 +            {   size_t litSize, lhSize;
135357 +                U32 const lhlCode = ((istart[0]) >> 2) & 3;
135358 +                switch(lhlCode)
135359 +                {
135360 +                case 0: case 2: default:   /* note : default is impossible, since lhlCode into [0..3] */
135361 +                    lhSize = 1;
135362 +                    litSize = istart[0] >> 3;
135363 +                    break;
135364 +                case 1:
135365 +                    lhSize = 2;
135366 +                    litSize = MEM_readLE16(istart) >> 4;
135367 +                    break;
135368 +                case 3:
135369 +                    lhSize = 3;
135370 +                    litSize = MEM_readLE24(istart) >> 4;
135371 +                    break;
135372 +                }
135374 +                if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) {  /* risk reading beyond src buffer with wildcopy */
135375 +                    RETURN_ERROR_IF(litSize+lhSize > srcSize, corruption_detected, "");
135376 +                    ZSTD_memcpy(dctx->litBuffer, istart+lhSize, litSize);
135377 +                    dctx->litPtr = dctx->litBuffer;
135378 +                    dctx->litSize = litSize;
135379 +                    ZSTD_memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
135380 +                    return lhSize+litSize;
135381 +                }
135382 +                /* direct reference into compressed stream */
135383 +                dctx->litPtr = istart+lhSize;
135384 +                dctx->litSize = litSize;
135385 +                return lhSize+litSize;
135386 +            }
135388 +        case set_rle:
135389 +            {   U32 const lhlCode = ((istart[0]) >> 2) & 3;
135390 +                size_t litSize, lhSize;
135391 +                switch(lhlCode)
135392 +                {
135393 +                case 0: case 2: default:   /* note : default is impossible, since lhlCode into [0..3] */
135394 +                    lhSize = 1;
135395 +                    litSize = istart[0] >> 3;
135396 +                    break;
135397 +                case 1:
135398 +                    lhSize = 2;
135399 +                    litSize = MEM_readLE16(istart) >> 4;
135400 +                    break;
135401 +                case 3:
135402 +                    lhSize = 3;
135403 +                    litSize = MEM_readLE24(istart) >> 4;
135404 +                    RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4");
135405 +                    break;
135406 +                }
135407 +                RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, "");
135408 +                ZSTD_memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
135409 +                dctx->litPtr = dctx->litBuffer;
135410 +                dctx->litSize = litSize;
135411 +                return lhSize+1;
135412 +            }
135413 +        default:
135414 +            RETURN_ERROR(corruption_detected, "impossible");
135415 +        }
135416 +    }
135419 +/* Default FSE distribution tables.
135420 + * These are pre-calculated FSE decoding tables using default distributions as defined in specification :
135421 + * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#default-distributions
135422 + * They were generated programmatically with following method :
135423 + * - start from default distributions, present in /lib/common/zstd_internal.h
135424 + * - generate tables normally, using ZSTD_buildFSETable()
135425 + * - printout the content of tables
135426 + * - pretify output, report below, test with fuzzer to ensure it's correct */
135428 +/* Default FSE distribution table for Literal Lengths */
135429 +static const ZSTD_seqSymbol LL_defaultDTable[(1<<LL_DEFAULTNORMLOG)+1] = {
135430 +     {  1,  1,  1, LL_DEFAULTNORMLOG},  /* header : fastMode, tableLog */
135431 +     /* nextState, nbAddBits, nbBits, baseVal */
135432 +     {  0,  0,  4,    0},  { 16,  0,  4,    0},
135433 +     { 32,  0,  5,    1},  {  0,  0,  5,    3},
135434 +     {  0,  0,  5,    4},  {  0,  0,  5,    6},
135435 +     {  0,  0,  5,    7},  {  0,  0,  5,    9},
135436 +     {  0,  0,  5,   10},  {  0,  0,  5,   12},
135437 +     {  0,  0,  6,   14},  {  0,  1,  5,   16},
135438 +     {  0,  1,  5,   20},  {  0,  1,  5,   22},
135439 +     {  0,  2,  5,   28},  {  0,  3,  5,   32},
135440 +     {  0,  4,  5,   48},  { 32,  6,  5,   64},
135441 +     {  0,  7,  5,  128},  {  0,  8,  6,  256},
135442 +     {  0, 10,  6, 1024},  {  0, 12,  6, 4096},
135443 +     { 32,  0,  4,    0},  {  0,  0,  4,    1},
135444 +     {  0,  0,  5,    2},  { 32,  0,  5,    4},
135445 +     {  0,  0,  5,    5},  { 32,  0,  5,    7},
135446 +     {  0,  0,  5,    8},  { 32,  0,  5,   10},
135447 +     {  0,  0,  5,   11},  {  0,  0,  6,   13},
135448 +     { 32,  1,  5,   16},  {  0,  1,  5,   18},
135449 +     { 32,  1,  5,   22},  {  0,  2,  5,   24},
135450 +     { 32,  3,  5,   32},  {  0,  3,  5,   40},
135451 +     {  0,  6,  4,   64},  { 16,  6,  4,   64},
135452 +     { 32,  7,  5,  128},  {  0,  9,  6,  512},
135453 +     {  0, 11,  6, 2048},  { 48,  0,  4,    0},
135454 +     { 16,  0,  4,    1},  { 32,  0,  5,    2},
135455 +     { 32,  0,  5,    3},  { 32,  0,  5,    5},
135456 +     { 32,  0,  5,    6},  { 32,  0,  5,    8},
135457 +     { 32,  0,  5,    9},  { 32,  0,  5,   11},
135458 +     { 32,  0,  5,   12},  {  0,  0,  6,   15},
135459 +     { 32,  1,  5,   18},  { 32,  1,  5,   20},
135460 +     { 32,  2,  5,   24},  { 32,  2,  5,   28},
135461 +     { 32,  3,  5,   40},  { 32,  4,  5,   48},
135462 +     {  0, 16,  6,65536},  {  0, 15,  6,32768},
135463 +     {  0, 14,  6,16384},  {  0, 13,  6, 8192},
135464 +};   /* LL_defaultDTable */
135466 +/* Default FSE distribution table for Offset Codes */
135467 +static const ZSTD_seqSymbol OF_defaultDTable[(1<<OF_DEFAULTNORMLOG)+1] = {
135468 +    {  1,  1,  1, OF_DEFAULTNORMLOG},  /* header : fastMode, tableLog */
135469 +    /* nextState, nbAddBits, nbBits, baseVal */
135470 +    {  0,  0,  5,    0},     {  0,  6,  4,   61},
135471 +    {  0,  9,  5,  509},     {  0, 15,  5,32765},
135472 +    {  0, 21,  5,2097149},   {  0,  3,  5,    5},
135473 +    {  0,  7,  4,  125},     {  0, 12,  5, 4093},
135474 +    {  0, 18,  5,262141},    {  0, 23,  5,8388605},
135475 +    {  0,  5,  5,   29},     {  0,  8,  4,  253},
135476 +    {  0, 14,  5,16381},     {  0, 20,  5,1048573},
135477 +    {  0,  2,  5,    1},     { 16,  7,  4,  125},
135478 +    {  0, 11,  5, 2045},     {  0, 17,  5,131069},
135479 +    {  0, 22,  5,4194301},   {  0,  4,  5,   13},
135480 +    { 16,  8,  4,  253},     {  0, 13,  5, 8189},
135481 +    {  0, 19,  5,524285},    {  0,  1,  5,    1},
135482 +    { 16,  6,  4,   61},     {  0, 10,  5, 1021},
135483 +    {  0, 16,  5,65533},     {  0, 28,  5,268435453},
135484 +    {  0, 27,  5,134217725}, {  0, 26,  5,67108861},
135485 +    {  0, 25,  5,33554429},  {  0, 24,  5,16777213},
135486 +};   /* OF_defaultDTable */
135489 +/* Default FSE distribution table for Match Lengths */
135490 +static const ZSTD_seqSymbol ML_defaultDTable[(1<<ML_DEFAULTNORMLOG)+1] = {
135491 +    {  1,  1,  1, ML_DEFAULTNORMLOG},  /* header : fastMode, tableLog */
135492 +    /* nextState, nbAddBits, nbBits, baseVal */
135493 +    {  0,  0,  6,    3},  {  0,  0,  4,    4},
135494 +    { 32,  0,  5,    5},  {  0,  0,  5,    6},
135495 +    {  0,  0,  5,    8},  {  0,  0,  5,    9},
135496 +    {  0,  0,  5,   11},  {  0,  0,  6,   13},
135497 +    {  0,  0,  6,   16},  {  0,  0,  6,   19},
135498 +    {  0,  0,  6,   22},  {  0,  0,  6,   25},
135499 +    {  0,  0,  6,   28},  {  0,  0,  6,   31},
135500 +    {  0,  0,  6,   34},  {  0,  1,  6,   37},
135501 +    {  0,  1,  6,   41},  {  0,  2,  6,   47},
135502 +    {  0,  3,  6,   59},  {  0,  4,  6,   83},
135503 +    {  0,  7,  6,  131},  {  0,  9,  6,  515},
135504 +    { 16,  0,  4,    4},  {  0,  0,  4,    5},
135505 +    { 32,  0,  5,    6},  {  0,  0,  5,    7},
135506 +    { 32,  0,  5,    9},  {  0,  0,  5,   10},
135507 +    {  0,  0,  6,   12},  {  0,  0,  6,   15},
135508 +    {  0,  0,  6,   18},  {  0,  0,  6,   21},
135509 +    {  0,  0,  6,   24},  {  0,  0,  6,   27},
135510 +    {  0,  0,  6,   30},  {  0,  0,  6,   33},
135511 +    {  0,  1,  6,   35},  {  0,  1,  6,   39},
135512 +    {  0,  2,  6,   43},  {  0,  3,  6,   51},
135513 +    {  0,  4,  6,   67},  {  0,  5,  6,   99},
135514 +    {  0,  8,  6,  259},  { 32,  0,  4,    4},
135515 +    { 48,  0,  4,    4},  { 16,  0,  4,    5},
135516 +    { 32,  0,  5,    7},  { 32,  0,  5,    8},
135517 +    { 32,  0,  5,   10},  { 32,  0,  5,   11},
135518 +    {  0,  0,  6,   14},  {  0,  0,  6,   17},
135519 +    {  0,  0,  6,   20},  {  0,  0,  6,   23},
135520 +    {  0,  0,  6,   26},  {  0,  0,  6,   29},
135521 +    {  0,  0,  6,   32},  {  0, 16,  6,65539},
135522 +    {  0, 15,  6,32771},  {  0, 14,  6,16387},
135523 +    {  0, 13,  6, 8195},  {  0, 12,  6, 4099},
135524 +    {  0, 11,  6, 2051},  {  0, 10,  6, 1027},
135525 +};   /* ML_defaultDTable */
135528 +static void ZSTD_buildSeqTable_rle(ZSTD_seqSymbol* dt, U32 baseValue, U32 nbAddBits)
135530 +    void* ptr = dt;
135531 +    ZSTD_seqSymbol_header* const DTableH = (ZSTD_seqSymbol_header*)ptr;
135532 +    ZSTD_seqSymbol* const cell = dt + 1;
135534 +    DTableH->tableLog = 0;
135535 +    DTableH->fastMode = 0;
135537 +    cell->nbBits = 0;
135538 +    cell->nextState = 0;
135539 +    assert(nbAddBits < 255);
135540 +    cell->nbAdditionalBits = (BYTE)nbAddBits;
135541 +    cell->baseValue = baseValue;
135545 +/* ZSTD_buildFSETable() :
135546 + * generate FSE decoding table for one symbol (ll, ml or off)
135547 + * cannot fail if input is valid =>
135548 + * all inputs are presumed validated at this stage */
135549 +FORCE_INLINE_TEMPLATE
135550 +void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt,
135551 +            const short* normalizedCounter, unsigned maxSymbolValue,
135552 +            const U32* baseValue, const U32* nbAdditionalBits,
135553 +            unsigned tableLog, void* wksp, size_t wkspSize)
135555 +    ZSTD_seqSymbol* const tableDecode = dt+1;
135556 +    U32 const maxSV1 = maxSymbolValue + 1;
135557 +    U32 const tableSize = 1 << tableLog;
135559 +    U16* symbolNext = (U16*)wksp;
135560 +    BYTE* spread = (BYTE*)(symbolNext + MaxSeq + 1);
135561 +    U32 highThreshold = tableSize - 1;
135564 +    /* Sanity Checks */
135565 +    assert(maxSymbolValue <= MaxSeq);
135566 +    assert(tableLog <= MaxFSELog);
135567 +    assert(wkspSize >= ZSTD_BUILD_FSE_TABLE_WKSP_SIZE);
135568 +    (void)wkspSize;
135569 +    /* Init, lay down lowprob symbols */
135570 +    {   ZSTD_seqSymbol_header DTableH;
135571 +        DTableH.tableLog = tableLog;
135572 +        DTableH.fastMode = 1;
135573 +        {   S16 const largeLimit= (S16)(1 << (tableLog-1));
135574 +            U32 s;
135575 +            for (s=0; s<maxSV1; s++) {
135576 +                if (normalizedCounter[s]==-1) {
135577 +                    tableDecode[highThreshold--].baseValue = s;
135578 +                    symbolNext[s] = 1;
135579 +                } else {
135580 +                    if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
135581 +                    assert(normalizedCounter[s]>=0);
135582 +                    symbolNext[s] = (U16)normalizedCounter[s];
135583 +        }   }   }
135584 +        ZSTD_memcpy(dt, &DTableH, sizeof(DTableH));
135585 +    }
135587 +    /* Spread symbols */
135588 +    assert(tableSize <= 512);
135589 +    /* Specialized symbol spreading for the case when there are
135590 +     * no low probability (-1 count) symbols. When compressing
135591 +     * small blocks we avoid low probability symbols to hit this
135592 +     * case, since header decoding speed matters more.
135593 +     */
135594 +    if (highThreshold == tableSize - 1) {
135595 +        size_t const tableMask = tableSize-1;
135596 +        size_t const step = FSE_TABLESTEP(tableSize);
135597 +        /* First lay down the symbols in order.
135598 +         * We use a uint64_t to lay down 8 bytes at a time. This reduces branch
135599 +         * misses since small blocks generally have small table logs, so nearly
135600 +         * all symbols have counts <= 8. We ensure we have 8 bytes at the end of
135601 +         * our buffer to handle the over-write.
135602 +         */
135603 +        {
135604 +            U64 const add = 0x0101010101010101ull;
135605 +            size_t pos = 0;
135606 +            U64 sv = 0;
135607 +            U32 s;
135608 +            for (s=0; s<maxSV1; ++s, sv += add) {
135609 +                int i;
135610 +                int const n = normalizedCounter[s];
135611 +                MEM_write64(spread + pos, sv);
135612 +                for (i = 8; i < n; i += 8) {
135613 +                    MEM_write64(spread + pos + i, sv);
135614 +                }
135615 +                pos += n;
135616 +            }
135617 +        }
135618 +        /* Now we spread those positions across the table.
135619 +         * The benefit of doing it in two stages is that we avoid the the
135620 +         * variable size inner loop, which caused lots of branch misses.
135621 +         * Now we can run through all the positions without any branch misses.
135622 +         * We unroll the loop twice, since that is what emperically worked best.
135623 +         */
135624 +        {
135625 +            size_t position = 0;
135626 +            size_t s;
135627 +            size_t const unroll = 2;
135628 +            assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
135629 +            for (s = 0; s < (size_t)tableSize; s += unroll) {
135630 +                size_t u;
135631 +                for (u = 0; u < unroll; ++u) {
135632 +                    size_t const uPosition = (position + (u * step)) & tableMask;
135633 +                    tableDecode[uPosition].baseValue = spread[s + u];
135634 +                }
135635 +                position = (position + (unroll * step)) & tableMask;
135636 +            }
135637 +            assert(position == 0);
135638 +        }
135639 +    } else {
135640 +        U32 const tableMask = tableSize-1;
135641 +        U32 const step = FSE_TABLESTEP(tableSize);
135642 +        U32 s, position = 0;
135643 +        for (s=0; s<maxSV1; s++) {
135644 +            int i;
135645 +            int const n = normalizedCounter[s];
135646 +            for (i=0; i<n; i++) {
135647 +                tableDecode[position].baseValue = s;
135648 +                position = (position + step) & tableMask;
135649 +                while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
135650 +        }   }
135651 +        assert(position == 0); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
135652 +    }
135654 +    /* Build Decoding table */
135655 +    {
135656 +        U32 u;
135657 +        for (u=0; u<tableSize; u++) {
135658 +            U32 const symbol = tableDecode[u].baseValue;
135659 +            U32 const nextState = symbolNext[symbol]++;
135660 +            tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
135661 +            tableDecode[u].nextState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
135662 +            assert(nbAdditionalBits[symbol] < 255);
135663 +            tableDecode[u].nbAdditionalBits = (BYTE)nbAdditionalBits[symbol];
135664 +            tableDecode[u].baseValue = baseValue[symbol];
135665 +        }
135666 +    }
135669 +/* Avoids the FORCE_INLINE of the _body() function. */
135670 +static void ZSTD_buildFSETable_body_default(ZSTD_seqSymbol* dt,
135671 +            const short* normalizedCounter, unsigned maxSymbolValue,
135672 +            const U32* baseValue, const U32* nbAdditionalBits,
135673 +            unsigned tableLog, void* wksp, size_t wkspSize)
135675 +    ZSTD_buildFSETable_body(dt, normalizedCounter, maxSymbolValue,
135676 +            baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
135679 +#if DYNAMIC_BMI2
135680 +TARGET_ATTRIBUTE("bmi2") static void ZSTD_buildFSETable_body_bmi2(ZSTD_seqSymbol* dt,
135681 +            const short* normalizedCounter, unsigned maxSymbolValue,
135682 +            const U32* baseValue, const U32* nbAdditionalBits,
135683 +            unsigned tableLog, void* wksp, size_t wkspSize)
135685 +    ZSTD_buildFSETable_body(dt, normalizedCounter, maxSymbolValue,
135686 +            baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
135688 +#endif
135690 +void ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
135691 +            const short* normalizedCounter, unsigned maxSymbolValue,
135692 +            const U32* baseValue, const U32* nbAdditionalBits,
135693 +            unsigned tableLog, void* wksp, size_t wkspSize, int bmi2)
135695 +#if DYNAMIC_BMI2
135696 +    if (bmi2) {
135697 +        ZSTD_buildFSETable_body_bmi2(dt, normalizedCounter, maxSymbolValue,
135698 +                baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
135699 +        return;
135700 +    }
135701 +#endif
135702 +    (void)bmi2;
135703 +    ZSTD_buildFSETable_body_default(dt, normalizedCounter, maxSymbolValue,
135704 +            baseValue, nbAdditionalBits, tableLog, wksp, wkspSize);
135708 +/*! ZSTD_buildSeqTable() :
135709 + * @return : nb bytes read from src,
135710 + *           or an error code if it fails */
135711 +static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymbol** DTablePtr,
135712 +                                 symbolEncodingType_e type, unsigned max, U32 maxLog,
135713 +                                 const void* src, size_t srcSize,
135714 +                                 const U32* baseValue, const U32* nbAdditionalBits,
135715 +                                 const ZSTD_seqSymbol* defaultTable, U32 flagRepeatTable,
135716 +                                 int ddictIsCold, int nbSeq, U32* wksp, size_t wkspSize,
135717 +                                 int bmi2)
135719 +    switch(type)
135720 +    {
135721 +    case set_rle :
135722 +        RETURN_ERROR_IF(!srcSize, srcSize_wrong, "");
135723 +        RETURN_ERROR_IF((*(const BYTE*)src) > max, corruption_detected, "");
135724 +        {   U32 const symbol = *(const BYTE*)src;
135725 +            U32 const baseline = baseValue[symbol];
135726 +            U32 const nbBits = nbAdditionalBits[symbol];
135727 +            ZSTD_buildSeqTable_rle(DTableSpace, baseline, nbBits);
135728 +        }
135729 +        *DTablePtr = DTableSpace;
135730 +        return 1;
135731 +    case set_basic :
135732 +        *DTablePtr = defaultTable;
135733 +        return 0;
135734 +    case set_repeat:
135735 +        RETURN_ERROR_IF(!flagRepeatTable, corruption_detected, "");
135736 +        /* prefetch FSE table if used */
135737 +        if (ddictIsCold && (nbSeq > 24 /* heuristic */)) {
135738 +            const void* const pStart = *DTablePtr;
135739 +            size_t const pSize = sizeof(ZSTD_seqSymbol) * (SEQSYMBOL_TABLE_SIZE(maxLog));
135740 +            PREFETCH_AREA(pStart, pSize);
135741 +        }
135742 +        return 0;
135743 +    case set_compressed :
135744 +        {   unsigned tableLog;
135745 +            S16 norm[MaxSeq+1];
135746 +            size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
135747 +            RETURN_ERROR_IF(FSE_isError(headerSize), corruption_detected, "");
135748 +            RETURN_ERROR_IF(tableLog > maxLog, corruption_detected, "");
135749 +            ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog, wksp, wkspSize, bmi2);
135750 +            *DTablePtr = DTableSpace;
135751 +            return headerSize;
135752 +        }
135753 +    default :
135754 +        assert(0);
135755 +        RETURN_ERROR(GENERIC, "impossible");
135756 +    }
135759 +size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
135760 +                             const void* src, size_t srcSize)
135762 +    const BYTE* const istart = (const BYTE*)src;
135763 +    const BYTE* const iend = istart + srcSize;
135764 +    const BYTE* ip = istart;
135765 +    int nbSeq;
135766 +    DEBUGLOG(5, "ZSTD_decodeSeqHeaders");
135768 +    /* check */
135769 +    RETURN_ERROR_IF(srcSize < MIN_SEQUENCES_SIZE, srcSize_wrong, "");
135771 +    /* SeqHead */
135772 +    nbSeq = *ip++;
135773 +    if (!nbSeq) {
135774 +        *nbSeqPtr=0;
135775 +        RETURN_ERROR_IF(srcSize != 1, srcSize_wrong, "");
135776 +        return 1;
135777 +    }
135778 +    if (nbSeq > 0x7F) {
135779 +        if (nbSeq == 0xFF) {
135780 +            RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong, "");
135781 +            nbSeq = MEM_readLE16(ip) + LONGNBSEQ;
135782 +            ip+=2;
135783 +        } else {
135784 +            RETURN_ERROR_IF(ip >= iend, srcSize_wrong, "");
135785 +            nbSeq = ((nbSeq-0x80)<<8) + *ip++;
135786 +        }
135787 +    }
135788 +    *nbSeqPtr = nbSeq;
135790 +    /* FSE table descriptors */
135791 +    RETURN_ERROR_IF(ip+1 > iend, srcSize_wrong, ""); /* minimum possible size: 1 byte for symbol encoding types */
135792 +    {   symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
135793 +        symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
135794 +        symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
135795 +        ip++;
135797 +        /* Build DTables */
135798 +        {   size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr,
135799 +                                                      LLtype, MaxLL, LLFSELog,
135800 +                                                      ip, iend-ip,
135801 +                                                      LL_base, LL_bits,
135802 +                                                      LL_defaultDTable, dctx->fseEntropy,
135803 +                                                      dctx->ddictIsCold, nbSeq,
135804 +                                                      dctx->workspace, sizeof(dctx->workspace),
135805 +                                                      dctx->bmi2);
135806 +            RETURN_ERROR_IF(ZSTD_isError(llhSize), corruption_detected, "ZSTD_buildSeqTable failed");
135807 +            ip += llhSize;
135808 +        }
135810 +        {   size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr,
135811 +                                                      OFtype, MaxOff, OffFSELog,
135812 +                                                      ip, iend-ip,
135813 +                                                      OF_base, OF_bits,
135814 +                                                      OF_defaultDTable, dctx->fseEntropy,
135815 +                                                      dctx->ddictIsCold, nbSeq,
135816 +                                                      dctx->workspace, sizeof(dctx->workspace),
135817 +                                                      dctx->bmi2);
135818 +            RETURN_ERROR_IF(ZSTD_isError(ofhSize), corruption_detected, "ZSTD_buildSeqTable failed");
135819 +            ip += ofhSize;
135820 +        }
135822 +        {   size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr,
135823 +                                                      MLtype, MaxML, MLFSELog,
135824 +                                                      ip, iend-ip,
135825 +                                                      ML_base, ML_bits,
135826 +                                                      ML_defaultDTable, dctx->fseEntropy,
135827 +                                                      dctx->ddictIsCold, nbSeq,
135828 +                                                      dctx->workspace, sizeof(dctx->workspace),
135829 +                                                      dctx->bmi2);
135830 +            RETURN_ERROR_IF(ZSTD_isError(mlhSize), corruption_detected, "ZSTD_buildSeqTable failed");
135831 +            ip += mlhSize;
135832 +        }
135833 +    }
135835 +    return ip-istart;
135839 +typedef struct {
135840 +    size_t litLength;
135841 +    size_t matchLength;
135842 +    size_t offset;
135843 +    const BYTE* match;
135844 +} seq_t;
135846 +typedef struct {
135847 +    size_t state;
135848 +    const ZSTD_seqSymbol* table;
135849 +} ZSTD_fseState;
135851 +typedef struct {
135852 +    BIT_DStream_t DStream;
135853 +    ZSTD_fseState stateLL;
135854 +    ZSTD_fseState stateOffb;
135855 +    ZSTD_fseState stateML;
135856 +    size_t prevOffset[ZSTD_REP_NUM];
135857 +    const BYTE* prefixStart;
135858 +    const BYTE* dictEnd;
135859 +    size_t pos;
135860 +} seqState_t;
135862 +/*! ZSTD_overlapCopy8() :
135863 + *  Copies 8 bytes from ip to op and updates op and ip where ip <= op.
135864 + *  If the offset is < 8 then the offset is spread to at least 8 bytes.
135866 + *  Precondition: *ip <= *op
135867 + *  Postcondition: *op - *op >= 8
135868 + */
135869 +HINT_INLINE void ZSTD_overlapCopy8(BYTE** op, BYTE const** ip, size_t offset) {
135870 +    assert(*ip <= *op);
135871 +    if (offset < 8) {
135872 +        /* close range match, overlap */
135873 +        static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
135874 +        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
135875 +        int const sub2 = dec64table[offset];
135876 +        (*op)[0] = (*ip)[0];
135877 +        (*op)[1] = (*ip)[1];
135878 +        (*op)[2] = (*ip)[2];
135879 +        (*op)[3] = (*ip)[3];
135880 +        *ip += dec32table[offset];
135881 +        ZSTD_copy4(*op+4, *ip);
135882 +        *ip -= sub2;
135883 +    } else {
135884 +        ZSTD_copy8(*op, *ip);
135885 +    }
135886 +    *ip += 8;
135887 +    *op += 8;
135888 +    assert(*op - *ip >= 8);
135891 +/*! ZSTD_safecopy() :
135892 + *  Specialized version of memcpy() that is allowed to READ up to WILDCOPY_OVERLENGTH past the input buffer
135893 + *  and write up to 16 bytes past oend_w (op >= oend_w is allowed).
135894 + *  This function is only called in the uncommon case where the sequence is near the end of the block. It
135895 + *  should be fast for a single long sequence, but can be slow for several short sequences.
135897 + *  @param ovtype controls the overlap detection
135898 + *         - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
135899 + *         - ZSTD_overlap_src_before_dst: The src and dst may overlap and may be any distance apart.
135900 + *           The src buffer must be before the dst buffer.
135901 + */
135902 +static void ZSTD_safecopy(BYTE* op, BYTE* const oend_w, BYTE const* ip, ptrdiff_t length, ZSTD_overlap_e ovtype) {
135903 +    ptrdiff_t const diff = op - ip;
135904 +    BYTE* const oend = op + length;
135906 +    assert((ovtype == ZSTD_no_overlap && (diff <= -8 || diff >= 8 || op >= oend_w)) ||
135907 +           (ovtype == ZSTD_overlap_src_before_dst && diff >= 0));
135909 +    if (length < 8) {
135910 +        /* Handle short lengths. */
135911 +        while (op < oend) *op++ = *ip++;
135912 +        return;
135913 +    }
135914 +    if (ovtype == ZSTD_overlap_src_before_dst) {
135915 +        /* Copy 8 bytes and ensure the offset >= 8 when there can be overlap. */
135916 +        assert(length >= 8);
135917 +        ZSTD_overlapCopy8(&op, &ip, diff);
135918 +        assert(op - ip >= 8);
135919 +        assert(op <= oend);
135920 +    }
135922 +    if (oend <= oend_w) {
135923 +        /* No risk of overwrite. */
135924 +        ZSTD_wildcopy(op, ip, length, ovtype);
135925 +        return;
135926 +    }
135927 +    if (op <= oend_w) {
135928 +        /* Wildcopy until we get close to the end. */
135929 +        assert(oend > oend_w);
135930 +        ZSTD_wildcopy(op, ip, oend_w - op, ovtype);
135931 +        ip += oend_w - op;
135932 +        op = oend_w;
135933 +    }
135934 +    /* Handle the leftovers. */
135935 +    while (op < oend) *op++ = *ip++;
135938 +/* ZSTD_execSequenceEnd():
135939 + * This version handles cases that are near the end of the output buffer. It requires
135940 + * more careful checks to make sure there is no overflow. By separating out these hard
135941 + * and unlikely cases, we can speed up the common cases.
135943 + * NOTE: This function needs to be fast for a single long sequence, but doesn't need
135944 + * to be optimized for many small sequences, since those fall into ZSTD_execSequence().
135945 + */
135946 +FORCE_NOINLINE
135947 +size_t ZSTD_execSequenceEnd(BYTE* op,
135948 +                            BYTE* const oend, seq_t sequence,
135949 +                            const BYTE** litPtr, const BYTE* const litLimit,
135950 +                            const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
135952 +    BYTE* const oLitEnd = op + sequence.litLength;
135953 +    size_t const sequenceLength = sequence.litLength + sequence.matchLength;
135954 +    const BYTE* const iLitEnd = *litPtr + sequence.litLength;
135955 +    const BYTE* match = oLitEnd - sequence.offset;
135956 +    BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
135958 +    /* bounds checks : careful of address space overflow in 32-bit mode */
135959 +    RETURN_ERROR_IF(sequenceLength > (size_t)(oend - op), dstSize_tooSmall, "last match must fit within dstBuffer");
135960 +    RETURN_ERROR_IF(sequence.litLength > (size_t)(litLimit - *litPtr), corruption_detected, "try to read beyond literal buffer");
135961 +    assert(op < op + sequenceLength);
135962 +    assert(oLitEnd < op + sequenceLength);
135964 +    /* copy literals */
135965 +    ZSTD_safecopy(op, oend_w, *litPtr, sequence.litLength, ZSTD_no_overlap);
135966 +    op = oLitEnd;
135967 +    *litPtr = iLitEnd;
135969 +    /* copy Match */
135970 +    if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
135971 +        /* offset beyond prefix */
135972 +        RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected, "");
135973 +        match = dictEnd - (prefixStart-match);
135974 +        if (match + sequence.matchLength <= dictEnd) {
135975 +            ZSTD_memmove(oLitEnd, match, sequence.matchLength);
135976 +            return sequenceLength;
135977 +        }
135978 +        /* span extDict & currentPrefixSegment */
135979 +        {   size_t const length1 = dictEnd - match;
135980 +            ZSTD_memmove(oLitEnd, match, length1);
135981 +            op = oLitEnd + length1;
135982 +            sequence.matchLength -= length1;
135983 +            match = prefixStart;
135984 +    }   }
135985 +    ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst);
135986 +    return sequenceLength;
135989 +HINT_INLINE
135990 +size_t ZSTD_execSequence(BYTE* op,
135991 +                         BYTE* const oend, seq_t sequence,
135992 +                         const BYTE** litPtr, const BYTE* const litLimit,
135993 +                         const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
135995 +    BYTE* const oLitEnd = op + sequence.litLength;
135996 +    size_t const sequenceLength = sequence.litLength + sequence.matchLength;
135997 +    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
135998 +    BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;   /* risk : address space underflow on oend=NULL */
135999 +    const BYTE* const iLitEnd = *litPtr + sequence.litLength;
136000 +    const BYTE* match = oLitEnd - sequence.offset;
136002 +    assert(op != NULL /* Precondition */);
136003 +    assert(oend_w < oend /* No underflow */);
136004 +    /* Handle edge cases in a slow path:
136005 +     *   - Read beyond end of literals
136006 +     *   - Match end is within WILDCOPY_OVERLENGTH of oend
136007 +     *   - 32-bit mode and the match length overflows
136008 +     */
136009 +    if (UNLIKELY(
136010 +            iLitEnd > litLimit ||
136011 +            oMatchEnd > oend_w ||
136012 +            (MEM_32bits() && (size_t)(oend - op) < sequenceLength + WILDCOPY_OVERLENGTH)))
136013 +        return ZSTD_execSequenceEnd(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
136015 +    /* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */
136016 +    assert(op <= oLitEnd /* No overflow */);
136017 +    assert(oLitEnd < oMatchEnd /* Non-zero match & no overflow */);
136018 +    assert(oMatchEnd <= oend /* No underflow */);
136019 +    assert(iLitEnd <= litLimit /* Literal length is in bounds */);
136020 +    assert(oLitEnd <= oend_w /* Can wildcopy literals */);
136021 +    assert(oMatchEnd <= oend_w /* Can wildcopy matches */);
136023 +    /* Copy Literals:
136024 +     * Split out litLength <= 16 since it is nearly always true. +1.6% on gcc-9.
136025 +     * We likely don't need the full 32-byte wildcopy.
136026 +     */
136027 +    assert(WILDCOPY_OVERLENGTH >= 16);
136028 +    ZSTD_copy16(op, (*litPtr));
136029 +    if (UNLIKELY(sequence.litLength > 16)) {
136030 +        ZSTD_wildcopy(op+16, (*litPtr)+16, sequence.litLength-16, ZSTD_no_overlap);
136031 +    }
136032 +    op = oLitEnd;
136033 +    *litPtr = iLitEnd;   /* update for next sequence */
136035 +    /* Copy Match */
136036 +    if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
136037 +        /* offset beyond prefix -> go into extDict */
136038 +        RETURN_ERROR_IF(UNLIKELY(sequence.offset > (size_t)(oLitEnd - virtualStart)), corruption_detected, "");
136039 +        match = dictEnd + (match - prefixStart);
136040 +        if (match + sequence.matchLength <= dictEnd) {
136041 +            ZSTD_memmove(oLitEnd, match, sequence.matchLength);
136042 +            return sequenceLength;
136043 +        }
136044 +        /* span extDict & currentPrefixSegment */
136045 +        {   size_t const length1 = dictEnd - match;
136046 +            ZSTD_memmove(oLitEnd, match, length1);
136047 +            op = oLitEnd + length1;
136048 +            sequence.matchLength -= length1;
136049 +            match = prefixStart;
136050 +    }   }
136051 +    /* Match within prefix of 1 or more bytes */
136052 +    assert(op <= oMatchEnd);
136053 +    assert(oMatchEnd <= oend_w);
136054 +    assert(match >= prefixStart);
136055 +    assert(sequence.matchLength >= 1);
136057 +    /* Nearly all offsets are >= WILDCOPY_VECLEN bytes, which means we can use wildcopy
136058 +     * without overlap checking.
136059 +     */
136060 +    if (LIKELY(sequence.offset >= WILDCOPY_VECLEN)) {
136061 +        /* We bet on a full wildcopy for matches, since we expect matches to be
136062 +         * longer than literals (in general). In silesia, ~10% of matches are longer
136063 +         * than 16 bytes.
136064 +         */
136065 +        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap);
136066 +        return sequenceLength;
136067 +    }
136068 +    assert(sequence.offset < WILDCOPY_VECLEN);
136070 +    /* Copy 8 bytes and spread the offset to be >= 8. */
136071 +    ZSTD_overlapCopy8(&op, &match, sequence.offset);
136073 +    /* If the match length is > 8 bytes, then continue with the wildcopy. */
136074 +    if (sequence.matchLength > 8) {
136075 +        assert(op < oMatchEnd);
136076 +        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst);
136077 +    }
136078 +    return sequenceLength;
136081 +static void
136082 +ZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, const ZSTD_seqSymbol* dt)
136084 +    const void* ptr = dt;
136085 +    const ZSTD_seqSymbol_header* const DTableH = (const ZSTD_seqSymbol_header*)ptr;
136086 +    DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
136087 +    DEBUGLOG(6, "ZSTD_initFseState : val=%u using %u bits",
136088 +                (U32)DStatePtr->state, DTableH->tableLog);
136089 +    BIT_reloadDStream(bitD);
136090 +    DStatePtr->table = dt + 1;
136093 +FORCE_INLINE_TEMPLATE void
136094 +ZSTD_updateFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD)
136096 +    ZSTD_seqSymbol const DInfo = DStatePtr->table[DStatePtr->state];
136097 +    U32 const nbBits = DInfo.nbBits;
136098 +    size_t const lowBits = BIT_readBits(bitD, nbBits);
136099 +    DStatePtr->state = DInfo.nextState + lowBits;
136102 +FORCE_INLINE_TEMPLATE void
136103 +ZSTD_updateFseStateWithDInfo(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, ZSTD_seqSymbol const DInfo)
136105 +    U32 const nbBits = DInfo.nbBits;
136106 +    size_t const lowBits = BIT_readBits(bitD, nbBits);
136107 +    DStatePtr->state = DInfo.nextState + lowBits;
136110 +/* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum
136111 + * offset bits. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1)
136112 + * bits before reloading. This value is the maximum number of bytes we read
136113 + * after reloading when we are decoding long offsets.
136114 + */
136115 +#define LONG_OFFSETS_MAX_EXTRA_BITS_32                       \
136116 +    (ZSTD_WINDOWLOG_MAX_32 > STREAM_ACCUMULATOR_MIN_32       \
136117 +        ? ZSTD_WINDOWLOG_MAX_32 - STREAM_ACCUMULATOR_MIN_32  \
136118 +        : 0)
136120 +typedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset_e;
136121 +typedef enum { ZSTD_p_noPrefetch=0, ZSTD_p_prefetch=1 } ZSTD_prefetch_e;
136123 +FORCE_INLINE_TEMPLATE seq_t
136124 +ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets, const ZSTD_prefetch_e prefetch)
136126 +    seq_t seq;
136127 +    ZSTD_seqSymbol const llDInfo = seqState->stateLL.table[seqState->stateLL.state];
136128 +    ZSTD_seqSymbol const mlDInfo = seqState->stateML.table[seqState->stateML.state];
136129 +    ZSTD_seqSymbol const ofDInfo = seqState->stateOffb.table[seqState->stateOffb.state];
136130 +    U32 const llBase = llDInfo.baseValue;
136131 +    U32 const mlBase = mlDInfo.baseValue;
136132 +    U32 const ofBase = ofDInfo.baseValue;
136133 +    BYTE const llBits = llDInfo.nbAdditionalBits;
136134 +    BYTE const mlBits = mlDInfo.nbAdditionalBits;
136135 +    BYTE const ofBits = ofDInfo.nbAdditionalBits;
136136 +    BYTE const totalBits = llBits+mlBits+ofBits;
136138 +    /* sequence */
136139 +    {   size_t offset;
136140 +        if (ofBits > 1) {
136141 +            ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
136142 +            ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
136143 +            assert(ofBits <= MaxOff);
136144 +            if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) {
136145 +                U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed);
136146 +                offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
136147 +                BIT_reloadDStream(&seqState->DStream);
136148 +                if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
136149 +                assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32);   /* to avoid another reload */
136150 +            } else {
136151 +                offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/);   /* <=  (ZSTD_WINDOWLOG_MAX-1) bits */
136152 +                if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
136153 +            }
136154 +            seqState->prevOffset[2] = seqState->prevOffset[1];
136155 +            seqState->prevOffset[1] = seqState->prevOffset[0];
136156 +            seqState->prevOffset[0] = offset;
136157 +        } else {
136158 +            U32 const ll0 = (llBase == 0);
136159 +            if (LIKELY((ofBits == 0))) {
136160 +                if (LIKELY(!ll0))
136161 +                    offset = seqState->prevOffset[0];
136162 +                else {
136163 +                    offset = seqState->prevOffset[1];
136164 +                    seqState->prevOffset[1] = seqState->prevOffset[0];
136165 +                    seqState->prevOffset[0] = offset;
136166 +                }
136167 +            } else {
136168 +                offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1);
136169 +                {   size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
136170 +                    temp += !temp;   /* 0 is not valid; input is corrupted; force offset to 1 */
136171 +                    if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
136172 +                    seqState->prevOffset[1] = seqState->prevOffset[0];
136173 +                    seqState->prevOffset[0] = offset = temp;
136174 +        }   }   }
136175 +        seq.offset = offset;
136176 +    }
136178 +    seq.matchLength = mlBase;
136179 +    if (mlBits > 0)
136180 +        seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/);
136182 +    if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
136183 +        BIT_reloadDStream(&seqState->DStream);
136184 +    if (MEM_64bits() && UNLIKELY(totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
136185 +        BIT_reloadDStream(&seqState->DStream);
136186 +    /* Ensure there are enough bits to read the rest of data in 64-bit mode. */
136187 +    ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);
136189 +    seq.litLength = llBase;
136190 +    if (llBits > 0)
136191 +        seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits/*>0*/);
136193 +    if (MEM_32bits())
136194 +        BIT_reloadDStream(&seqState->DStream);
136196 +    DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u",
136197 +                (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
136199 +    if (prefetch == ZSTD_p_prefetch) {
136200 +        size_t const pos = seqState->pos + seq.litLength;
136201 +        const BYTE* const matchBase = (seq.offset > pos) ? seqState->dictEnd : seqState->prefixStart;
136202 +        seq.match = matchBase + pos - seq.offset;  /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted.
136203 +                                                    * No consequence though : no memory access will occur, offset is only used for prefetching */
136204 +        seqState->pos = pos + seq.matchLength;
136205 +    }
136207 +    /* ANS state update
136208 +     * gcc-9.0.0 does 2.5% worse with ZSTD_updateFseStateWithDInfo().
136209 +     * clang-9.2.0 does 7% worse with ZSTD_updateFseState().
136210 +     * Naturally it seems like ZSTD_updateFseStateWithDInfo() should be the
136211 +     * better option, so it is the default for other compilers. But, if you
136212 +     * measure that it is worse, please put up a pull request.
136213 +     */
136214 +    {
136215 +#if !defined(__clang__)
136216 +        const int kUseUpdateFseState = 1;
136217 +#else
136218 +        const int kUseUpdateFseState = 0;
136219 +#endif
136220 +        if (kUseUpdateFseState) {
136221 +            ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream);    /* <=  9 bits */
136222 +            ZSTD_updateFseState(&seqState->stateML, &seqState->DStream);    /* <=  9 bits */
136223 +            if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);    /* <= 18 bits */
136224 +            ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream);  /* <=  8 bits */
136225 +        } else {
136226 +            ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llDInfo);    /* <=  9 bits */
136227 +            ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlDInfo);    /* <=  9 bits */
136228 +            if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);    /* <= 18 bits */
136229 +            ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofDInfo);  /* <=  8 bits */
136230 +        }
136231 +    }
136233 +    return seq;
136236 +#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
136237 +MEM_STATIC int ZSTD_dictionaryIsActive(ZSTD_DCtx const* dctx, BYTE const* prefixStart, BYTE const* oLitEnd)
136239 +    size_t const windowSize = dctx->fParams.windowSize;
136240 +    /* No dictionary used. */
136241 +    if (dctx->dictContentEndForFuzzing == NULL) return 0;
136242 +    /* Dictionary is our prefix. */
136243 +    if (prefixStart == dctx->dictContentBeginForFuzzing) return 1;
136244 +    /* Dictionary is not our ext-dict. */
136245 +    if (dctx->dictEnd != dctx->dictContentEndForFuzzing) return 0;
136246 +    /* Dictionary is not within our window size. */
136247 +    if ((size_t)(oLitEnd - prefixStart) >= windowSize) return 0;
136248 +    /* Dictionary is active. */
136249 +    return 1;
136252 +MEM_STATIC void ZSTD_assertValidSequence(
136253 +        ZSTD_DCtx const* dctx,
136254 +        BYTE const* op, BYTE const* oend,
136255 +        seq_t const seq,
136256 +        BYTE const* prefixStart, BYTE const* virtualStart)
136258 +#if DEBUGLEVEL >= 1
136259 +    size_t const windowSize = dctx->fParams.windowSize;
136260 +    size_t const sequenceSize = seq.litLength + seq.matchLength;
136261 +    BYTE const* const oLitEnd = op + seq.litLength;
136262 +    DEBUGLOG(6, "Checking sequence: litL=%u matchL=%u offset=%u",
136263 +            (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
136264 +    assert(op <= oend);
136265 +    assert((size_t)(oend - op) >= sequenceSize);
136266 +    assert(sequenceSize <= ZSTD_BLOCKSIZE_MAX);
136267 +    if (ZSTD_dictionaryIsActive(dctx, prefixStart, oLitEnd)) {
136268 +        size_t const dictSize = (size_t)((char const*)dctx->dictContentEndForFuzzing - (char const*)dctx->dictContentBeginForFuzzing);
136269 +        /* Offset must be within the dictionary. */
136270 +        assert(seq.offset <= (size_t)(oLitEnd - virtualStart));
136271 +        assert(seq.offset <= windowSize + dictSize);
136272 +    } else {
136273 +        /* Offset must be within our window. */
136274 +        assert(seq.offset <= windowSize);
136275 +    }
136276 +#else
136277 +    (void)dctx, (void)op, (void)oend, (void)seq, (void)prefixStart, (void)virtualStart;
136278 +#endif
136280 +#endif
136282 +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
136283 +FORCE_INLINE_TEMPLATE size_t
136284 +DONT_VECTORIZE
136285 +ZSTD_decompressSequences_body( ZSTD_DCtx* dctx,
136286 +                               void* dst, size_t maxDstSize,
136287 +                         const void* seqStart, size_t seqSize, int nbSeq,
136288 +                         const ZSTD_longOffset_e isLongOffset,
136289 +                         const int frame)
136291 +    const BYTE* ip = (const BYTE*)seqStart;
136292 +    const BYTE* const iend = ip + seqSize;
136293 +    BYTE* const ostart = (BYTE*)dst;
136294 +    BYTE* const oend = ostart + maxDstSize;
136295 +    BYTE* op = ostart;
136296 +    const BYTE* litPtr = dctx->litPtr;
136297 +    const BYTE* const litEnd = litPtr + dctx->litSize;
136298 +    const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
136299 +    const BYTE* const vBase = (const BYTE*) (dctx->virtualStart);
136300 +    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
136301 +    DEBUGLOG(5, "ZSTD_decompressSequences_body");
136302 +    (void)frame;
136304 +    /* Regen sequences */
136305 +    if (nbSeq) {
136306 +        seqState_t seqState;
136307 +        size_t error = 0;
136308 +        dctx->fseEntropy = 1;
136309 +        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
136310 +        RETURN_ERROR_IF(
136311 +            ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
136312 +            corruption_detected, "");
136313 +        ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
136314 +        ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
136315 +        ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
136316 +        assert(dst != NULL);
136318 +        ZSTD_STATIC_ASSERT(
136319 +                BIT_DStream_unfinished < BIT_DStream_completed &&
136320 +                BIT_DStream_endOfBuffer < BIT_DStream_completed &&
136321 +                BIT_DStream_completed < BIT_DStream_overflow);
136323 +#if defined(__x86_64__)
136324 +        /* Align the decompression loop to 32 + 16 bytes.
136325 +         *
136326 +         * zstd compiled with gcc-9 on an Intel i9-9900k shows 10% decompression
136327 +         * speed swings based on the alignment of the decompression loop. This
136328 +         * performance swing is caused by parts of the decompression loop falling
136329 +         * out of the DSB. The entire decompression loop should fit in the DSB,
136330 +         * when it can't we get much worse performance. You can measure if you've
136331 +         * hit the good case or the bad case with this perf command for some
136332 +         * compressed file test.zst:
136333 +         *
136334 +         *   perf stat -e cycles -e instructions -e idq.all_dsb_cycles_any_uops \
136335 +         *             -e idq.all_mite_cycles_any_uops -- ./zstd -tq test.zst
136336 +         *
136337 +         * If you see most cycles served out of the MITE you've hit the bad case.
136338 +         * If you see most cycles served out of the DSB you've hit the good case.
136339 +         * If it is pretty even then you may be in an okay case.
136340 +         *
136341 +         * I've been able to reproduce this issue on the following CPUs:
136342 +         *   - Kabylake: Macbook Pro (15-inch, 2019) 2.4 GHz Intel Core i9
136343 +         *               Use Instruments->Counters to get DSB/MITE cycles.
136344 +         *               I never got performance swings, but I was able to
136345 +         *               go from the good case of mostly DSB to half of the
136346 +         *               cycles served from MITE.
136347 +         *   - Coffeelake: Intel i9-9900k
136348 +         *
136349 +         * I haven't been able to reproduce the instability or DSB misses on any
136350 +         * of the following CPUS:
136351 +         *   - Haswell
136352 +         *   - Broadwell: Intel(R) Xeon(R) CPU E5-2680 v4 @ 2.40GHz
136353 +         *   - Skylake
136354 +         *
136355 +         * If you are seeing performance instability this script can help test.
136356 +         * It tests on 4 commits in zstd where I saw performance change.
136357 +         *
136358 +         *   https://gist.github.com/terrelln/9889fc06a423fd5ca6e99351564473f4
136359 +         */
136360 +        __asm__(".p2align 5");
136361 +        __asm__("nop");
136362 +        __asm__(".p2align 4");
136363 +#endif
136364 +        for ( ; ; ) {
136365 +            seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_noPrefetch);
136366 +            size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd);
136367 +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
136368 +            assert(!ZSTD_isError(oneSeqSize));
136369 +            if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
136370 +#endif
136371 +            DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
136372 +            BIT_reloadDStream(&(seqState.DStream));
136373 +            op += oneSeqSize;
136374 +            /* gcc and clang both don't like early returns in this loop.
136375 +             * Instead break and check for an error at the end of the loop.
136376 +             */
136377 +            if (UNLIKELY(ZSTD_isError(oneSeqSize))) {
136378 +                error = oneSeqSize;
136379 +                break;
136380 +            }
136381 +            if (UNLIKELY(!--nbSeq)) break;
136382 +        }
136384 +        /* check if reached exact end */
136385 +        DEBUGLOG(5, "ZSTD_decompressSequences_body: after decode loop, remaining nbSeq : %i", nbSeq);
136386 +        if (ZSTD_isError(error)) return error;
136387 +        RETURN_ERROR_IF(nbSeq, corruption_detected, "");
136388 +        RETURN_ERROR_IF(BIT_reloadDStream(&seqState.DStream) < BIT_DStream_completed, corruption_detected, "");
136389 +        /* save reps for next block */
136390 +        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
136391 +    }
136393 +    /* last literal segment */
136394 +    {   size_t const lastLLSize = litEnd - litPtr;
136395 +        RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
136396 +        if (op != NULL) {
136397 +            ZSTD_memcpy(op, litPtr, lastLLSize);
136398 +            op += lastLLSize;
136399 +        }
136400 +    }
136402 +    return op-ostart;
136405 +static size_t
136406 +ZSTD_decompressSequences_default(ZSTD_DCtx* dctx,
136407 +                                 void* dst, size_t maxDstSize,
136408 +                           const void* seqStart, size_t seqSize, int nbSeq,
136409 +                           const ZSTD_longOffset_e isLongOffset,
136410 +                           const int frame)
136412 +    return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
136414 +#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
136416 +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
136417 +FORCE_INLINE_TEMPLATE size_t
136418 +ZSTD_decompressSequencesLong_body(
136419 +                               ZSTD_DCtx* dctx,
136420 +                               void* dst, size_t maxDstSize,
136421 +                         const void* seqStart, size_t seqSize, int nbSeq,
136422 +                         const ZSTD_longOffset_e isLongOffset,
136423 +                         const int frame)
136425 +    const BYTE* ip = (const BYTE*)seqStart;
136426 +    const BYTE* const iend = ip + seqSize;
136427 +    BYTE* const ostart = (BYTE*)dst;
136428 +    BYTE* const oend = ostart + maxDstSize;
136429 +    BYTE* op = ostart;
136430 +    const BYTE* litPtr = dctx->litPtr;
136431 +    const BYTE* const litEnd = litPtr + dctx->litSize;
136432 +    const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart);
136433 +    const BYTE* const dictStart = (const BYTE*) (dctx->virtualStart);
136434 +    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
136435 +    (void)frame;
136437 +    /* Regen sequences */
136438 +    if (nbSeq) {
136439 +#define STORED_SEQS 4
136440 +#define STORED_SEQS_MASK (STORED_SEQS-1)
136441 +#define ADVANCED_SEQS 4
136442 +        seq_t sequences[STORED_SEQS];
136443 +        int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS);
136444 +        seqState_t seqState;
136445 +        int seqNb;
136446 +        dctx->fseEntropy = 1;
136447 +        { int i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
136448 +        seqState.prefixStart = prefixStart;
136449 +        seqState.pos = (size_t)(op-prefixStart);
136450 +        seqState.dictEnd = dictEnd;
136451 +        assert(dst != NULL);
136452 +        assert(iend >= ip);
136453 +        RETURN_ERROR_IF(
136454 +            ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)),
136455 +            corruption_detected, "");
136456 +        ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
136457 +        ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
136458 +        ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
136460 +        /* prepare in advance */
136461 +        for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && (seqNb<seqAdvance); seqNb++) {
136462 +            sequences[seqNb] = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_prefetch);
136463 +            PREFETCH_L1(sequences[seqNb].match); PREFETCH_L1(sequences[seqNb].match + sequences[seqNb].matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
136464 +        }
136465 +        RETURN_ERROR_IF(seqNb<seqAdvance, corruption_detected, "");
136467 +        /* decode and decompress */
136468 +        for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb<nbSeq) ; seqNb++) {
136469 +            seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_prefetch);
136470 +            size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
136471 +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
136472 +            assert(!ZSTD_isError(oneSeqSize));
136473 +            if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart);
136474 +#endif
136475 +            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
136476 +            PREFETCH_L1(sequence.match); PREFETCH_L1(sequence.match + sequence.matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
136477 +            sequences[seqNb & STORED_SEQS_MASK] = sequence;
136478 +            op += oneSeqSize;
136479 +        }
136480 +        RETURN_ERROR_IF(seqNb<nbSeq, corruption_detected, "");
136482 +        /* finish queue */
136483 +        seqNb -= seqAdvance;
136484 +        for ( ; seqNb<nbSeq ; seqNb++) {
136485 +            size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[seqNb&STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
136486 +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
136487 +            assert(!ZSTD_isError(oneSeqSize));
136488 +            if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart);
136489 +#endif
136490 +            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
136491 +            op += oneSeqSize;
136492 +        }
136494 +        /* save reps for next block */
136495 +        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
136496 +    }
136498 +    /* last literal segment */
136499 +    {   size_t const lastLLSize = litEnd - litPtr;
136500 +        RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
136501 +        if (op != NULL) {
136502 +            ZSTD_memcpy(op, litPtr, lastLLSize);
136503 +            op += lastLLSize;
136504 +        }
136505 +    }
136507 +    return op-ostart;
136510 +static size_t
136511 +ZSTD_decompressSequencesLong_default(ZSTD_DCtx* dctx,
136512 +                                 void* dst, size_t maxDstSize,
136513 +                           const void* seqStart, size_t seqSize, int nbSeq,
136514 +                           const ZSTD_longOffset_e isLongOffset,
136515 +                           const int frame)
136517 +    return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
136519 +#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
136523 +#if DYNAMIC_BMI2
136525 +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
136526 +static TARGET_ATTRIBUTE("bmi2") size_t
136527 +DONT_VECTORIZE
136528 +ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx,
136529 +                                 void* dst, size_t maxDstSize,
136530 +                           const void* seqStart, size_t seqSize, int nbSeq,
136531 +                           const ZSTD_longOffset_e isLongOffset,
136532 +                           const int frame)
136534 +    return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
136536 +#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
136538 +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
136539 +static TARGET_ATTRIBUTE("bmi2") size_t
136540 +ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx,
136541 +                                 void* dst, size_t maxDstSize,
136542 +                           const void* seqStart, size_t seqSize, int nbSeq,
136543 +                           const ZSTD_longOffset_e isLongOffset,
136544 +                           const int frame)
136546 +    return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
136548 +#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
136550 +#endif /* DYNAMIC_BMI2 */
136552 +typedef size_t (*ZSTD_decompressSequences_t)(
136553 +                            ZSTD_DCtx* dctx,
136554 +                            void* dst, size_t maxDstSize,
136555 +                            const void* seqStart, size_t seqSize, int nbSeq,
136556 +                            const ZSTD_longOffset_e isLongOffset,
136557 +                            const int frame);
136559 +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
136560 +static size_t
136561 +ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
136562 +                   const void* seqStart, size_t seqSize, int nbSeq,
136563 +                   const ZSTD_longOffset_e isLongOffset,
136564 +                   const int frame)
136566 +    DEBUGLOG(5, "ZSTD_decompressSequences");
136567 +#if DYNAMIC_BMI2
136568 +    if (dctx->bmi2) {
136569 +        return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
136570 +    }
136571 +#endif
136572 +  return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
136574 +#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */
136577 +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
136578 +/* ZSTD_decompressSequencesLong() :
136579 + * decompression function triggered when a minimum share of offsets is considered "long",
136580 + * aka out of cache.
136581 + * note : "long" definition seems overloaded here, sometimes meaning "wider than bitstream register", and sometimes meaning "farther than memory cache distance".
136582 + * This function will try to mitigate main memory latency through the use of prefetching */
136583 +static size_t
136584 +ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx,
136585 +                             void* dst, size_t maxDstSize,
136586 +                             const void* seqStart, size_t seqSize, int nbSeq,
136587 +                             const ZSTD_longOffset_e isLongOffset,
136588 +                             const int frame)
136590 +    DEBUGLOG(5, "ZSTD_decompressSequencesLong");
136591 +#if DYNAMIC_BMI2
136592 +    if (dctx->bmi2) {
136593 +        return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
136594 +    }
136595 +#endif
136596 +  return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
136598 +#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */
136602 +#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
136603 +    !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
136604 +/* ZSTD_getLongOffsetsShare() :
136605 + * condition : offTable must be valid
136606 + * @return : "share" of long offsets (arbitrarily defined as > (1<<23))
136607 + *           compared to maximum possible of (1<<OffFSELog) */
136608 +static unsigned
136609 +ZSTD_getLongOffsetsShare(const ZSTD_seqSymbol* offTable)
136611 +    const void* ptr = offTable;
136612 +    U32 const tableLog = ((const ZSTD_seqSymbol_header*)ptr)[0].tableLog;
136613 +    const ZSTD_seqSymbol* table = offTable + 1;
136614 +    U32 const max = 1 << tableLog;
136615 +    U32 u, total = 0;
136616 +    DEBUGLOG(5, "ZSTD_getLongOffsetsShare: (tableLog=%u)", tableLog);
136618 +    assert(max <= (1 << OffFSELog));  /* max not too large */
136619 +    for (u=0; u<max; u++) {
136620 +        if (table[u].nbAdditionalBits > 22) total += 1;
136621 +    }
136623 +    assert(tableLog <= OffFSELog);
136624 +    total <<= (OffFSELog - tableLog);  /* scale to OffFSELog */
136626 +    return total;
136628 +#endif
136630 +size_t
136631 +ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
136632 +                              void* dst, size_t dstCapacity,
136633 +                        const void* src, size_t srcSize, const int frame)
136634 +{   /* blockType == blockCompressed */
136635 +    const BYTE* ip = (const BYTE*)src;
136636 +    /* isLongOffset must be true if there are long offsets.
136637 +     * Offsets are long if they are larger than 2^STREAM_ACCUMULATOR_MIN.
136638 +     * We don't expect that to be the case in 64-bit mode.
136639 +     * In block mode, window size is not known, so we have to be conservative.
136640 +     * (note: but it could be evaluated from current-lowLimit)
136641 +     */
136642 +    ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || (dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN))));
136643 +    DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize);
136645 +    RETURN_ERROR_IF(srcSize >= ZSTD_BLOCKSIZE_MAX, srcSize_wrong, "");
136647 +    /* Decode literals section */
136648 +    {   size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
136649 +        DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : %u", (U32)litCSize);
136650 +        if (ZSTD_isError(litCSize)) return litCSize;
136651 +        ip += litCSize;
136652 +        srcSize -= litCSize;
136653 +    }
136655 +    /* Build Decoding Tables */
136656 +    {
136657 +        /* These macros control at build-time which decompressor implementation
136658 +         * we use. If neither is defined, we do some inspection and dispatch at
136659 +         * runtime.
136660 +         */
136661 +#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
136662 +    !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
136663 +        int usePrefetchDecoder = dctx->ddictIsCold;
136664 +#endif
136665 +        int nbSeq;
136666 +        size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize);
136667 +        if (ZSTD_isError(seqHSize)) return seqHSize;
136668 +        ip += seqHSize;
136669 +        srcSize -= seqHSize;
136671 +        RETURN_ERROR_IF(dst == NULL && nbSeq > 0, dstSize_tooSmall, "NULL not handled");
136673 +#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
136674 +    !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
136675 +        if ( !usePrefetchDecoder
136676 +          && (!frame || (dctx->fParams.windowSize > (1<<24)))
136677 +          && (nbSeq>ADVANCED_SEQS) ) {  /* could probably use a larger nbSeq limit */
136678 +            U32 const shareLongOffsets = ZSTD_getLongOffsetsShare(dctx->OFTptr);
136679 +            U32 const minShare = MEM_64bits() ? 7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */
136680 +            usePrefetchDecoder = (shareLongOffsets >= minShare);
136681 +        }
136682 +#endif
136684 +        dctx->ddictIsCold = 0;
136686 +#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
136687 +    !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
136688 +        if (usePrefetchDecoder)
136689 +#endif
136690 +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
136691 +            return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
136692 +#endif
136694 +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG
136695 +        /* else */
136696 +        return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
136697 +#endif
136698 +    }
136702 +void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize)
136704 +    if (dst != dctx->previousDstEnd && dstSize > 0) {   /* not contiguous */
136705 +        dctx->dictEnd = dctx->previousDstEnd;
136706 +        dctx->virtualStart = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart));
136707 +        dctx->prefixStart = dst;
136708 +        dctx->previousDstEnd = dst;
136709 +    }
136713 +size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx,
136714 +                            void* dst, size_t dstCapacity,
136715 +                      const void* src, size_t srcSize)
136717 +    size_t dSize;
136718 +    ZSTD_checkContinuity(dctx, dst, dstCapacity);
136719 +    dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0);
136720 +    dctx->previousDstEnd = (char*)dst + dSize;
136721 +    return dSize;
136723 diff --git a/lib/zstd/decompress/zstd_decompress_block.h b/lib/zstd/decompress/zstd_decompress_block.h
136724 new file mode 100644
136725 index 000000000000..e7f5f6689459
136726 --- /dev/null
136727 +++ b/lib/zstd/decompress/zstd_decompress_block.h
136728 @@ -0,0 +1,62 @@
136730 + * Copyright (c) Yann Collet, Facebook, Inc.
136731 + * All rights reserved.
136733 + * This source code is licensed under both the BSD-style license (found in the
136734 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
136735 + * in the COPYING file in the root directory of this source tree).
136736 + * You may select, at your option, one of the above-listed licenses.
136737 + */
136740 +#ifndef ZSTD_DEC_BLOCK_H
136741 +#define ZSTD_DEC_BLOCK_H
136743 +/*-*******************************************************
136744 + *  Dependencies
136745 + *********************************************************/
136746 +#include "../common/zstd_deps.h"   /* size_t */
136747 +#include <linux/zstd.h>    /* DCtx, and some public functions */
136748 +#include "../common/zstd_internal.h"  /* blockProperties_t, and some public functions */
136749 +#include "zstd_decompress_internal.h"  /* ZSTD_seqSymbol */
136752 +/* ===   Prototypes   === */
136754 +/* note: prototypes already published within `zstd.h` :
136755 + * ZSTD_decompressBlock()
136756 + */
136758 +/* note: prototypes already published within `zstd_internal.h` :
136759 + * ZSTD_getcBlockSize()
136760 + * ZSTD_decodeSeqHeaders()
136761 + */
136764 +/* ZSTD_decompressBlock_internal() :
136765 + * decompress block, starting at `src`,
136766 + * into destination buffer `dst`.
136767 + * @return : decompressed block size,
136768 + *           or an error code (which can be tested using ZSTD_isError())
136769 + */
136770 +size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
136771 +                               void* dst, size_t dstCapacity,
136772 +                         const void* src, size_t srcSize, const int frame);
136774 +/* ZSTD_buildFSETable() :
136775 + * generate FSE decoding table for one symbol (ll, ml or off)
136776 + * this function must be called with valid parameters only
136777 + * (dt is large enough, normalizedCounter distribution total is a power of 2, max is within range, etc.)
136778 + * in which case it cannot fail.
136779 + * The workspace must be 4-byte aligned and at least ZSTD_BUILD_FSE_TABLE_WKSP_SIZE bytes, which is
136780 + * defined in zstd_decompress_internal.h.
136781 + * Internal use only.
136782 + */
136783 +void ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
136784 +             const short* normalizedCounter, unsigned maxSymbolValue,
136785 +             const U32* baseValue, const U32* nbAdditionalBits,
136786 +                   unsigned tableLog, void* wksp, size_t wkspSize,
136787 +                   int bmi2);
136790 +#endif /* ZSTD_DEC_BLOCK_H */
136791 diff --git a/lib/zstd/decompress/zstd_decompress_internal.h b/lib/zstd/decompress/zstd_decompress_internal.h
136792 new file mode 100644
136793 index 000000000000..4b9052f68755
136794 --- /dev/null
136795 +++ b/lib/zstd/decompress/zstd_decompress_internal.h
136796 @@ -0,0 +1,202 @@
136798 + * Copyright (c) Yann Collet, Facebook, Inc.
136799 + * All rights reserved.
136801 + * This source code is licensed under both the BSD-style license (found in the
136802 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
136803 + * in the COPYING file in the root directory of this source tree).
136804 + * You may select, at your option, one of the above-listed licenses.
136805 + */
136808 +/* zstd_decompress_internal:
136809 + * objects and definitions shared within lib/decompress modules */
136811 + #ifndef ZSTD_DECOMPRESS_INTERNAL_H
136812 + #define ZSTD_DECOMPRESS_INTERNAL_H
136815 +/*-*******************************************************
136816 + *  Dependencies
136817 + *********************************************************/
136818 +#include "../common/mem.h"             /* BYTE, U16, U32 */
136819 +#include "../common/zstd_internal.h"   /* ZSTD_seqSymbol */
136823 +/*-*******************************************************
136824 + *  Constants
136825 + *********************************************************/
136826 +static UNUSED_ATTR const U32 LL_base[MaxLL+1] = {
136827 +                 0,    1,    2,     3,     4,     5,     6,      7,
136828 +                 8,    9,   10,    11,    12,    13,    14,     15,
136829 +                16,   18,   20,    22,    24,    28,    32,     40,
136830 +                48,   64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
136831 +                0x2000, 0x4000, 0x8000, 0x10000 };
136833 +static UNUSED_ATTR const U32 OF_base[MaxOff+1] = {
136834 +                 0,        1,       1,       5,     0xD,     0x1D,     0x3D,     0x7D,
136835 +                 0xFD,   0x1FD,   0x3FD,   0x7FD,   0xFFD,   0x1FFD,   0x3FFD,   0x7FFD,
136836 +                 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,
136837 +                 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD, 0x1FFFFFFD, 0x3FFFFFFD, 0x7FFFFFFD };
136839 +static UNUSED_ATTR const U32 OF_bits[MaxOff+1] = {
136840 +                     0,  1,  2,  3,  4,  5,  6,  7,
136841 +                     8,  9, 10, 11, 12, 13, 14, 15,
136842 +                    16, 17, 18, 19, 20, 21, 22, 23,
136843 +                    24, 25, 26, 27, 28, 29, 30, 31 };
136845 +static UNUSED_ATTR const U32 ML_base[MaxML+1] = {
136846 +                     3,  4,  5,    6,     7,     8,     9,    10,
136847 +                    11, 12, 13,   14,    15,    16,    17,    18,
136848 +                    19, 20, 21,   22,    23,    24,    25,    26,
136849 +                    27, 28, 29,   30,    31,    32,    33,    34,
136850 +                    35, 37, 39,   41,    43,    47,    51,    59,
136851 +                    67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803,
136852 +                    0x1003, 0x2003, 0x4003, 0x8003, 0x10003 };
136855 +/*-*******************************************************
136856 + *  Decompression types
136857 + *********************************************************/
136858 + typedef struct {
136859 +     U32 fastMode;
136860 +     U32 tableLog;
136861 + } ZSTD_seqSymbol_header;
136863 + typedef struct {
136864 +     U16  nextState;
136865 +     BYTE nbAdditionalBits;
136866 +     BYTE nbBits;
136867 +     U32  baseValue;
136868 + } ZSTD_seqSymbol;
136870 + #define SEQSYMBOL_TABLE_SIZE(log)   (1 + (1 << (log)))
136872 +#define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE (sizeof(S16) * (MaxSeq + 1) + (1u << MaxFSELog) + sizeof(U64))
136873 +#define ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32 ((ZSTD_BUILD_FSE_TABLE_WKSP_SIZE + sizeof(U32) - 1) / sizeof(U32))
136875 +typedef struct {
136876 +    ZSTD_seqSymbol LLTable[SEQSYMBOL_TABLE_SIZE(LLFSELog)];    /* Note : Space reserved for FSE Tables */
136877 +    ZSTD_seqSymbol OFTable[SEQSYMBOL_TABLE_SIZE(OffFSELog)];   /* is also used as temporary workspace while building hufTable during DDict creation */
136878 +    ZSTD_seqSymbol MLTable[SEQSYMBOL_TABLE_SIZE(MLFSELog)];    /* and therefore must be at least HUF_DECOMPRESS_WORKSPACE_SIZE large */
136879 +    HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)];  /* can accommodate HUF_decompress4X */
136880 +    U32 rep[ZSTD_REP_NUM];
136881 +    U32 workspace[ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32];
136882 +} ZSTD_entropyDTables_t;
136884 +typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
136885 +               ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock,
136886 +               ZSTDds_decompressLastBlock, ZSTDds_checkChecksum,
136887 +               ZSTDds_decodeSkippableHeader, ZSTDds_skipFrame } ZSTD_dStage;
136889 +typedef enum { zdss_init=0, zdss_loadHeader,
136890 +               zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage;
136892 +typedef enum {
136893 +    ZSTD_use_indefinitely = -1,  /* Use the dictionary indefinitely */
136894 +    ZSTD_dont_use = 0,           /* Do not use the dictionary (if one exists free it) */
136895 +    ZSTD_use_once = 1            /* Use the dictionary once and set to ZSTD_dont_use */
136896 +} ZSTD_dictUses_e;
136898 +/* Hashset for storing references to multiple ZSTD_DDict within ZSTD_DCtx */
136899 +typedef struct {
136900 +    const ZSTD_DDict** ddictPtrTable;
136901 +    size_t ddictPtrTableSize;
136902 +    size_t ddictPtrCount;
136903 +} ZSTD_DDictHashSet;
136905 +struct ZSTD_DCtx_s
136907 +    const ZSTD_seqSymbol* LLTptr;
136908 +    const ZSTD_seqSymbol* MLTptr;
136909 +    const ZSTD_seqSymbol* OFTptr;
136910 +    const HUF_DTable* HUFptr;
136911 +    ZSTD_entropyDTables_t entropy;
136912 +    U32 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];   /* space needed when building huffman tables */
136913 +    const void* previousDstEnd;   /* detect continuity */
136914 +    const void* prefixStart;      /* start of current segment */
136915 +    const void* virtualStart;     /* virtual start of previous segment if it was just before current one */
136916 +    const void* dictEnd;          /* end of previous segment */
136917 +    size_t expected;
136918 +    ZSTD_frameHeader fParams;
136919 +    U64 processedCSize;
136920 +    U64 decodedSize;
136921 +    blockType_e bType;            /* used in ZSTD_decompressContinue(), store blockType between block header decoding and block decompression stages */
136922 +    ZSTD_dStage stage;
136923 +    U32 litEntropy;
136924 +    U32 fseEntropy;
136925 +    struct xxh64_state xxhState;
136926 +    size_t headerSize;
136927 +    ZSTD_format_e format;
136928 +    ZSTD_forceIgnoreChecksum_e forceIgnoreChecksum;   /* User specified: if == 1, will ignore checksums in compressed frame. Default == 0 */
136929 +    U32 validateChecksum;         /* if == 1, will validate checksum. Is == 1 if (fParams.checksumFlag == 1) and (forceIgnoreChecksum == 0). */
136930 +    const BYTE* litPtr;
136931 +    ZSTD_customMem customMem;
136932 +    size_t litSize;
136933 +    size_t rleSize;
136934 +    size_t staticSize;
136935 +    int bmi2;                     /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
136937 +    /* dictionary */
136938 +    ZSTD_DDict* ddictLocal;
136939 +    const ZSTD_DDict* ddict;     /* set by ZSTD_initDStream_usingDDict(), or ZSTD_DCtx_refDDict() */
136940 +    U32 dictID;
136941 +    int ddictIsCold;             /* if == 1 : dictionary is "new" for working context, and presumed "cold" (not in cpu cache) */
136942 +    ZSTD_dictUses_e dictUses;
136943 +    ZSTD_DDictHashSet* ddictSet;                    /* Hash set for multiple ddicts */
136944 +    ZSTD_refMultipleDDicts_e refMultipleDDicts;     /* User specified: if == 1, will allow references to multiple DDicts. Default == 0 (disabled) */
136946 +    /* streaming */
136947 +    ZSTD_dStreamStage streamStage;
136948 +    char*  inBuff;
136949 +    size_t inBuffSize;
136950 +    size_t inPos;
136951 +    size_t maxWindowSize;
136952 +    char*  outBuff;
136953 +    size_t outBuffSize;
136954 +    size_t outStart;
136955 +    size_t outEnd;
136956 +    size_t lhSize;
136957 +    void* legacyContext;
136958 +    U32 previousLegacyVersion;
136959 +    U32 legacyVersion;
136960 +    U32 hostageByte;
136961 +    int noForwardProgress;
136962 +    ZSTD_bufferMode_e outBufferMode;
136963 +    ZSTD_outBuffer expectedOutBuffer;
136965 +    /* workspace */
136966 +    BYTE litBuffer[ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH];
136967 +    BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX];
136969 +    size_t oversizedDuration;
136971 +#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
136972 +    void const* dictContentBeginForFuzzing;
136973 +    void const* dictContentEndForFuzzing;
136974 +#endif
136976 +    /* Tracing */
136977 +};  /* typedef'd to ZSTD_DCtx within "zstd.h" */
136980 +/*-*******************************************************
136981 + *  Shared internal functions
136982 + *********************************************************/
136984 +/*! ZSTD_loadDEntropy() :
136985 + *  dict : must point at beginning of a valid zstd dictionary.
136986 + * @return : size of dictionary header (size of magic number + dict ID + entropy tables) */
136987 +size_t ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy,
136988 +                   const void* const dict, size_t const dictSize);
136990 +/*! ZSTD_checkContinuity() :
136991 + *  check if next `dst` follows previous position, where decompression ended.
136992 + *  If yes, do nothing (continue on current segment).
136993 + *  If not, classify previous segment as "external dictionary", and start a new segment.
136994 + *  This function cannot fail. */
136995 +void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize);
136998 +#endif /* ZSTD_DECOMPRESS_INTERNAL_H */
136999 diff --git a/lib/zstd/decompress_sources.h b/lib/zstd/decompress_sources.h
137000 new file mode 100644
137001 index 000000000000..f35bef03eb22
137002 --- /dev/null
137003 +++ b/lib/zstd/decompress_sources.h
137004 @@ -0,0 +1,28 @@
137005 +/* SPDX-License-Identifier: GPL-2.0-only */
137007 + * Copyright (c) Facebook, Inc.
137008 + * All rights reserved.
137010 + * This source code is licensed under both the BSD-style license (found in the
137011 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
137012 + * in the COPYING file in the root directory of this source tree).
137013 + * You may select, at your option, one of the above-listed licenses.
137014 + */
137017 + * This file includes every .c file needed for decompression.
137018 + * It is used by lib/decompress_unzstd.c to include the decompression
137019 + * source into the translation-unit, so it can be used for kernel
137020 + * decompression.
137021 + */
137023 +#include "common/debug.c"
137024 +#include "common/entropy_common.c"
137025 +#include "common/error_private.c"
137026 +#include "common/fse_decompress.c"
137027 +#include "common/zstd_common.c"
137028 +#include "decompress/huf_decompress.c"
137029 +#include "decompress/zstd_ddict.c"
137030 +#include "decompress/zstd_decompress.c"
137031 +#include "decompress/zstd_decompress_block.c"
137032 +#include "zstd_decompress_module.c"
137033 diff --git a/lib/zstd/entropy_common.c b/lib/zstd/entropy_common.c
137034 deleted file mode 100644
137035 index 2b0a643c32c4..000000000000
137036 --- a/lib/zstd/entropy_common.c
137037 +++ /dev/null
137038 @@ -1,243 +0,0 @@
137040 - * Common functions of New Generation Entropy library
137041 - * Copyright (C) 2016, Yann Collet.
137043 - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
137045 - * Redistribution and use in source and binary forms, with or without
137046 - * modification, are permitted provided that the following conditions are
137047 - * met:
137049 - *   * Redistributions of source code must retain the above copyright
137050 - * notice, this list of conditions and the following disclaimer.
137051 - *   * Redistributions in binary form must reproduce the above
137052 - * copyright notice, this list of conditions and the following disclaimer
137053 - * in the documentation and/or other materials provided with the
137054 - * distribution.
137056 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
137057 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
137058 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
137059 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
137060 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
137061 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
137062 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
137063 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
137064 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
137065 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
137066 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
137068 - * This program is free software; you can redistribute it and/or modify it under
137069 - * the terms of the GNU General Public License version 2 as published by the
137070 - * Free Software Foundation. This program is dual-licensed; you may select
137071 - * either version 2 of the GNU General Public License ("GPL") or BSD license
137072 - * ("BSD").
137074 - * You can contact the author at :
137075 - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
137076 - */
137078 -/* *************************************
137079 -*  Dependencies
137080 -***************************************/
137081 -#include "error_private.h" /* ERR_*, ERROR */
137082 -#include "fse.h"
137083 -#include "huf.h"
137084 -#include "mem.h"
137086 -/*===   Version   ===*/
137087 -unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }
137089 -/*===   Error Management   ===*/
137090 -unsigned FSE_isError(size_t code) { return ERR_isError(code); }
137092 -unsigned HUF_isError(size_t code) { return ERR_isError(code); }
137094 -/*-**************************************************************
137095 -*  FSE NCount encoding-decoding
137096 -****************************************************************/
137097 -size_t FSE_readNCount(short *normalizedCounter, unsigned *maxSVPtr, unsigned *tableLogPtr, const void *headerBuffer, size_t hbSize)
137099 -       const BYTE *const istart = (const BYTE *)headerBuffer;
137100 -       const BYTE *const iend = istart + hbSize;
137101 -       const BYTE *ip = istart;
137102 -       int nbBits;
137103 -       int remaining;
137104 -       int threshold;
137105 -       U32 bitStream;
137106 -       int bitCount;
137107 -       unsigned charnum = 0;
137108 -       int previous0 = 0;
137110 -       if (hbSize < 4)
137111 -               return ERROR(srcSize_wrong);
137112 -       bitStream = ZSTD_readLE32(ip);
137113 -       nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */
137114 -       if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX)
137115 -               return ERROR(tableLog_tooLarge);
137116 -       bitStream >>= 4;
137117 -       bitCount = 4;
137118 -       *tableLogPtr = nbBits;
137119 -       remaining = (1 << nbBits) + 1;
137120 -       threshold = 1 << nbBits;
137121 -       nbBits++;
137123 -       while ((remaining > 1) & (charnum <= *maxSVPtr)) {
137124 -               if (previous0) {
137125 -                       unsigned n0 = charnum;
137126 -                       while ((bitStream & 0xFFFF) == 0xFFFF) {
137127 -                               n0 += 24;
137128 -                               if (ip < iend - 5) {
137129 -                                       ip += 2;
137130 -                                       bitStream = ZSTD_readLE32(ip) >> bitCount;
137131 -                               } else {
137132 -                                       bitStream >>= 16;
137133 -                                       bitCount += 16;
137134 -                               }
137135 -                       }
137136 -                       while ((bitStream & 3) == 3) {
137137 -                               n0 += 3;
137138 -                               bitStream >>= 2;
137139 -                               bitCount += 2;
137140 -                       }
137141 -                       n0 += bitStream & 3;
137142 -                       bitCount += 2;
137143 -                       if (n0 > *maxSVPtr)
137144 -                               return ERROR(maxSymbolValue_tooSmall);
137145 -                       while (charnum < n0)
137146 -                               normalizedCounter[charnum++] = 0;
137147 -                       if ((ip <= iend - 7) || (ip + (bitCount >> 3) <= iend - 4)) {
137148 -                               ip += bitCount >> 3;
137149 -                               bitCount &= 7;
137150 -                               bitStream = ZSTD_readLE32(ip) >> bitCount;
137151 -                       } else {
137152 -                               bitStream >>= 2;
137153 -                       }
137154 -               }
137155 -               {
137156 -                       int const max = (2 * threshold - 1) - remaining;
137157 -                       int count;
137159 -                       if ((bitStream & (threshold - 1)) < (U32)max) {
137160 -                               count = bitStream & (threshold - 1);
137161 -                               bitCount += nbBits - 1;
137162 -                       } else {
137163 -                               count = bitStream & (2 * threshold - 1);
137164 -                               if (count >= threshold)
137165 -                                       count -= max;
137166 -                               bitCount += nbBits;
137167 -                       }
137169 -                       count--;                                 /* extra accuracy */
137170 -                       remaining -= count < 0 ? -count : count; /* -1 means +1 */
137171 -                       normalizedCounter[charnum++] = (short)count;
137172 -                       previous0 = !count;
137173 -                       while (remaining < threshold) {
137174 -                               nbBits--;
137175 -                               threshold >>= 1;
137176 -                       }
137178 -                       if ((ip <= iend - 7) || (ip + (bitCount >> 3) <= iend - 4)) {
137179 -                               ip += bitCount >> 3;
137180 -                               bitCount &= 7;
137181 -                       } else {
137182 -                               bitCount -= (int)(8 * (iend - 4 - ip));
137183 -                               ip = iend - 4;
137184 -                       }
137185 -                       bitStream = ZSTD_readLE32(ip) >> (bitCount & 31);
137186 -               }
137187 -       } /* while ((remaining>1) & (charnum<=*maxSVPtr)) */
137188 -       if (remaining != 1)
137189 -               return ERROR(corruption_detected);
137190 -       if (bitCount > 32)
137191 -               return ERROR(corruption_detected);
137192 -       *maxSVPtr = charnum - 1;
137194 -       ip += (bitCount + 7) >> 3;
137195 -       return ip - istart;
137198 -/*! HUF_readStats() :
137199 -       Read compact Huffman tree, saved by HUF_writeCTable().
137200 -       `huffWeight` is destination buffer.
137201 -       `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
137202 -       @return : size read from `src` , or an error Code .
137203 -       Note : Needed by HUF_readCTable() and HUF_readDTableX?() .
137205 -size_t HUF_readStats_wksp(BYTE *huffWeight, size_t hwSize, U32 *rankStats, U32 *nbSymbolsPtr, U32 *tableLogPtr, const void *src, size_t srcSize, void *workspace, size_t workspaceSize)
137207 -       U32 weightTotal;
137208 -       const BYTE *ip = (const BYTE *)src;
137209 -       size_t iSize;
137210 -       size_t oSize;
137212 -       if (!srcSize)
137213 -               return ERROR(srcSize_wrong);
137214 -       iSize = ip[0];
137215 -       /* memset(huffWeight, 0, hwSize);   */ /* is not necessary, even though some analyzer complain ... */
137217 -       if (iSize >= 128) { /* special header */
137218 -               oSize = iSize - 127;
137219 -               iSize = ((oSize + 1) / 2);
137220 -               if (iSize + 1 > srcSize)
137221 -                       return ERROR(srcSize_wrong);
137222 -               if (oSize >= hwSize)
137223 -                       return ERROR(corruption_detected);
137224 -               ip += 1;
137225 -               {
137226 -                       U32 n;
137227 -                       for (n = 0; n < oSize; n += 2) {
137228 -                               huffWeight[n] = ip[n / 2] >> 4;
137229 -                               huffWeight[n + 1] = ip[n / 2] & 15;
137230 -                       }
137231 -               }
137232 -       } else {                                                 /* header compressed with FSE (normal case) */
137233 -               if (iSize + 1 > srcSize)
137234 -                       return ERROR(srcSize_wrong);
137235 -               oSize = FSE_decompress_wksp(huffWeight, hwSize - 1, ip + 1, iSize, 6, workspace, workspaceSize); /* max (hwSize-1) values decoded, as last one is implied */
137236 -               if (FSE_isError(oSize))
137237 -                       return oSize;
137238 -       }
137240 -       /* collect weight stats */
137241 -       memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
137242 -       weightTotal = 0;
137243 -       {
137244 -               U32 n;
137245 -               for (n = 0; n < oSize; n++) {
137246 -                       if (huffWeight[n] >= HUF_TABLELOG_MAX)
137247 -                               return ERROR(corruption_detected);
137248 -                       rankStats[huffWeight[n]]++;
137249 -                       weightTotal += (1 << huffWeight[n]) >> 1;
137250 -               }
137251 -       }
137252 -       if (weightTotal == 0)
137253 -               return ERROR(corruption_detected);
137255 -       /* get last non-null symbol weight (implied, total must be 2^n) */
137256 -       {
137257 -               U32 const tableLog = BIT_highbit32(weightTotal) + 1;
137258 -               if (tableLog > HUF_TABLELOG_MAX)
137259 -                       return ERROR(corruption_detected);
137260 -               *tableLogPtr = tableLog;
137261 -               /* determine last weight */
137262 -               {
137263 -                       U32 const total = 1 << tableLog;
137264 -                       U32 const rest = total - weightTotal;
137265 -                       U32 const verif = 1 << BIT_highbit32(rest);
137266 -                       U32 const lastWeight = BIT_highbit32(rest) + 1;
137267 -                       if (verif != rest)
137268 -                               return ERROR(corruption_detected); /* last value must be a clean power of 2 */
137269 -                       huffWeight[oSize] = (BYTE)lastWeight;
137270 -                       rankStats[lastWeight]++;
137271 -               }
137272 -       }
137274 -       /* check tree construction validity */
137275 -       if ((rankStats[1] < 2) || (rankStats[1] & 1))
137276 -               return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */
137278 -       /* results */
137279 -       *nbSymbolsPtr = (U32)(oSize + 1);
137280 -       return iSize + 1;
137282 diff --git a/lib/zstd/error_private.h b/lib/zstd/error_private.h
137283 deleted file mode 100644
137284 index 1a60b31f706c..000000000000
137285 --- a/lib/zstd/error_private.h
137286 +++ /dev/null
137287 @@ -1,53 +0,0 @@
137289 - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
137290 - * All rights reserved.
137292 - * This source code is licensed under the BSD-style license found in the
137293 - * LICENSE file in the root directory of https://github.com/facebook/zstd.
137294 - * An additional grant of patent rights can be found in the PATENTS file in the
137295 - * same directory.
137297 - * This program is free software; you can redistribute it and/or modify it under
137298 - * the terms of the GNU General Public License version 2 as published by the
137299 - * Free Software Foundation. This program is dual-licensed; you may select
137300 - * either version 2 of the GNU General Public License ("GPL") or BSD license
137301 - * ("BSD").
137302 - */
137304 -/* Note : this module is expected to remain private, do not expose it */
137306 -#ifndef ERROR_H_MODULE
137307 -#define ERROR_H_MODULE
137309 -/* ****************************************
137310 -*  Dependencies
137311 -******************************************/
137312 -#include <linux/types.h> /* size_t */
137313 -#include <linux/zstd.h>  /* enum list */
137315 -/* ****************************************
137316 -*  Compiler-specific
137317 -******************************************/
137318 -#define ERR_STATIC static __attribute__((unused))
137320 -/*-****************************************
137321 -*  Customization (error_public.h)
137322 -******************************************/
137323 -typedef ZSTD_ErrorCode ERR_enum;
137324 -#define PREFIX(name) ZSTD_error_##name
137326 -/*-****************************************
137327 -*  Error codes handling
137328 -******************************************/
137329 -#define ERROR(name) ((size_t)-PREFIX(name))
137331 -ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
137333 -ERR_STATIC ERR_enum ERR_getErrorCode(size_t code)
137335 -       if (!ERR_isError(code))
137336 -               return (ERR_enum)0;
137337 -       return (ERR_enum)(0 - code);
137340 -#endif /* ERROR_H_MODULE */
137341 diff --git a/lib/zstd/fse.h b/lib/zstd/fse.h
137342 deleted file mode 100644
137343 index 7460ab04b191..000000000000
137344 --- a/lib/zstd/fse.h
137345 +++ /dev/null
137346 @@ -1,575 +0,0 @@
137348 - * FSE : Finite State Entropy codec
137349 - * Public Prototypes declaration
137350 - * Copyright (C) 2013-2016, Yann Collet.
137352 - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
137354 - * Redistribution and use in source and binary forms, with or without
137355 - * modification, are permitted provided that the following conditions are
137356 - * met:
137358 - *   * Redistributions of source code must retain the above copyright
137359 - * notice, this list of conditions and the following disclaimer.
137360 - *   * Redistributions in binary form must reproduce the above
137361 - * copyright notice, this list of conditions and the following disclaimer
137362 - * in the documentation and/or other materials provided with the
137363 - * distribution.
137365 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
137366 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
137367 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
137368 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
137369 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
137370 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
137371 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
137372 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
137373 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
137374 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
137375 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
137377 - * This program is free software; you can redistribute it and/or modify it under
137378 - * the terms of the GNU General Public License version 2 as published by the
137379 - * Free Software Foundation. This program is dual-licensed; you may select
137380 - * either version 2 of the GNU General Public License ("GPL") or BSD license
137381 - * ("BSD").
137383 - * You can contact the author at :
137384 - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
137385 - */
137386 -#ifndef FSE_H
137387 -#define FSE_H
137389 -/*-*****************************************
137390 -*  Dependencies
137391 -******************************************/
137392 -#include <linux/types.h> /* size_t, ptrdiff_t */
137394 -/*-*****************************************
137395 -*  FSE_PUBLIC_API : control library symbols visibility
137396 -******************************************/
137397 -#define FSE_PUBLIC_API
137399 -/*------   Version   ------*/
137400 -#define FSE_VERSION_MAJOR 0
137401 -#define FSE_VERSION_MINOR 9
137402 -#define FSE_VERSION_RELEASE 0
137404 -#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE
137405 -#define FSE_QUOTE(str) #str
137406 -#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str)
137407 -#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION)
137409 -#define FSE_VERSION_NUMBER (FSE_VERSION_MAJOR * 100 * 100 + FSE_VERSION_MINOR * 100 + FSE_VERSION_RELEASE)
137410 -FSE_PUBLIC_API unsigned FSE_versionNumber(void); /**< library version number; to be used when checking dll version */
137412 -/*-*****************************************
137413 -*  Tool functions
137414 -******************************************/
137415 -FSE_PUBLIC_API size_t FSE_compressBound(size_t size); /* maximum compressed size */
137417 -/* Error Management */
137418 -FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return value is an error code */
137420 -/*-*****************************************
137421 -*  FSE detailed API
137422 -******************************************/
137424 -FSE_compress() does the following:
137425 -1. count symbol occurrence from source[] into table count[]
137426 -2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)
137427 -3. save normalized counters to memory buffer using writeNCount()
137428 -4. build encoding table 'CTable' from normalized counters
137429 -5. encode the data stream using encoding table 'CTable'
137431 -FSE_decompress() does the following:
137432 -1. read normalized counters with readNCount()
137433 -2. build decoding table 'DTable' from normalized counters
137434 -3. decode the data stream using decoding table 'DTable'
137436 -The following API allows targeting specific sub-functions for advanced tasks.
137437 -For example, it's possible to compress several blocks using the same 'CTable',
137438 -or to save and provide normalized distribution using external method.
137441 -/* *** COMPRESSION *** */
137442 -/*! FSE_optimalTableLog():
137443 -       dynamically downsize 'tableLog' when conditions are met.
137444 -       It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.
137445 -       @return : recommended tableLog (necessarily <= 'maxTableLog') */
137446 -FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
137448 -/*! FSE_normalizeCount():
137449 -       normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)
137450 -       'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).
137451 -       @return : tableLog,
137452 -                         or an errorCode, which can be tested using FSE_isError() */
137453 -FSE_PUBLIC_API size_t FSE_normalizeCount(short *normalizedCounter, unsigned tableLog, const unsigned *count, size_t srcSize, unsigned maxSymbolValue);
137455 -/*! FSE_NCountWriteBound():
137456 -       Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'.
137457 -       Typically useful for allocation purpose. */
137458 -FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog);
137460 -/*! FSE_writeNCount():
137461 -       Compactly save 'normalizedCounter' into 'buffer'.
137462 -       @return : size of the compressed table,
137463 -                         or an errorCode, which can be tested using FSE_isError(). */
137464 -FSE_PUBLIC_API size_t FSE_writeNCount(void *buffer, size_t bufferSize, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
137466 -/*! Constructor and Destructor of FSE_CTable.
137467 -       Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */
137468 -typedef unsigned FSE_CTable; /* don't allocate that. It's only meant to be more restrictive than void* */
137470 -/*! FSE_compress_usingCTable():
137471 -       Compress `src` using `ct` into `dst` which must be already allocated.
137472 -       @return : size of compressed data (<= `dstCapacity`),
137473 -                         or 0 if compressed data could not fit into `dst`,
137474 -                         or an errorCode, which can be tested using FSE_isError() */
137475 -FSE_PUBLIC_API size_t FSE_compress_usingCTable(void *dst, size_t dstCapacity, const void *src, size_t srcSize, const FSE_CTable *ct);
137478 -Tutorial :
137479 -----------
137480 -The first step is to count all symbols. FSE_count() does this job very fast.
137481 -Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells.
137482 -'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0]
137483 -maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value)
137484 -FSE_count() will return the number of occurrence of the most frequent symbol.
137485 -This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility.
137486 -If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
137488 -The next step is to normalize the frequencies.
137489 -FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'.
137490 -It also guarantees a minimum of 1 to any Symbol with frequency >= 1.
137491 -You can use 'tableLog'==0 to mean "use default tableLog value".
137492 -If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(),
137493 -which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default").
137495 -The result of FSE_normalizeCount() will be saved into a table,
137496 -called 'normalizedCounter', which is a table of signed short.
137497 -'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells.
137498 -The return value is tableLog if everything proceeded as expected.
137499 -It is 0 if there is a single symbol within distribution.
137500 -If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()).
137502 -'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount().
137503 -'buffer' must be already allocated.
137504 -For guaranteed success, buffer size must be at least FSE_headerBound().
137505 -The result of the function is the number of bytes written into 'buffer'.
137506 -If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small).
137508 -'normalizedCounter' can then be used to create the compression table 'CTable'.
137509 -The space required by 'CTable' must be already allocated, using FSE_createCTable().
137510 -You can then use FSE_buildCTable() to fill 'CTable'.
137511 -If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()).
137513 -'CTable' can then be used to compress 'src', with FSE_compress_usingCTable().
137514 -Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize'
137515 -The function returns the size of compressed data (without header), necessarily <= `dstCapacity`.
137516 -If it returns '0', compressed data could not fit into 'dst'.
137517 -If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
137520 -/* *** DECOMPRESSION *** */
137522 -/*! FSE_readNCount():
137523 -       Read compactly saved 'normalizedCounter' from 'rBuffer'.
137524 -       @return : size read from 'rBuffer',
137525 -                         or an errorCode, which can be tested using FSE_isError().
137526 -                         maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
137527 -FSE_PUBLIC_API size_t FSE_readNCount(short *normalizedCounter, unsigned *maxSymbolValuePtr, unsigned *tableLogPtr, const void *rBuffer, size_t rBuffSize);
137529 -/*! Constructor and Destructor of FSE_DTable.
137530 -       Note that its size depends on 'tableLog' */
137531 -typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
137533 -/*! FSE_buildDTable():
137534 -       Builds 'dt', which must be already allocated, using FSE_createDTable().
137535 -       return : 0, or an errorCode, which can be tested using FSE_isError() */
137536 -FSE_PUBLIC_API size_t FSE_buildDTable_wksp(FSE_DTable *dt, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workspace, size_t workspaceSize);
137538 -/*! FSE_decompress_usingDTable():
137539 -       Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
137540 -       into `dst` which must be already allocated.
137541 -       @return : size of regenerated data (necessarily <= `dstCapacity`),
137542 -                         or an errorCode, which can be tested using FSE_isError() */
137543 -FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize, const FSE_DTable *dt);
137546 -Tutorial :
137547 -----------
137548 -(Note : these functions only decompress FSE-compressed blocks.
137549 - If block is uncompressed, use memcpy() instead
137550 - If block is a single repeated byte, use memset() instead )
137552 -The first step is to obtain the normalized frequencies of symbols.
137553 -This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount().
137554 -'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
137555 -In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
137556 -or size the table to handle worst case situations (typically 256).
137557 -FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
137558 -The result of FSE_readNCount() is the number of bytes read from 'rBuffer'.
137559 -Note that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that.
137560 -If there is an error, the function will return an error code, which can be tested using FSE_isError().
137562 -The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'.
137563 -This is performed by the function FSE_buildDTable().
137564 -The space required by 'FSE_DTable' must be already allocated using FSE_createDTable().
137565 -If there is an error, the function will return an error code, which can be tested using FSE_isError().
137567 -`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable().
137568 -`cSrcSize` must be strictly correct, otherwise decompression will fail.
137569 -FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
137570 -If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
137573 -/* *** Dependency *** */
137574 -#include "bitstream.h"
137576 -/* *****************************************
137577 -*  Static allocation
137578 -*******************************************/
137579 -/* FSE buffer bounds */
137580 -#define FSE_NCOUNTBOUND 512
137581 -#define FSE_BLOCKBOUND(size) (size + (size >> 7))
137582 -#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
137584 -/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */
137585 -#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1 << (maxTableLog - 1)) + ((maxSymbolValue + 1) * 2))
137586 -#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1 << maxTableLog))
137588 -/* *****************************************
137589 -*  FSE advanced API
137590 -*******************************************/
137591 -/* FSE_count_wksp() :
137592 - * Same as FSE_count(), but using an externally provided scratch buffer.
137593 - * `workSpace` size must be table of >= `1024` unsigned
137594 - */
137595 -size_t FSE_count_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned *workSpace);
137597 -/* FSE_countFast_wksp() :
137598 - * Same as FSE_countFast(), but using an externally provided scratch buffer.
137599 - * `workSpace` must be a table of minimum `1024` unsigned
137600 - */
137601 -size_t FSE_countFast_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize, unsigned *workSpace);
137603 -/*! FSE_count_simple
137604 - * Same as FSE_countFast(), but does not use any additional memory (not even on stack).
137605 - * This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr` (presuming it's also the size of `count`).
137607 -size_t FSE_count_simple(unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize);
137609 -unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
137610 -/**< same as FSE_optimalTableLog(), which used `minus==2` */
137612 -size_t FSE_buildCTable_raw(FSE_CTable *ct, unsigned nbBits);
137613 -/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */
137615 -size_t FSE_buildCTable_rle(FSE_CTable *ct, unsigned char symbolValue);
137616 -/**< build a fake FSE_CTable, designed to compress always the same symbolValue */
137618 -/* FSE_buildCTable_wksp() :
137619 - * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
137620 - * `wkspSize` must be >= `(1<<tableLog)`.
137621 - */
137622 -size_t FSE_buildCTable_wksp(FSE_CTable *ct, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workSpace, size_t wkspSize);
137624 -size_t FSE_buildDTable_raw(FSE_DTable *dt, unsigned nbBits);
137625 -/**< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */
137627 -size_t FSE_buildDTable_rle(FSE_DTable *dt, unsigned char symbolValue);
137628 -/**< build a fake FSE_DTable, designed to always generate the same symbolValue */
137630 -size_t FSE_decompress_wksp(void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize, unsigned maxLog, void *workspace, size_t workspaceSize);
137631 -/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DTABLE_SIZE_U32(maxLog)` */
137633 -/* *****************************************
137634 -*  FSE symbol compression API
137635 -*******************************************/
137637 -   This API consists of small unitary functions, which highly benefit from being inlined.
137638 -   Hence their body are included in next section.
137640 -typedef struct {
137641 -       ptrdiff_t value;
137642 -       const void *stateTable;
137643 -       const void *symbolTT;
137644 -       unsigned stateLog;
137645 -} FSE_CState_t;
137647 -static void FSE_initCState(FSE_CState_t *CStatePtr, const FSE_CTable *ct);
137649 -static void FSE_encodeSymbol(BIT_CStream_t *bitC, FSE_CState_t *CStatePtr, unsigned symbol);
137651 -static void FSE_flushCState(BIT_CStream_t *bitC, const FSE_CState_t *CStatePtr);
137653 -/**<
137654 -These functions are inner components of FSE_compress_usingCTable().
137655 -They allow the creation of custom streams, mixing multiple tables and bit sources.
137657 -A key property to keep in mind is that encoding and decoding are done **in reverse direction**.
137658 -So the first symbol you will encode is the last you will decode, like a LIFO stack.
137660 -You will need a few variables to track your CStream. They are :
137662 -FSE_CTable    ct;         // Provided by FSE_buildCTable()
137663 -BIT_CStream_t bitStream;  // bitStream tracking structure
137664 -FSE_CState_t  state;      // State tracking structure (can have several)
137667 -The first thing to do is to init bitStream and state.
137668 -       size_t errorCode = BIT_initCStream(&bitStream, dstBuffer, maxDstSize);
137669 -       FSE_initCState(&state, ct);
137671 -Note that BIT_initCStream() can produce an error code, so its result should be tested, using FSE_isError();
137672 -You can then encode your input data, byte after byte.
137673 -FSE_encodeSymbol() outputs a maximum of 'tableLog' bits at a time.
137674 -Remember decoding will be done in reverse direction.
137675 -       FSE_encodeByte(&bitStream, &state, symbol);
137677 -At any time, you can also add any bit sequence.
137678 -Note : maximum allowed nbBits is 25, for compatibility with 32-bits decoders
137679 -       BIT_addBits(&bitStream, bitField, nbBits);
137681 -The above methods don't commit data to memory, they just store it into local register, for speed.
137682 -Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
137683 -Writing data to memory is a manual operation, performed by the flushBits function.
137684 -       BIT_flushBits(&bitStream);
137686 -Your last FSE encoding operation shall be to flush your last state value(s).
137687 -       FSE_flushState(&bitStream, &state);
137689 -Finally, you must close the bitStream.
137690 -The function returns the size of CStream in bytes.
137691 -If data couldn't fit into dstBuffer, it will return a 0 ( == not compressible)
137692 -If there is an error, it returns an errorCode (which can be tested using FSE_isError()).
137693 -       size_t size = BIT_closeCStream(&bitStream);
137696 -/* *****************************************
137697 -*  FSE symbol decompression API
137698 -*******************************************/
137699 -typedef struct {
137700 -       size_t state;
137701 -       const void *table; /* precise table may vary, depending on U16 */
137702 -} FSE_DState_t;
137704 -static void FSE_initDState(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD, const FSE_DTable *dt);
137706 -static unsigned char FSE_decodeSymbol(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD);
137708 -static unsigned FSE_endOfDState(const FSE_DState_t *DStatePtr);
137710 -/**<
137711 -Let's now decompose FSE_decompress_usingDTable() into its unitary components.
137712 -You will decode FSE-encoded symbols from the bitStream,
137713 -and also any other bitFields you put in, **in reverse order**.
137715 -You will need a few variables to track your bitStream. They are :
137717 -BIT_DStream_t DStream;    // Stream context
137718 -FSE_DState_t  DState;     // State context. Multiple ones are possible
137719 -FSE_DTable*   DTablePtr;  // Decoding table, provided by FSE_buildDTable()
137721 -The first thing to do is to init the bitStream.
137722 -       errorCode = BIT_initDStream(&DStream, srcBuffer, srcSize);
137724 -You should then retrieve your initial state(s)
137725 -(in reverse flushing order if you have several ones) :
137726 -       errorCode = FSE_initDState(&DState, &DStream, DTablePtr);
137728 -You can then decode your data, symbol after symbol.
137729 -For information the maximum number of bits read by FSE_decodeSymbol() is 'tableLog'.
137730 -Keep in mind that symbols are decoded in reverse order, like a LIFO stack (last in, first out).
137731 -       unsigned char symbol = FSE_decodeSymbol(&DState, &DStream);
137733 -You can retrieve any bitfield you eventually stored into the bitStream (in reverse order)
137734 -Note : maximum allowed nbBits is 25, for 32-bits compatibility
137735 -       size_t bitField = BIT_readBits(&DStream, nbBits);
137737 -All above operations only read from local register (which size depends on size_t).
137738 -Refueling the register from memory is manually performed by the reload method.
137739 -       endSignal = FSE_reloadDStream(&DStream);
137741 -BIT_reloadDStream() result tells if there is still some more data to read from DStream.
137742 -BIT_DStream_unfinished : there is still some data left into the DStream.
137743 -BIT_DStream_endOfBuffer : Dstream reached end of buffer. Its container may no longer be completely filled.
137744 -BIT_DStream_completed : Dstream reached its exact end, corresponding in general to decompression completed.
137745 -BIT_DStream_tooFar : Dstream went too far. Decompression result is corrupted.
137747 -When reaching end of buffer (BIT_DStream_endOfBuffer), progress slowly, notably if you decode multiple symbols per loop,
137748 -to properly detect the exact end of stream.
137749 -After each decoded symbol, check if DStream is fully consumed using this simple test :
137750 -       BIT_reloadDStream(&DStream) >= BIT_DStream_completed
137752 -When it's done, verify decompression is fully completed, by checking both DStream and the relevant states.
137753 -Checking if DStream has reached its end is performed by :
137754 -       BIT_endOfDStream(&DStream);
137755 -Check also the states. There might be some symbols left there, if some high probability ones (>50%) are possible.
137756 -       FSE_endOfDState(&DState);
137759 -/* *****************************************
137760 -*  FSE unsafe API
137761 -*******************************************/
137762 -static unsigned char FSE_decodeSymbolFast(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD);
137763 -/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
137765 -/* *****************************************
137766 -*  Implementation of inlined functions
137767 -*******************************************/
137768 -typedef struct {
137769 -       int deltaFindState;
137770 -       U32 deltaNbBits;
137771 -} FSE_symbolCompressionTransform; /* total 8 bytes */
137773 -ZSTD_STATIC void FSE_initCState(FSE_CState_t *statePtr, const FSE_CTable *ct)
137775 -       const void *ptr = ct;
137776 -       const U16 *u16ptr = (const U16 *)ptr;
137777 -       const U32 tableLog = ZSTD_read16(ptr);
137778 -       statePtr->value = (ptrdiff_t)1 << tableLog;
137779 -       statePtr->stateTable = u16ptr + 2;
137780 -       statePtr->symbolTT = ((const U32 *)ct + 1 + (tableLog ? (1 << (tableLog - 1)) : 1));
137781 -       statePtr->stateLog = tableLog;
137784 -/*! FSE_initCState2() :
137785 -*   Same as FSE_initCState(), but the first symbol to include (which will be the last to be read)
137786 -*   uses the smallest state value possible, saving the cost of this symbol */
137787 -ZSTD_STATIC void FSE_initCState2(FSE_CState_t *statePtr, const FSE_CTable *ct, U32 symbol)
137789 -       FSE_initCState(statePtr, ct);
137790 -       {
137791 -               const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform *)(statePtr->symbolTT))[symbol];
137792 -               const U16 *stateTable = (const U16 *)(statePtr->stateTable);
137793 -               U32 nbBitsOut = (U32)((symbolTT.deltaNbBits + (1 << 15)) >> 16);
137794 -               statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits;
137795 -               statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
137796 -       }
137799 -ZSTD_STATIC void FSE_encodeSymbol(BIT_CStream_t *bitC, FSE_CState_t *statePtr, U32 symbol)
137801 -       const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform *)(statePtr->symbolTT))[symbol];
137802 -       const U16 *const stateTable = (const U16 *)(statePtr->stateTable);
137803 -       U32 nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
137804 -       BIT_addBits(bitC, statePtr->value, nbBitsOut);
137805 -       statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
137808 -ZSTD_STATIC void FSE_flushCState(BIT_CStream_t *bitC, const FSE_CState_t *statePtr)
137810 -       BIT_addBits(bitC, statePtr->value, statePtr->stateLog);
137811 -       BIT_flushBits(bitC);
137814 -/* ======    Decompression    ====== */
137816 -typedef struct {
137817 -       U16 tableLog;
137818 -       U16 fastMode;
137819 -} FSE_DTableHeader; /* sizeof U32 */
137821 -typedef struct {
137822 -       unsigned short newState;
137823 -       unsigned char symbol;
137824 -       unsigned char nbBits;
137825 -} FSE_decode_t; /* size == U32 */
137827 -ZSTD_STATIC void FSE_initDState(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD, const FSE_DTable *dt)
137829 -       const void *ptr = dt;
137830 -       const FSE_DTableHeader *const DTableH = (const FSE_DTableHeader *)ptr;
137831 -       DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
137832 -       BIT_reloadDStream(bitD);
137833 -       DStatePtr->table = dt + 1;
137836 -ZSTD_STATIC BYTE FSE_peekSymbol(const FSE_DState_t *DStatePtr)
137838 -       FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state];
137839 -       return DInfo.symbol;
137842 -ZSTD_STATIC void FSE_updateState(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD)
137844 -       FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state];
137845 -       U32 const nbBits = DInfo.nbBits;
137846 -       size_t const lowBits = BIT_readBits(bitD, nbBits);
137847 -       DStatePtr->state = DInfo.newState + lowBits;
137850 -ZSTD_STATIC BYTE FSE_decodeSymbol(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD)
137852 -       FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state];
137853 -       U32 const nbBits = DInfo.nbBits;
137854 -       BYTE const symbol = DInfo.symbol;
137855 -       size_t const lowBits = BIT_readBits(bitD, nbBits);
137857 -       DStatePtr->state = DInfo.newState + lowBits;
137858 -       return symbol;
137861 -/*! FSE_decodeSymbolFast() :
137862 -       unsafe, only works if no symbol has a probability > 50% */
137863 -ZSTD_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t *DStatePtr, BIT_DStream_t *bitD)
137865 -       FSE_decode_t const DInfo = ((const FSE_decode_t *)(DStatePtr->table))[DStatePtr->state];
137866 -       U32 const nbBits = DInfo.nbBits;
137867 -       BYTE const symbol = DInfo.symbol;
137868 -       size_t const lowBits = BIT_readBitsFast(bitD, nbBits);
137870 -       DStatePtr->state = DInfo.newState + lowBits;
137871 -       return symbol;
137874 -ZSTD_STATIC unsigned FSE_endOfDState(const FSE_DState_t *DStatePtr) { return DStatePtr->state == 0; }
137876 -/* **************************************************************
137877 -*  Tuning parameters
137878 -****************************************************************/
137879 -/*!MEMORY_USAGE :
137880 -*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
137881 -*  Increasing memory usage improves compression ratio
137882 -*  Reduced memory usage can improve speed, due to cache effect
137883 -*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
137884 -#ifndef FSE_MAX_MEMORY_USAGE
137885 -#define FSE_MAX_MEMORY_USAGE 14
137886 -#endif
137887 -#ifndef FSE_DEFAULT_MEMORY_USAGE
137888 -#define FSE_DEFAULT_MEMORY_USAGE 13
137889 -#endif
137891 -/*!FSE_MAX_SYMBOL_VALUE :
137892 -*  Maximum symbol value authorized.
137893 -*  Required for proper stack allocation */
137894 -#ifndef FSE_MAX_SYMBOL_VALUE
137895 -#define FSE_MAX_SYMBOL_VALUE 255
137896 -#endif
137898 -/* **************************************************************
137899 -*  template functions type & suffix
137900 -****************************************************************/
137901 -#define FSE_FUNCTION_TYPE BYTE
137902 -#define FSE_FUNCTION_EXTENSION
137903 -#define FSE_DECODE_TYPE FSE_decode_t
137905 -/* ***************************************************************
137906 -*  Constants
137907 -*****************************************************************/
137908 -#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE - 2)
137909 -#define FSE_MAX_TABLESIZE (1U << FSE_MAX_TABLELOG)
137910 -#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE - 1)
137911 -#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE - 2)
137912 -#define FSE_MIN_TABLELOG 5
137914 -#define FSE_TABLELOG_ABSOLUTE_MAX 15
137915 -#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
137916 -#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
137917 -#endif
137919 -#define FSE_TABLESTEP(tableSize) ((tableSize >> 1) + (tableSize >> 3) + 3)
137921 -#endif /* FSE_H */
137922 diff --git a/lib/zstd/fse_compress.c b/lib/zstd/fse_compress.c
137923 deleted file mode 100644
137924 index ef3d1741d532..000000000000
137925 --- a/lib/zstd/fse_compress.c
137926 +++ /dev/null
137927 @@ -1,795 +0,0 @@
137929 - * FSE : Finite State Entropy encoder
137930 - * Copyright (C) 2013-2015, Yann Collet.
137932 - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
137934 - * Redistribution and use in source and binary forms, with or without
137935 - * modification, are permitted provided that the following conditions are
137936 - * met:
137938 - *   * Redistributions of source code must retain the above copyright
137939 - * notice, this list of conditions and the following disclaimer.
137940 - *   * Redistributions in binary form must reproduce the above
137941 - * copyright notice, this list of conditions and the following disclaimer
137942 - * in the documentation and/or other materials provided with the
137943 - * distribution.
137945 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
137946 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
137947 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
137948 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
137949 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
137950 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
137951 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
137952 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
137953 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
137954 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
137955 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
137957 - * This program is free software; you can redistribute it and/or modify it under
137958 - * the terms of the GNU General Public License version 2 as published by the
137959 - * Free Software Foundation. This program is dual-licensed; you may select
137960 - * either version 2 of the GNU General Public License ("GPL") or BSD license
137961 - * ("BSD").
137963 - * You can contact the author at :
137964 - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
137965 - */
137967 -/* **************************************************************
137968 -*  Compiler specifics
137969 -****************************************************************/
137970 -#define FORCE_INLINE static __always_inline
137972 -/* **************************************************************
137973 -*  Includes
137974 -****************************************************************/
137975 -#include "bitstream.h"
137976 -#include "fse.h"
137977 -#include <linux/compiler.h>
137978 -#include <linux/kernel.h>
137979 -#include <linux/math64.h>
137980 -#include <linux/string.h> /* memcpy, memset */
137982 -/* **************************************************************
137983 -*  Error Management
137984 -****************************************************************/
137985 -#define FSE_STATIC_ASSERT(c)                                   \
137986 -       {                                                      \
137987 -               enum { FSE_static_assert = 1 / (int)(!!(c)) }; \
137988 -       } /* use only *after* variable declarations */
137990 -/* **************************************************************
137991 -*  Templates
137992 -****************************************************************/
137994 -  designed to be included
137995 -  for type-specific functions (template emulation in C)
137996 -  Objective is to write these functions only once, for improved maintenance
137999 -/* safety checks */
138000 -#ifndef FSE_FUNCTION_EXTENSION
138001 -#error "FSE_FUNCTION_EXTENSION must be defined"
138002 -#endif
138003 -#ifndef FSE_FUNCTION_TYPE
138004 -#error "FSE_FUNCTION_TYPE must be defined"
138005 -#endif
138007 -/* Function names */
138008 -#define FSE_CAT(X, Y) X##Y
138009 -#define FSE_FUNCTION_NAME(X, Y) FSE_CAT(X, Y)
138010 -#define FSE_TYPE_NAME(X, Y) FSE_CAT(X, Y)
138012 -/* Function templates */
138014 -/* FSE_buildCTable_wksp() :
138015 - * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
138016 - * wkspSize should be sized to handle worst case situation, which is `1<<max_tableLog * sizeof(FSE_FUNCTION_TYPE)`
138017 - * workSpace must also be properly aligned with FSE_FUNCTION_TYPE requirements
138018 - */
138019 -size_t FSE_buildCTable_wksp(FSE_CTable *ct, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workspace, size_t workspaceSize)
138021 -       U32 const tableSize = 1 << tableLog;
138022 -       U32 const tableMask = tableSize - 1;
138023 -       void *const ptr = ct;
138024 -       U16 *const tableU16 = ((U16 *)ptr) + 2;
138025 -       void *const FSCT = ((U32 *)ptr) + 1 /* header */ + (tableLog ? tableSize >> 1 : 1);
138026 -       FSE_symbolCompressionTransform *const symbolTT = (FSE_symbolCompressionTransform *)(FSCT);
138027 -       U32 const step = FSE_TABLESTEP(tableSize);
138028 -       U32 highThreshold = tableSize - 1;
138030 -       U32 *cumul;
138031 -       FSE_FUNCTION_TYPE *tableSymbol;
138032 -       size_t spaceUsed32 = 0;
138034 -       cumul = (U32 *)workspace + spaceUsed32;
138035 -       spaceUsed32 += FSE_MAX_SYMBOL_VALUE + 2;
138036 -       tableSymbol = (FSE_FUNCTION_TYPE *)((U32 *)workspace + spaceUsed32);
138037 -       spaceUsed32 += ALIGN(sizeof(FSE_FUNCTION_TYPE) * ((size_t)1 << tableLog), sizeof(U32)) >> 2;
138039 -       if ((spaceUsed32 << 2) > workspaceSize)
138040 -               return ERROR(tableLog_tooLarge);
138041 -       workspace = (U32 *)workspace + spaceUsed32;
138042 -       workspaceSize -= (spaceUsed32 << 2);
138044 -       /* CTable header */
138045 -       tableU16[-2] = (U16)tableLog;
138046 -       tableU16[-1] = (U16)maxSymbolValue;
138048 -       /* For explanations on how to distribute symbol values over the table :
138049 -       *  http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
138051 -       /* symbol start positions */
138052 -       {
138053 -               U32 u;
138054 -               cumul[0] = 0;
138055 -               for (u = 1; u <= maxSymbolValue + 1; u++) {
138056 -                       if (normalizedCounter[u - 1] == -1) { /* Low proba symbol */
138057 -                               cumul[u] = cumul[u - 1] + 1;
138058 -                               tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u - 1);
138059 -                       } else {
138060 -                               cumul[u] = cumul[u - 1] + normalizedCounter[u - 1];
138061 -                       }
138062 -               }
138063 -               cumul[maxSymbolValue + 1] = tableSize + 1;
138064 -       }
138066 -       /* Spread symbols */
138067 -       {
138068 -               U32 position = 0;
138069 -               U32 symbol;
138070 -               for (symbol = 0; symbol <= maxSymbolValue; symbol++) {
138071 -                       int nbOccurences;
138072 -                       for (nbOccurences = 0; nbOccurences < normalizedCounter[symbol]; nbOccurences++) {
138073 -                               tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
138074 -                               position = (position + step) & tableMask;
138075 -                               while (position > highThreshold)
138076 -                                       position = (position + step) & tableMask; /* Low proba area */
138077 -                       }
138078 -               }
138080 -               if (position != 0)
138081 -                       return ERROR(GENERIC); /* Must have gone through all positions */
138082 -       }
138084 -       /* Build table */
138085 -       {
138086 -               U32 u;
138087 -               for (u = 0; u < tableSize; u++) {
138088 -                       FSE_FUNCTION_TYPE s = tableSymbol[u];   /* note : static analyzer may not understand tableSymbol is properly initialized */
138089 -                       tableU16[cumul[s]++] = (U16)(tableSize + u); /* TableU16 : sorted by symbol order; gives next state value */
138090 -               }
138091 -       }
138093 -       /* Build Symbol Transformation Table */
138094 -       {
138095 -               unsigned total = 0;
138096 -               unsigned s;
138097 -               for (s = 0; s <= maxSymbolValue; s++) {
138098 -                       switch (normalizedCounter[s]) {
138099 -                       case 0: break;
138101 -                       case -1:
138102 -                       case 1:
138103 -                               symbolTT[s].deltaNbBits = (tableLog << 16) - (1 << tableLog);
138104 -                               symbolTT[s].deltaFindState = total - 1;
138105 -                               total++;
138106 -                               break;
138107 -                       default: {
138108 -                               U32 const maxBitsOut = tableLog - BIT_highbit32(normalizedCounter[s] - 1);
138109 -                               U32 const minStatePlus = normalizedCounter[s] << maxBitsOut;
138110 -                               symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
138111 -                               symbolTT[s].deltaFindState = total - normalizedCounter[s];
138112 -                               total += normalizedCounter[s];
138113 -                       }
138114 -                       }
138115 -               }
138116 -       }
138118 -       return 0;
138121 -/*-**************************************************************
138122 -*  FSE NCount encoding-decoding
138123 -****************************************************************/
138124 -size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
138126 -       size_t const maxHeaderSize = (((maxSymbolValue + 1) * tableLog) >> 3) + 3;
138127 -       return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND; /* maxSymbolValue==0 ? use default */
138130 -static size_t FSE_writeNCount_generic(void *header, size_t headerBufferSize, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
138131 -                                     unsigned writeIsSafe)
138133 -       BYTE *const ostart = (BYTE *)header;
138134 -       BYTE *out = ostart;
138135 -       BYTE *const oend = ostart + headerBufferSize;
138136 -       int nbBits;
138137 -       const int tableSize = 1 << tableLog;
138138 -       int remaining;
138139 -       int threshold;
138140 -       U32 bitStream;
138141 -       int bitCount;
138142 -       unsigned charnum = 0;
138143 -       int previous0 = 0;
138145 -       bitStream = 0;
138146 -       bitCount = 0;
138147 -       /* Table Size */
138148 -       bitStream += (tableLog - FSE_MIN_TABLELOG) << bitCount;
138149 -       bitCount += 4;
138151 -       /* Init */
138152 -       remaining = tableSize + 1; /* +1 for extra accuracy */
138153 -       threshold = tableSize;
138154 -       nbBits = tableLog + 1;
138156 -       while (remaining > 1) { /* stops at 1 */
138157 -               if (previous0) {
138158 -                       unsigned start = charnum;
138159 -                       while (!normalizedCounter[charnum])
138160 -                               charnum++;
138161 -                       while (charnum >= start + 24) {
138162 -                               start += 24;
138163 -                               bitStream += 0xFFFFU << bitCount;
138164 -                               if ((!writeIsSafe) && (out > oend - 2))
138165 -                                       return ERROR(dstSize_tooSmall); /* Buffer overflow */
138166 -                               out[0] = (BYTE)bitStream;
138167 -                               out[1] = (BYTE)(bitStream >> 8);
138168 -                               out += 2;
138169 -                               bitStream >>= 16;
138170 -                       }
138171 -                       while (charnum >= start + 3) {
138172 -                               start += 3;
138173 -                               bitStream += 3 << bitCount;
138174 -                               bitCount += 2;
138175 -                       }
138176 -                       bitStream += (charnum - start) << bitCount;
138177 -                       bitCount += 2;
138178 -                       if (bitCount > 16) {
138179 -                               if ((!writeIsSafe) && (out > oend - 2))
138180 -                                       return ERROR(dstSize_tooSmall); /* Buffer overflow */
138181 -                               out[0] = (BYTE)bitStream;
138182 -                               out[1] = (BYTE)(bitStream >> 8);
138183 -                               out += 2;
138184 -                               bitStream >>= 16;
138185 -                               bitCount -= 16;
138186 -                       }
138187 -               }
138188 -               {
138189 -                       int count = normalizedCounter[charnum++];
138190 -                       int const max = (2 * threshold - 1) - remaining;
138191 -                       remaining -= count < 0 ? -count : count;
138192 -                       count++; /* +1 for extra accuracy */
138193 -                       if (count >= threshold)
138194 -                               count += max; /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
138195 -                       bitStream += count << bitCount;
138196 -                       bitCount += nbBits;
138197 -                       bitCount -= (count < max);
138198 -                       previous0 = (count == 1);
138199 -                       if (remaining < 1)
138200 -                               return ERROR(GENERIC);
138201 -                       while (remaining < threshold)
138202 -                               nbBits--, threshold >>= 1;
138203 -               }
138204 -               if (bitCount > 16) {
138205 -                       if ((!writeIsSafe) && (out > oend - 2))
138206 -                               return ERROR(dstSize_tooSmall); /* Buffer overflow */
138207 -                       out[0] = (BYTE)bitStream;
138208 -                       out[1] = (BYTE)(bitStream >> 8);
138209 -                       out += 2;
138210 -                       bitStream >>= 16;
138211 -                       bitCount -= 16;
138212 -               }
138213 -       }
138215 -       /* flush remaining bitStream */
138216 -       if ((!writeIsSafe) && (out > oend - 2))
138217 -               return ERROR(dstSize_tooSmall); /* Buffer overflow */
138218 -       out[0] = (BYTE)bitStream;
138219 -       out[1] = (BYTE)(bitStream >> 8);
138220 -       out += (bitCount + 7) / 8;
138222 -       if (charnum > maxSymbolValue + 1)
138223 -               return ERROR(GENERIC);
138225 -       return (out - ostart);
138228 -size_t FSE_writeNCount(void *buffer, size_t bufferSize, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
138230 -       if (tableLog > FSE_MAX_TABLELOG)
138231 -               return ERROR(tableLog_tooLarge); /* Unsupported */
138232 -       if (tableLog < FSE_MIN_TABLELOG)
138233 -               return ERROR(GENERIC); /* Unsupported */
138235 -       if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
138236 -               return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0);
138238 -       return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1);
138241 -/*-**************************************************************
138242 -*  Counting histogram
138243 -****************************************************************/
138244 -/*! FSE_count_simple
138245 -       This function counts byte values within `src`, and store the histogram into table `count`.
138246 -       It doesn't use any additional memory.
138247 -       But this function is unsafe : it doesn't check that all values within `src` can fit into `count`.
138248 -       For this reason, prefer using a table `count` with 256 elements.
138249 -       @return : count of most numerous element
138251 -size_t FSE_count_simple(unsigned *count, unsigned *maxSymbolValuePtr, const void *src, size_t srcSize)
138253 -       const BYTE *ip = (const BYTE *)src;
138254 -       const BYTE *const end = ip + srcSize;
138255 -       unsigned maxSymbolValue = *maxSymbolValuePtr;
138256 -       unsigned max = 0;
138258 -       memset(count, 0, (maxSymbolValue + 1) * sizeof(*count));
138259 -       if (srcSize == 0) {
138260 -               *maxSymbolValuePtr = 0;
138261 -               return 0;
138262 -       }
138264 -       while (ip < end)
138265 -               count[*ip++]++;
138267 -       while (!count[maxSymbolValue])
138268 -               maxSymbolValue--;
138269 -       *maxSymbolValuePtr = maxSymbolValue;
138271 -       {
138272 -               U32 s;
138273 -               for (s = 0; s <= maxSymbolValue; s++)
138274 -                       if (count[s] > max)
138275 -                               max = count[s];
138276 -       }
138278 -       return (size_t)max;
138281 -/* FSE_count_parallel_wksp() :
138282 - * Same as FSE_count_parallel(), but using an externally provided scratch buffer.
138283 - * `workSpace` size must be a minimum of `1024 * sizeof(unsigned)`` */
138284 -static size_t FSE_count_parallel_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned checkMax,
138285 -                                     unsigned *const workSpace)
138287 -       const BYTE *ip = (const BYTE *)source;
138288 -       const BYTE *const iend = ip + sourceSize;
138289 -       unsigned maxSymbolValue = *maxSymbolValuePtr;
138290 -       unsigned max = 0;
138291 -       U32 *const Counting1 = workSpace;
138292 -       U32 *const Counting2 = Counting1 + 256;
138293 -       U32 *const Counting3 = Counting2 + 256;
138294 -       U32 *const Counting4 = Counting3 + 256;
138296 -       memset(Counting1, 0, 4 * 256 * sizeof(unsigned));
138298 -       /* safety checks */
138299 -       if (!sourceSize) {
138300 -               memset(count, 0, maxSymbolValue + 1);
138301 -               *maxSymbolValuePtr = 0;
138302 -               return 0;
138303 -       }
138304 -       if (!maxSymbolValue)
138305 -               maxSymbolValue = 255; /* 0 == default */
138307 -       /* by stripes of 16 bytes */
138308 -       {
138309 -               U32 cached = ZSTD_read32(ip);
138310 -               ip += 4;
138311 -               while (ip < iend - 15) {
138312 -                       U32 c = cached;
138313 -                       cached = ZSTD_read32(ip);
138314 -                       ip += 4;
138315 -                       Counting1[(BYTE)c]++;
138316 -                       Counting2[(BYTE)(c >> 8)]++;
138317 -                       Counting3[(BYTE)(c >> 16)]++;
138318 -                       Counting4[c >> 24]++;
138319 -                       c = cached;
138320 -                       cached = ZSTD_read32(ip);
138321 -                       ip += 4;
138322 -                       Counting1[(BYTE)c]++;
138323 -                       Counting2[(BYTE)(c >> 8)]++;
138324 -                       Counting3[(BYTE)(c >> 16)]++;
138325 -                       Counting4[c >> 24]++;
138326 -                       c = cached;
138327 -                       cached = ZSTD_read32(ip);
138328 -                       ip += 4;
138329 -                       Counting1[(BYTE)c]++;
138330 -                       Counting2[(BYTE)(c >> 8)]++;
138331 -                       Counting3[(BYTE)(c >> 16)]++;
138332 -                       Counting4[c >> 24]++;
138333 -                       c = cached;
138334 -                       cached = ZSTD_read32(ip);
138335 -                       ip += 4;
138336 -                       Counting1[(BYTE)c]++;
138337 -                       Counting2[(BYTE)(c >> 8)]++;
138338 -                       Counting3[(BYTE)(c >> 16)]++;
138339 -                       Counting4[c >> 24]++;
138340 -               }
138341 -               ip -= 4;
138342 -       }
138344 -       /* finish last symbols */
138345 -       while (ip < iend)
138346 -               Counting1[*ip++]++;
138348 -       if (checkMax) { /* verify stats will fit into destination table */
138349 -               U32 s;
138350 -               for (s = 255; s > maxSymbolValue; s--) {
138351 -                       Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
138352 -                       if (Counting1[s])
138353 -                               return ERROR(maxSymbolValue_tooSmall);
138354 -               }
138355 -       }
138357 -       {
138358 -               U32 s;
138359 -               for (s = 0; s <= maxSymbolValue; s++) {
138360 -                       count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s];
138361 -                       if (count[s] > max)
138362 -                               max = count[s];
138363 -               }
138364 -       }
138366 -       while (!count[maxSymbolValue])
138367 -               maxSymbolValue--;
138368 -       *maxSymbolValuePtr = maxSymbolValue;
138369 -       return (size_t)max;
138372 -/* FSE_countFast_wksp() :
138373 - * Same as FSE_countFast(), but using an externally provided scratch buffer.
138374 - * `workSpace` size must be table of >= `1024` unsigned */
138375 -size_t FSE_countFast_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned *workSpace)
138377 -       if (sourceSize < 1500)
138378 -               return FSE_count_simple(count, maxSymbolValuePtr, source, sourceSize);
138379 -       return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 0, workSpace);
138382 -/* FSE_count_wksp() :
138383 - * Same as FSE_count(), but using an externally provided scratch buffer.
138384 - * `workSpace` size must be table of >= `1024` unsigned */
138385 -size_t FSE_count_wksp(unsigned *count, unsigned *maxSymbolValuePtr, const void *source, size_t sourceSize, unsigned *workSpace)
138387 -       if (*maxSymbolValuePtr < 255)
138388 -               return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 1, workSpace);
138389 -       *maxSymbolValuePtr = 255;
138390 -       return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace);
138393 -/*-**************************************************************
138394 -*  FSE Compression Code
138395 -****************************************************************/
138396 -/*! FSE_sizeof_CTable() :
138397 -       FSE_CTable is a variable size structure which contains :
138398 -       `U16 tableLog;`
138399 -       `U16 maxSymbolValue;`
138400 -       `U16 nextStateNumber[1 << tableLog];`                         // This size is variable
138401 -       `FSE_symbolCompressionTransform symbolTT[maxSymbolValue+1];`  // This size is variable
138402 -Allocation is manual (C standard does not support variable-size structures).
138404 -size_t FSE_sizeof_CTable(unsigned maxSymbolValue, unsigned tableLog)
138406 -       if (tableLog > FSE_MAX_TABLELOG)
138407 -               return ERROR(tableLog_tooLarge);
138408 -       return FSE_CTABLE_SIZE_U32(tableLog, maxSymbolValue) * sizeof(U32);
138411 -/* provides the minimum logSize to safely represent a distribution */
138412 -static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
138414 -       U32 minBitsSrc = BIT_highbit32((U32)(srcSize - 1)) + 1;
138415 -       U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2;
138416 -       U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
138417 -       return minBits;
138420 -unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
138422 -       U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus;
138423 -       U32 tableLog = maxTableLog;
138424 -       U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);
138425 -       if (tableLog == 0)
138426 -               tableLog = FSE_DEFAULT_TABLELOG;
138427 -       if (maxBitsSrc < tableLog)
138428 -               tableLog = maxBitsSrc; /* Accuracy can be reduced */
138429 -       if (minBits > tableLog)
138430 -               tableLog = minBits; /* Need a minimum to safely represent all symbol values */
138431 -       if (tableLog < FSE_MIN_TABLELOG)
138432 -               tableLog = FSE_MIN_TABLELOG;
138433 -       if (tableLog > FSE_MAX_TABLELOG)
138434 -               tableLog = FSE_MAX_TABLELOG;
138435 -       return tableLog;
138438 -unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
138440 -       return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);
138443 -/* Secondary normalization method.
138444 -   To be used when primary method fails. */
138446 -static size_t FSE_normalizeM2(short *norm, U32 tableLog, const unsigned *count, size_t total, U32 maxSymbolValue)
138448 -       short const NOT_YET_ASSIGNED = -2;
138449 -       U32 s;
138450 -       U32 distributed = 0;
138451 -       U32 ToDistribute;
138453 -       /* Init */
138454 -       U32 const lowThreshold = (U32)(total >> tableLog);
138455 -       U32 lowOne = (U32)((total * 3) >> (tableLog + 1));
138457 -       for (s = 0; s <= maxSymbolValue; s++) {
138458 -               if (count[s] == 0) {
138459 -                       norm[s] = 0;
138460 -                       continue;
138461 -               }
138462 -               if (count[s] <= lowThreshold) {
138463 -                       norm[s] = -1;
138464 -                       distributed++;
138465 -                       total -= count[s];
138466 -                       continue;
138467 -               }
138468 -               if (count[s] <= lowOne) {
138469 -                       norm[s] = 1;
138470 -                       distributed++;
138471 -                       total -= count[s];
138472 -                       continue;
138473 -               }
138475 -               norm[s] = NOT_YET_ASSIGNED;
138476 -       }
138477 -       ToDistribute = (1 << tableLog) - distributed;
138479 -       if ((total / ToDistribute) > lowOne) {
138480 -               /* risk of rounding to zero */
138481 -               lowOne = (U32)((total * 3) / (ToDistribute * 2));
138482 -               for (s = 0; s <= maxSymbolValue; s++) {
138483 -                       if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) {
138484 -                               norm[s] = 1;
138485 -                               distributed++;
138486 -                               total -= count[s];
138487 -                               continue;
138488 -                       }
138489 -               }
138490 -               ToDistribute = (1 << tableLog) - distributed;
138491 -       }
138493 -       if (distributed == maxSymbolValue + 1) {
138494 -               /* all values are pretty poor;
138495 -                  probably incompressible data (should have already been detected);
138496 -                  find max, then give all remaining points to max */
138497 -               U32 maxV = 0, maxC = 0;
138498 -               for (s = 0; s <= maxSymbolValue; s++)
138499 -                       if (count[s] > maxC)
138500 -                               maxV = s, maxC = count[s];
138501 -               norm[maxV] += (short)ToDistribute;
138502 -               return 0;
138503 -       }
138505 -       if (total == 0) {
138506 -               /* all of the symbols were low enough for the lowOne or lowThreshold */
138507 -               for (s = 0; ToDistribute > 0; s = (s + 1) % (maxSymbolValue + 1))
138508 -                       if (norm[s] > 0)
138509 -                               ToDistribute--, norm[s]++;
138510 -               return 0;
138511 -       }
138513 -       {
138514 -               U64 const vStepLog = 62 - tableLog;
138515 -               U64 const mid = (1ULL << (vStepLog - 1)) - 1;
138516 -               U64 const rStep = div_u64((((U64)1 << vStepLog) * ToDistribute) + mid, (U32)total); /* scale on remaining */
138517 -               U64 tmpTotal = mid;
138518 -               for (s = 0; s <= maxSymbolValue; s++) {
138519 -                       if (norm[s] == NOT_YET_ASSIGNED) {
138520 -                               U64 const end = tmpTotal + (count[s] * rStep);
138521 -                               U32 const sStart = (U32)(tmpTotal >> vStepLog);
138522 -                               U32 const sEnd = (U32)(end >> vStepLog);
138523 -                               U32 const weight = sEnd - sStart;
138524 -                               if (weight < 1)
138525 -                                       return ERROR(GENERIC);
138526 -                               norm[s] = (short)weight;
138527 -                               tmpTotal = end;
138528 -                       }
138529 -               }
138530 -       }
138532 -       return 0;
138535 -size_t FSE_normalizeCount(short *normalizedCounter, unsigned tableLog, const unsigned *count, size_t total, unsigned maxSymbolValue)
138537 -       /* Sanity checks */
138538 -       if (tableLog == 0)
138539 -               tableLog = FSE_DEFAULT_TABLELOG;
138540 -       if (tableLog < FSE_MIN_TABLELOG)
138541 -               return ERROR(GENERIC); /* Unsupported size */
138542 -       if (tableLog > FSE_MAX_TABLELOG)
138543 -               return ERROR(tableLog_tooLarge); /* Unsupported size */
138544 -       if (tableLog < FSE_minTableLog(total, maxSymbolValue))
138545 -               return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */
138547 -       {
138548 -               U32 const rtbTable[] = {0, 473195, 504333, 520860, 550000, 700000, 750000, 830000};
138549 -               U64 const scale = 62 - tableLog;
138550 -               U64 const step = div_u64((U64)1 << 62, (U32)total); /* <== here, one division ! */
138551 -               U64 const vStep = 1ULL << (scale - 20);
138552 -               int stillToDistribute = 1 << tableLog;
138553 -               unsigned s;
138554 -               unsigned largest = 0;
138555 -               short largestP = 0;
138556 -               U32 lowThreshold = (U32)(total >> tableLog);
138558 -               for (s = 0; s <= maxSymbolValue; s++) {
138559 -                       if (count[s] == total)
138560 -                               return 0; /* rle special case */
138561 -                       if (count[s] == 0) {
138562 -                               normalizedCounter[s] = 0;
138563 -                               continue;
138564 -                       }
138565 -                       if (count[s] <= lowThreshold) {
138566 -                               normalizedCounter[s] = -1;
138567 -                               stillToDistribute--;
138568 -                       } else {
138569 -                               short proba = (short)((count[s] * step) >> scale);
138570 -                               if (proba < 8) {
138571 -                                       U64 restToBeat = vStep * rtbTable[proba];
138572 -                                       proba += (count[s] * step) - ((U64)proba << scale) > restToBeat;
138573 -                               }
138574 -                               if (proba > largestP)
138575 -                                       largestP = proba, largest = s;
138576 -                               normalizedCounter[s] = proba;
138577 -                               stillToDistribute -= proba;
138578 -                       }
138579 -               }
138580 -               if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) {
138581 -                       /* corner case, need another normalization method */
138582 -                       size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue);
138583 -                       if (FSE_isError(errorCode))
138584 -                               return errorCode;
138585 -               } else
138586 -                       normalizedCounter[largest] += (short)stillToDistribute;
138587 -       }
138589 -       return tableLog;
138592 -/* fake FSE_CTable, for raw (uncompressed) input */
138593 -size_t FSE_buildCTable_raw(FSE_CTable *ct, unsigned nbBits)
138595 -       const unsigned tableSize = 1 << nbBits;
138596 -       const unsigned tableMask = tableSize - 1;
138597 -       const unsigned maxSymbolValue = tableMask;
138598 -       void *const ptr = ct;
138599 -       U16 *const tableU16 = ((U16 *)ptr) + 2;
138600 -       void *const FSCT = ((U32 *)ptr) + 1 /* header */ + (tableSize >> 1); /* assumption : tableLog >= 1 */
138601 -       FSE_symbolCompressionTransform *const symbolTT = (FSE_symbolCompressionTransform *)(FSCT);
138602 -       unsigned s;
138604 -       /* Sanity checks */
138605 -       if (nbBits < 1)
138606 -               return ERROR(GENERIC); /* min size */
138608 -       /* header */
138609 -       tableU16[-2] = (U16)nbBits;
138610 -       tableU16[-1] = (U16)maxSymbolValue;
138612 -       /* Build table */
138613 -       for (s = 0; s < tableSize; s++)
138614 -               tableU16[s] = (U16)(tableSize + s);
138616 -       /* Build Symbol Transformation Table */
138617 -       {
138618 -               const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits);
138619 -               for (s = 0; s <= maxSymbolValue; s++) {
138620 -                       symbolTT[s].deltaNbBits = deltaNbBits;
138621 -                       symbolTT[s].deltaFindState = s - 1;
138622 -               }
138623 -       }
138625 -       return 0;
138628 -/* fake FSE_CTable, for rle input (always same symbol) */
138629 -size_t FSE_buildCTable_rle(FSE_CTable *ct, BYTE symbolValue)
138631 -       void *ptr = ct;
138632 -       U16 *tableU16 = ((U16 *)ptr) + 2;
138633 -       void *FSCTptr = (U32 *)ptr + 2;
138634 -       FSE_symbolCompressionTransform *symbolTT = (FSE_symbolCompressionTransform *)FSCTptr;
138636 -       /* header */
138637 -       tableU16[-2] = (U16)0;
138638 -       tableU16[-1] = (U16)symbolValue;
138640 -       /* Build table */
138641 -       tableU16[0] = 0;
138642 -       tableU16[1] = 0; /* just in case */
138644 -       /* Build Symbol Transformation Table */
138645 -       symbolTT[symbolValue].deltaNbBits = 0;
138646 -       symbolTT[symbolValue].deltaFindState = 0;
138648 -       return 0;
138651 -static size_t FSE_compress_usingCTable_generic(void *dst, size_t dstSize, const void *src, size_t srcSize, const FSE_CTable *ct, const unsigned fast)
138653 -       const BYTE *const istart = (const BYTE *)src;
138654 -       const BYTE *const iend = istart + srcSize;
138655 -       const BYTE *ip = iend;
138657 -       BIT_CStream_t bitC;
138658 -       FSE_CState_t CState1, CState2;
138660 -       /* init */
138661 -       if (srcSize <= 2)
138662 -               return 0;
138663 -       {
138664 -               size_t const initError = BIT_initCStream(&bitC, dst, dstSize);
138665 -               if (FSE_isError(initError))
138666 -                       return 0; /* not enough space available to write a bitstream */
138667 -       }
138669 -#define FSE_FLUSHBITS(s) (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))
138671 -       if (srcSize & 1) {
138672 -               FSE_initCState2(&CState1, ct, *--ip);
138673 -               FSE_initCState2(&CState2, ct, *--ip);
138674 -               FSE_encodeSymbol(&bitC, &CState1, *--ip);
138675 -               FSE_FLUSHBITS(&bitC);
138676 -       } else {
138677 -               FSE_initCState2(&CState2, ct, *--ip);
138678 -               FSE_initCState2(&CState1, ct, *--ip);
138679 -       }
138681 -       /* join to mod 4 */
138682 -       srcSize -= 2;
138683 -       if ((sizeof(bitC.bitContainer) * 8 > FSE_MAX_TABLELOG * 4 + 7) && (srcSize & 2)) { /* test bit 2 */
138684 -               FSE_encodeSymbol(&bitC, &CState2, *--ip);
138685 -               FSE_encodeSymbol(&bitC, &CState1, *--ip);
138686 -               FSE_FLUSHBITS(&bitC);
138687 -       }
138689 -       /* 2 or 4 encoding per loop */
138690 -       while (ip > istart) {
138692 -               FSE_encodeSymbol(&bitC, &CState2, *--ip);
138694 -               if (sizeof(bitC.bitContainer) * 8 < FSE_MAX_TABLELOG * 2 + 7) /* this test must be static */
138695 -                       FSE_FLUSHBITS(&bitC);
138697 -               FSE_encodeSymbol(&bitC, &CState1, *--ip);
138699 -               if (sizeof(bitC.bitContainer) * 8 > FSE_MAX_TABLELOG * 4 + 7) { /* this test must be static */
138700 -                       FSE_encodeSymbol(&bitC, &CState2, *--ip);
138701 -                       FSE_encodeSymbol(&bitC, &CState1, *--ip);
138702 -               }
138704 -               FSE_FLUSHBITS(&bitC);
138705 -       }
138707 -       FSE_flushCState(&bitC, &CState2);
138708 -       FSE_flushCState(&bitC, &CState1);
138709 -       return BIT_closeCStream(&bitC);
138712 -size_t FSE_compress_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const FSE_CTable *ct)
138714 -       unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize));
138716 -       if (fast)
138717 -               return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1);
138718 -       else
138719 -               return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0);
138722 -size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }
138723 diff --git a/lib/zstd/fse_decompress.c b/lib/zstd/fse_decompress.c
138724 deleted file mode 100644
138725 index 0b353530fb3f..000000000000
138726 --- a/lib/zstd/fse_decompress.c
138727 +++ /dev/null
138728 @@ -1,325 +0,0 @@
138730 - * FSE : Finite State Entropy decoder
138731 - * Copyright (C) 2013-2015, Yann Collet.
138733 - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
138735 - * Redistribution and use in source and binary forms, with or without
138736 - * modification, are permitted provided that the following conditions are
138737 - * met:
138739 - *   * Redistributions of source code must retain the above copyright
138740 - * notice, this list of conditions and the following disclaimer.
138741 - *   * Redistributions in binary form must reproduce the above
138742 - * copyright notice, this list of conditions and the following disclaimer
138743 - * in the documentation and/or other materials provided with the
138744 - * distribution.
138746 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
138747 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
138748 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
138749 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
138750 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
138751 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
138752 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
138753 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
138754 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
138755 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
138756 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
138758 - * This program is free software; you can redistribute it and/or modify it under
138759 - * the terms of the GNU General Public License version 2 as published by the
138760 - * Free Software Foundation. This program is dual-licensed; you may select
138761 - * either version 2 of the GNU General Public License ("GPL") or BSD license
138762 - * ("BSD").
138764 - * You can contact the author at :
138765 - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
138766 - */
138768 -/* **************************************************************
138769 -*  Compiler specifics
138770 -****************************************************************/
138771 -#define FORCE_INLINE static __always_inline
138773 -/* **************************************************************
138774 -*  Includes
138775 -****************************************************************/
138776 -#include "bitstream.h"
138777 -#include "fse.h"
138778 -#include "zstd_internal.h"
138779 -#include <linux/compiler.h>
138780 -#include <linux/kernel.h>
138781 -#include <linux/string.h> /* memcpy, memset */
138783 -/* **************************************************************
138784 -*  Error Management
138785 -****************************************************************/
138786 -#define FSE_isError ERR_isError
138787 -#define FSE_STATIC_ASSERT(c)                                   \
138788 -       {                                                      \
138789 -               enum { FSE_static_assert = 1 / (int)(!!(c)) }; \
138790 -       } /* use only *after* variable declarations */
138792 -/* **************************************************************
138793 -*  Templates
138794 -****************************************************************/
138796 -  designed to be included
138797 -  for type-specific functions (template emulation in C)
138798 -  Objective is to write these functions only once, for improved maintenance
138801 -/* safety checks */
138802 -#ifndef FSE_FUNCTION_EXTENSION
138803 -#error "FSE_FUNCTION_EXTENSION must be defined"
138804 -#endif
138805 -#ifndef FSE_FUNCTION_TYPE
138806 -#error "FSE_FUNCTION_TYPE must be defined"
138807 -#endif
138809 -/* Function names */
138810 -#define FSE_CAT(X, Y) X##Y
138811 -#define FSE_FUNCTION_NAME(X, Y) FSE_CAT(X, Y)
138812 -#define FSE_TYPE_NAME(X, Y) FSE_CAT(X, Y)
138814 -/* Function templates */
138816 -size_t FSE_buildDTable_wksp(FSE_DTable *dt, const short *normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void *workspace, size_t workspaceSize)
138818 -       void *const tdPtr = dt + 1; /* because *dt is unsigned, 32-bits aligned on 32-bits */
138819 -       FSE_DECODE_TYPE *const tableDecode = (FSE_DECODE_TYPE *)(tdPtr);
138820 -       U16 *symbolNext = (U16 *)workspace;
138822 -       U32 const maxSV1 = maxSymbolValue + 1;
138823 -       U32 const tableSize = 1 << tableLog;
138824 -       U32 highThreshold = tableSize - 1;
138826 -       /* Sanity Checks */
138827 -       if (workspaceSize < sizeof(U16) * (FSE_MAX_SYMBOL_VALUE + 1))
138828 -               return ERROR(tableLog_tooLarge);
138829 -       if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE)
138830 -               return ERROR(maxSymbolValue_tooLarge);
138831 -       if (tableLog > FSE_MAX_TABLELOG)
138832 -               return ERROR(tableLog_tooLarge);
138834 -       /* Init, lay down lowprob symbols */
138835 -       {
138836 -               FSE_DTableHeader DTableH;
138837 -               DTableH.tableLog = (U16)tableLog;
138838 -               DTableH.fastMode = 1;
138839 -               {
138840 -                       S16 const largeLimit = (S16)(1 << (tableLog - 1));
138841 -                       U32 s;
138842 -                       for (s = 0; s < maxSV1; s++) {
138843 -                               if (normalizedCounter[s] == -1) {
138844 -                                       tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
138845 -                                       symbolNext[s] = 1;
138846 -                               } else {
138847 -                                       if (normalizedCounter[s] >= largeLimit)
138848 -                                               DTableH.fastMode = 0;
138849 -                                       symbolNext[s] = normalizedCounter[s];
138850 -                               }
138851 -                       }
138852 -               }
138853 -               memcpy(dt, &DTableH, sizeof(DTableH));
138854 -       }
138856 -       /* Spread symbols */
138857 -       {
138858 -               U32 const tableMask = tableSize - 1;
138859 -               U32 const step = FSE_TABLESTEP(tableSize);
138860 -               U32 s, position = 0;
138861 -               for (s = 0; s < maxSV1; s++) {
138862 -                       int i;
138863 -                       for (i = 0; i < normalizedCounter[s]; i++) {
138864 -                               tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
138865 -                               position = (position + step) & tableMask;
138866 -                               while (position > highThreshold)
138867 -                                       position = (position + step) & tableMask; /* lowprob area */
138868 -                       }
138869 -               }
138870 -               if (position != 0)
138871 -                       return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
138872 -       }
138874 -       /* Build Decoding table */
138875 -       {
138876 -               U32 u;
138877 -               for (u = 0; u < tableSize; u++) {
138878 -                       FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);
138879 -                       U16 nextState = symbolNext[symbol]++;
138880 -                       tableDecode[u].nbBits = (BYTE)(tableLog - BIT_highbit32((U32)nextState));
138881 -                       tableDecode[u].newState = (U16)((nextState << tableDecode[u].nbBits) - tableSize);
138882 -               }
138883 -       }
138885 -       return 0;
138888 -/*-*******************************************************
138889 -*  Decompression (Byte symbols)
138890 -*********************************************************/
138891 -size_t FSE_buildDTable_rle(FSE_DTable *dt, BYTE symbolValue)
138893 -       void *ptr = dt;
138894 -       FSE_DTableHeader *const DTableH = (FSE_DTableHeader *)ptr;
138895 -       void *dPtr = dt + 1;
138896 -       FSE_decode_t *const cell = (FSE_decode_t *)dPtr;
138898 -       DTableH->tableLog = 0;
138899 -       DTableH->fastMode = 0;
138901 -       cell->newState = 0;
138902 -       cell->symbol = symbolValue;
138903 -       cell->nbBits = 0;
138905 -       return 0;
138908 -size_t FSE_buildDTable_raw(FSE_DTable *dt, unsigned nbBits)
138910 -       void *ptr = dt;
138911 -       FSE_DTableHeader *const DTableH = (FSE_DTableHeader *)ptr;
138912 -       void *dPtr = dt + 1;
138913 -       FSE_decode_t *const dinfo = (FSE_decode_t *)dPtr;
138914 -       const unsigned tableSize = 1 << nbBits;
138915 -       const unsigned tableMask = tableSize - 1;
138916 -       const unsigned maxSV1 = tableMask + 1;
138917 -       unsigned s;
138919 -       /* Sanity checks */
138920 -       if (nbBits < 1)
138921 -               return ERROR(GENERIC); /* min size */
138923 -       /* Build Decoding Table */
138924 -       DTableH->tableLog = (U16)nbBits;
138925 -       DTableH->fastMode = 1;
138926 -       for (s = 0; s < maxSV1; s++) {
138927 -               dinfo[s].newState = 0;
138928 -               dinfo[s].symbol = (BYTE)s;
138929 -               dinfo[s].nbBits = (BYTE)nbBits;
138930 -       }
138932 -       return 0;
138935 -FORCE_INLINE size_t FSE_decompress_usingDTable_generic(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const FSE_DTable *dt,
138936 -                                                      const unsigned fast)
138938 -       BYTE *const ostart = (BYTE *)dst;
138939 -       BYTE *op = ostart;
138940 -       BYTE *const omax = op + maxDstSize;
138941 -       BYTE *const olimit = omax - 3;
138943 -       BIT_DStream_t bitD;
138944 -       FSE_DState_t state1;
138945 -       FSE_DState_t state2;
138947 -       /* Init */
138948 -       CHECK_F(BIT_initDStream(&bitD, cSrc, cSrcSize));
138950 -       FSE_initDState(&state1, &bitD, dt);
138951 -       FSE_initDState(&state2, &bitD, dt);
138953 -#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
138955 -       /* 4 symbols per loop */
138956 -       for (; (BIT_reloadDStream(&bitD) == BIT_DStream_unfinished) & (op < olimit); op += 4) {
138957 -               op[0] = FSE_GETSYMBOL(&state1);
138959 -               if (FSE_MAX_TABLELOG * 2 + 7 > sizeof(bitD.bitContainer) * 8) /* This test must be static */
138960 -                       BIT_reloadDStream(&bitD);
138962 -               op[1] = FSE_GETSYMBOL(&state2);
138964 -               if (FSE_MAX_TABLELOG * 4 + 7 > sizeof(bitD.bitContainer) * 8) /* This test must be static */
138965 -               {
138966 -                       if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) {
138967 -                               op += 2;
138968 -                               break;
138969 -                       }
138970 -               }
138972 -               op[2] = FSE_GETSYMBOL(&state1);
138974 -               if (FSE_MAX_TABLELOG * 2 + 7 > sizeof(bitD.bitContainer) * 8) /* This test must be static */
138975 -                       BIT_reloadDStream(&bitD);
138977 -               op[3] = FSE_GETSYMBOL(&state2);
138978 -       }
138980 -       /* tail */
138981 -       /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */
138982 -       while (1) {
138983 -               if (op > (omax - 2))
138984 -                       return ERROR(dstSize_tooSmall);
138985 -               *op++ = FSE_GETSYMBOL(&state1);
138986 -               if (BIT_reloadDStream(&bitD) == BIT_DStream_overflow) {
138987 -                       *op++ = FSE_GETSYMBOL(&state2);
138988 -                       break;
138989 -               }
138991 -               if (op > (omax - 2))
138992 -                       return ERROR(dstSize_tooSmall);
138993 -               *op++ = FSE_GETSYMBOL(&state2);
138994 -               if (BIT_reloadDStream(&bitD) == BIT_DStream_overflow) {
138995 -                       *op++ = FSE_GETSYMBOL(&state1);
138996 -                       break;
138997 -               }
138998 -       }
139000 -       return op - ostart;
139003 -size_t FSE_decompress_usingDTable(void *dst, size_t originalSize, const void *cSrc, size_t cSrcSize, const FSE_DTable *dt)
139005 -       const void *ptr = dt;
139006 -       const FSE_DTableHeader *DTableH = (const FSE_DTableHeader *)ptr;
139007 -       const U32 fastMode = DTableH->fastMode;
139009 -       /* select fast mode (static) */
139010 -       if (fastMode)
139011 -               return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
139012 -       return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
139015 -size_t FSE_decompress_wksp(void *dst, size_t dstCapacity, const void *cSrc, size_t cSrcSize, unsigned maxLog, void *workspace, size_t workspaceSize)
139017 -       const BYTE *const istart = (const BYTE *)cSrc;
139018 -       const BYTE *ip = istart;
139019 -       unsigned tableLog;
139020 -       unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
139021 -       size_t NCountLength;
139023 -       FSE_DTable *dt;
139024 -       short *counting;
139025 -       size_t spaceUsed32 = 0;
139027 -       FSE_STATIC_ASSERT(sizeof(FSE_DTable) == sizeof(U32));
139029 -       dt = (FSE_DTable *)((U32 *)workspace + spaceUsed32);
139030 -       spaceUsed32 += FSE_DTABLE_SIZE_U32(maxLog);
139031 -       counting = (short *)((U32 *)workspace + spaceUsed32);
139032 -       spaceUsed32 += ALIGN(sizeof(short) * (FSE_MAX_SYMBOL_VALUE + 1), sizeof(U32)) >> 2;
139034 -       if ((spaceUsed32 << 2) > workspaceSize)
139035 -               return ERROR(tableLog_tooLarge);
139036 -       workspace = (U32 *)workspace + spaceUsed32;
139037 -       workspaceSize -= (spaceUsed32 << 2);
139039 -       /* normal FSE decoding mode */
139040 -       NCountLength = FSE_readNCount(counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
139041 -       if (FSE_isError(NCountLength))
139042 -               return NCountLength;
139043 -       // if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong);   /* too small input size; supposed to be already checked in NCountLength, only remaining
139044 -       // case : NCountLength==cSrcSize */
139045 -       if (tableLog > maxLog)
139046 -               return ERROR(tableLog_tooLarge);
139047 -       ip += NCountLength;
139048 -       cSrcSize -= NCountLength;
139050 -       CHECK_F(FSE_buildDTable_wksp(dt, counting, maxSymbolValue, tableLog, workspace, workspaceSize));
139052 -       return FSE_decompress_usingDTable(dst, dstCapacity, ip, cSrcSize, dt); /* always return, even if it is an error code */
139054 diff --git a/lib/zstd/huf.h b/lib/zstd/huf.h
139055 deleted file mode 100644
139056 index 2143da28d952..000000000000
139057 --- a/lib/zstd/huf.h
139058 +++ /dev/null
139059 @@ -1,212 +0,0 @@
139061 - * Huffman coder, part of New Generation Entropy library
139062 - * header file
139063 - * Copyright (C) 2013-2016, Yann Collet.
139065 - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
139067 - * Redistribution and use in source and binary forms, with or without
139068 - * modification, are permitted provided that the following conditions are
139069 - * met:
139071 - *   * Redistributions of source code must retain the above copyright
139072 - * notice, this list of conditions and the following disclaimer.
139073 - *   * Redistributions in binary form must reproduce the above
139074 - * copyright notice, this list of conditions and the following disclaimer
139075 - * in the documentation and/or other materials provided with the
139076 - * distribution.
139078 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
139079 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
139080 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
139081 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
139082 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
139083 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
139084 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
139085 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
139086 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
139087 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
139088 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
139090 - * This program is free software; you can redistribute it and/or modify it under
139091 - * the terms of the GNU General Public License version 2 as published by the
139092 - * Free Software Foundation. This program is dual-licensed; you may select
139093 - * either version 2 of the GNU General Public License ("GPL") or BSD license
139094 - * ("BSD").
139096 - * You can contact the author at :
139097 - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
139098 - */
139099 -#ifndef HUF_H_298734234
139100 -#define HUF_H_298734234
139102 -/* *** Dependencies *** */
139103 -#include <linux/types.h> /* size_t */
139105 -/* ***   Tool functions *** */
139106 -#define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */
139107 -size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */
139109 -/* Error Management */
139110 -unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */
139112 -/* ***   Advanced function   *** */
139114 -/** HUF_compress4X_wksp() :
139115 -*   Same as HUF_compress2(), but uses externally allocated `workSpace`, which must be a table of >= 1024 unsigned */
139116 -size_t HUF_compress4X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace,
139117 -                          size_t wkspSize); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */
139119 -/* *** Dependencies *** */
139120 -#include "mem.h" /* U32 */
139122 -/* *** Constants *** */
139123 -#define HUF_TABLELOG_MAX 12     /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
139124 -#define HUF_TABLELOG_DEFAULT 11 /* tableLog by default, when not specified */
139125 -#define HUF_SYMBOLVALUE_MAX 255
139127 -#define HUF_TABLELOG_ABSOLUTEMAX 15 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
139128 -#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
139129 -#error "HUF_TABLELOG_MAX is too large !"
139130 -#endif
139132 -/* ****************************************
139133 -*  Static allocation
139134 -******************************************/
139135 -/* HUF buffer bounds */
139136 -#define HUF_CTABLEBOUND 129
139137 -#define HUF_BLOCKBOUND(size) (size + (size >> 8) + 8)                   /* only true if incompressible pre-filtered with fast heuristic */
139138 -#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
139140 -/* static allocation of HUF's Compression Table */
139141 -#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \
139142 -       U32 name##hb[maxSymbolValue + 1];              \
139143 -       void *name##hv = &(name##hb);                  \
139144 -       HUF_CElt *name = (HUF_CElt *)(name##hv) /* no final ; */
139146 -/* static allocation of HUF's DTable */
139147 -typedef U32 HUF_DTable;
139148 -#define HUF_DTABLE_SIZE(maxTableLog) (1 + (1 << (maxTableLog)))
139149 -#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = {((U32)((maxTableLog)-1) * 0x01000001)}
139150 -#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = {((U32)(maxTableLog)*0x01000001)}
139152 -/* The workspace must have alignment at least 4 and be at least this large */
139153 -#define HUF_COMPRESS_WORKSPACE_SIZE (6 << 10)
139154 -#define HUF_COMPRESS_WORKSPACE_SIZE_U32 (HUF_COMPRESS_WORKSPACE_SIZE / sizeof(U32))
139156 -/* The workspace must have alignment at least 4 and be at least this large */
139157 -#define HUF_DECOMPRESS_WORKSPACE_SIZE (3 << 10)
139158 -#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))
139160 -/* ****************************************
139161 -*  Advanced decompression functions
139162 -******************************************/
139163 -size_t HUF_decompress4X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize); /**< decodes RLE and uncompressed */
139164 -size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
139165 -                               size_t workspaceSize);                                                         /**< considers RLE and uncompressed as errors */
139166 -size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
139167 -                                  size_t workspaceSize); /**< single-symbol decoder */
139168 -size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
139169 -                                  size_t workspaceSize); /**< double-symbols decoder */
139171 -/* ****************************************
139172 -*  HUF detailed API
139173 -******************************************/
139175 -HUF_compress() does the following:
139176 -1. count symbol occurrence from source[] into table count[] using FSE_count()
139177 -2. (optional) refine tableLog using HUF_optimalTableLog()
139178 -3. build Huffman table from count using HUF_buildCTable()
139179 -4. save Huffman table to memory buffer using HUF_writeCTable_wksp()
139180 -5. encode the data stream using HUF_compress4X_usingCTable()
139182 -The following API allows targeting specific sub-functions for advanced tasks.
139183 -For example, it's possible to compress several blocks using the same 'CTable',
139184 -or to save and regenerate 'CTable' using external methods.
139186 -/* FSE_count() : find it within "fse.h" */
139187 -unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
139188 -typedef struct HUF_CElt_s HUF_CElt; /* incomplete type */
139189 -size_t HUF_writeCTable_wksp(void *dst, size_t maxDstSize, const HUF_CElt *CTable, unsigned maxSymbolValue, unsigned huffLog, void *workspace, size_t workspaceSize);
139190 -size_t HUF_compress4X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable);
139192 -typedef enum {
139193 -       HUF_repeat_none,  /**< Cannot use the previous table */
139194 -       HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1,
139195 -                            4}X_repeat */
139196 -       HUF_repeat_valid  /**< Can use the previous table and it is asumed to be valid */
139197 -} HUF_repeat;
139198 -/** HUF_compress4X_repeat() :
139199 -*   Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
139200 -*   If it uses hufTable it does not modify hufTable or repeat.
139201 -*   If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
139202 -*   If preferRepeat then the old table will always be used if valid. */
139203 -size_t HUF_compress4X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace,
139204 -                            size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat,
139205 -                            int preferRepeat); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */
139207 -/** HUF_buildCTable_wksp() :
139208 - *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
139209 - *  `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned.
139210 - */
139211 -size_t HUF_buildCTable_wksp(HUF_CElt *tree, const U32 *count, U32 maxSymbolValue, U32 maxNbBits, void *workSpace, size_t wkspSize);
139213 -/*! HUF_readStats() :
139214 -       Read compact Huffman tree, saved by HUF_writeCTable().
139215 -       `huffWeight` is destination buffer.
139216 -       @return : size read from `src` , or an error Code .
139217 -       Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */
139218 -size_t HUF_readStats_wksp(BYTE *huffWeight, size_t hwSize, U32 *rankStats, U32 *nbSymbolsPtr, U32 *tableLogPtr, const void *src, size_t srcSize,
139219 -                         void *workspace, size_t workspaceSize);
139221 -/** HUF_readCTable() :
139222 -*   Loading a CTable saved with HUF_writeCTable() */
139223 -size_t HUF_readCTable_wksp(HUF_CElt *CTable, unsigned maxSymbolValue, const void *src, size_t srcSize, void *workspace, size_t workspaceSize);
139226 -HUF_decompress() does the following:
139227 -1. select the decompression algorithm (X2, X4) based on pre-computed heuristics
139228 -2. build Huffman table from save, using HUF_readDTableXn()
139229 -3. decode 1 or 4 segments in parallel using HUF_decompressSXn_usingDTable
139232 -/** HUF_selectDecoder() :
139233 -*   Tells which decoder is likely to decode faster,
139234 -*   based on a set of pre-determined metrics.
139235 -*   @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 .
139236 -*   Assumption : 0 < cSrcSize < dstSize <= 128 KB */
139237 -U32 HUF_selectDecoder(size_t dstSize, size_t cSrcSize);
139239 -size_t HUF_readDTableX2_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize);
139240 -size_t HUF_readDTableX4_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize);
139242 -size_t HUF_decompress4X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
139243 -size_t HUF_decompress4X2_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
139244 -size_t HUF_decompress4X4_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
139246 -/* single stream variants */
139248 -size_t HUF_compress1X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace,
139249 -                          size_t wkspSize); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */
139250 -size_t HUF_compress1X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable);
139251 -/** HUF_compress1X_repeat() :
139252 -*   Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
139253 -*   If it uses hufTable it does not modify hufTable or repeat.
139254 -*   If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
139255 -*   If preferRepeat then the old table will always be used if valid. */
139256 -size_t HUF_compress1X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void *workSpace,
139257 -                            size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat,
139258 -                            int preferRepeat); /**< `workSpace` must be a table of at least HUF_COMPRESS_WORKSPACE_SIZE_U32 unsigned */
139260 -size_t HUF_decompress1X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize);
139261 -size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
139262 -                                  size_t workspaceSize); /**< single-symbol decoder */
139263 -size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace,
139264 -                                  size_t workspaceSize); /**< double-symbols decoder */
139266 -size_t HUF_decompress1X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize,
139267 -                                   const HUF_DTable *DTable); /**< automatic selection of sing or double symbol decoder, based on DTable */
139268 -size_t HUF_decompress1X2_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
139269 -size_t HUF_decompress1X4_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable);
139271 -#endif /* HUF_H_298734234 */
139272 diff --git a/lib/zstd/huf_compress.c b/lib/zstd/huf_compress.c
139273 deleted file mode 100644
139274 index fd32838c185f..000000000000
139275 --- a/lib/zstd/huf_compress.c
139276 +++ /dev/null
139277 @@ -1,773 +0,0 @@
139279 - * Huffman encoder, part of New Generation Entropy library
139280 - * Copyright (C) 2013-2016, Yann Collet.
139282 - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
139284 - * Redistribution and use in source and binary forms, with or without
139285 - * modification, are permitted provided that the following conditions are
139286 - * met:
139288 - *   * Redistributions of source code must retain the above copyright
139289 - * notice, this list of conditions and the following disclaimer.
139290 - *   * Redistributions in binary form must reproduce the above
139291 - * copyright notice, this list of conditions and the following disclaimer
139292 - * in the documentation and/or other materials provided with the
139293 - * distribution.
139295 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
139296 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
139297 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
139298 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
139299 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
139300 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
139301 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
139302 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
139303 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
139304 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
139305 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
139307 - * This program is free software; you can redistribute it and/or modify it under
139308 - * the terms of the GNU General Public License version 2 as published by the
139309 - * Free Software Foundation. This program is dual-licensed; you may select
139310 - * either version 2 of the GNU General Public License ("GPL") or BSD license
139311 - * ("BSD").
139313 - * You can contact the author at :
139314 - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
139315 - */
139317 -/* **************************************************************
139318 -*  Includes
139319 -****************************************************************/
139320 -#include "bitstream.h"
139321 -#include "fse.h" /* header compression */
139322 -#include "huf.h"
139323 -#include <linux/kernel.h>
139324 -#include <linux/string.h> /* memcpy, memset */
139326 -/* **************************************************************
139327 -*  Error Management
139328 -****************************************************************/
139329 -#define HUF_STATIC_ASSERT(c)                                   \
139330 -       {                                                      \
139331 -               enum { HUF_static_assert = 1 / (int)(!!(c)) }; \
139332 -       } /* use only *after* variable declarations */
139333 -#define CHECK_V_F(e, f)     \
139334 -       size_t const e = f; \
139335 -       if (ERR_isError(e)) \
139336 -       return f
139337 -#define CHECK_F(f)                        \
139338 -       {                                 \
139339 -               CHECK_V_F(_var_err__, f); \
139340 -       }
139342 -/* **************************************************************
139343 -*  Utils
139344 -****************************************************************/
139345 -unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
139347 -       return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
139350 -/* *******************************************************
139351 -*  HUF : Huffman block compression
139352 -*********************************************************/
139353 -/* HUF_compressWeights() :
139354 - * Same as FSE_compress(), but dedicated to huff0's weights compression.
139355 - * The use case needs much less stack memory.
139356 - * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
139357 - */
139358 -#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6
139359 -size_t HUF_compressWeights_wksp(void *dst, size_t dstSize, const void *weightTable, size_t wtSize, void *workspace, size_t workspaceSize)
139361 -       BYTE *const ostart = (BYTE *)dst;
139362 -       BYTE *op = ostart;
139363 -       BYTE *const oend = ostart + dstSize;
139365 -       U32 maxSymbolValue = HUF_TABLELOG_MAX;
139366 -       U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
139368 -       FSE_CTable *CTable;
139369 -       U32 *count;
139370 -       S16 *norm;
139371 -       size_t spaceUsed32 = 0;
139373 -       HUF_STATIC_ASSERT(sizeof(FSE_CTable) == sizeof(U32));
139375 -       CTable = (FSE_CTable *)((U32 *)workspace + spaceUsed32);
139376 -       spaceUsed32 += FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX);
139377 -       count = (U32 *)workspace + spaceUsed32;
139378 -       spaceUsed32 += HUF_TABLELOG_MAX + 1;
139379 -       norm = (S16 *)((U32 *)workspace + spaceUsed32);
139380 -       spaceUsed32 += ALIGN(sizeof(S16) * (HUF_TABLELOG_MAX + 1), sizeof(U32)) >> 2;
139382 -       if ((spaceUsed32 << 2) > workspaceSize)
139383 -               return ERROR(tableLog_tooLarge);
139384 -       workspace = (U32 *)workspace + spaceUsed32;
139385 -       workspaceSize -= (spaceUsed32 << 2);
139387 -       /* init conditions */
139388 -       if (wtSize <= 1)
139389 -               return 0; /* Not compressible */
139391 -       /* Scan input and build symbol stats */
139392 -       {
139393 -               CHECK_V_F(maxCount, FSE_count_simple(count, &maxSymbolValue, weightTable, wtSize));
139394 -               if (maxCount == wtSize)
139395 -                       return 1; /* only a single symbol in src : rle */
139396 -               if (maxCount == 1)
139397 -                       return 0; /* each symbol present maximum once => not compressible */
139398 -       }
139400 -       tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
139401 -       CHECK_F(FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue));
139403 -       /* Write table description header */
139404 -       {
139405 -               CHECK_V_F(hSize, FSE_writeNCount(op, oend - op, norm, maxSymbolValue, tableLog));
139406 -               op += hSize;
139407 -       }
139409 -       /* Compress */
139410 -       CHECK_F(FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, workspace, workspaceSize));
139411 -       {
139412 -               CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, weightTable, wtSize, CTable));
139413 -               if (cSize == 0)
139414 -                       return 0; /* not enough space for compressed data */
139415 -               op += cSize;
139416 -       }
139418 -       return op - ostart;
139421 -struct HUF_CElt_s {
139422 -       U16 val;
139423 -       BYTE nbBits;
139424 -}; /* typedef'd to HUF_CElt within "huf.h" */
139426 -/*! HUF_writeCTable_wksp() :
139427 -       `CTable` : Huffman tree to save, using huf representation.
139428 -       @return : size of saved CTable */
139429 -size_t HUF_writeCTable_wksp(void *dst, size_t maxDstSize, const HUF_CElt *CTable, U32 maxSymbolValue, U32 huffLog, void *workspace, size_t workspaceSize)
139431 -       BYTE *op = (BYTE *)dst;
139432 -       U32 n;
139434 -       BYTE *bitsToWeight;
139435 -       BYTE *huffWeight;
139436 -       size_t spaceUsed32 = 0;
139438 -       bitsToWeight = (BYTE *)((U32 *)workspace + spaceUsed32);
139439 -       spaceUsed32 += ALIGN(HUF_TABLELOG_MAX + 1, sizeof(U32)) >> 2;
139440 -       huffWeight = (BYTE *)((U32 *)workspace + spaceUsed32);
139441 -       spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX, sizeof(U32)) >> 2;
139443 -       if ((spaceUsed32 << 2) > workspaceSize)
139444 -               return ERROR(tableLog_tooLarge);
139445 -       workspace = (U32 *)workspace + spaceUsed32;
139446 -       workspaceSize -= (spaceUsed32 << 2);
139448 -       /* check conditions */
139449 -       if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
139450 -               return ERROR(maxSymbolValue_tooLarge);
139452 -       /* convert to weight */
139453 -       bitsToWeight[0] = 0;
139454 -       for (n = 1; n < huffLog + 1; n++)
139455 -               bitsToWeight[n] = (BYTE)(huffLog + 1 - n);
139456 -       for (n = 0; n < maxSymbolValue; n++)
139457 -               huffWeight[n] = bitsToWeight[CTable[n].nbBits];
139459 -       /* attempt weights compression by FSE */
139460 -       {
139461 -               CHECK_V_F(hSize, HUF_compressWeights_wksp(op + 1, maxDstSize - 1, huffWeight, maxSymbolValue, workspace, workspaceSize));
139462 -               if ((hSize > 1) & (hSize < maxSymbolValue / 2)) { /* FSE compressed */
139463 -                       op[0] = (BYTE)hSize;
139464 -                       return hSize + 1;
139465 -               }
139466 -       }
139468 -       /* write raw values as 4-bits (max : 15) */
139469 -       if (maxSymbolValue > (256 - 128))
139470 -               return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */
139471 -       if (((maxSymbolValue + 1) / 2) + 1 > maxDstSize)
139472 -               return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */
139473 -       op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue - 1));
139474 -       huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */
139475 -       for (n = 0; n < maxSymbolValue; n += 2)
139476 -               op[(n / 2) + 1] = (BYTE)((huffWeight[n] << 4) + huffWeight[n + 1]);
139477 -       return ((maxSymbolValue + 1) / 2) + 1;
139480 -size_t HUF_readCTable_wksp(HUF_CElt *CTable, U32 maxSymbolValue, const void *src, size_t srcSize, void *workspace, size_t workspaceSize)
139482 -       U32 *rankVal;
139483 -       BYTE *huffWeight;
139484 -       U32 tableLog = 0;
139485 -       U32 nbSymbols = 0;
139486 -       size_t readSize;
139487 -       size_t spaceUsed32 = 0;
139489 -       rankVal = (U32 *)workspace + spaceUsed32;
139490 -       spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1;
139491 -       huffWeight = (BYTE *)((U32 *)workspace + spaceUsed32);
139492 -       spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
139494 -       if ((spaceUsed32 << 2) > workspaceSize)
139495 -               return ERROR(tableLog_tooLarge);
139496 -       workspace = (U32 *)workspace + spaceUsed32;
139497 -       workspaceSize -= (spaceUsed32 << 2);
139499 -       /* get symbol weights */
139500 -       readSize = HUF_readStats_wksp(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize, workspace, workspaceSize);
139501 -       if (ERR_isError(readSize))
139502 -               return readSize;
139504 -       /* check result */
139505 -       if (tableLog > HUF_TABLELOG_MAX)
139506 -               return ERROR(tableLog_tooLarge);
139507 -       if (nbSymbols > maxSymbolValue + 1)
139508 -               return ERROR(maxSymbolValue_tooSmall);
139510 -       /* Prepare base value per rank */
139511 -       {
139512 -               U32 n, nextRankStart = 0;
139513 -               for (n = 1; n <= tableLog; n++) {
139514 -                       U32 curr = nextRankStart;
139515 -                       nextRankStart += (rankVal[n] << (n - 1));
139516 -                       rankVal[n] = curr;
139517 -               }
139518 -       }
139520 -       /* fill nbBits */
139521 -       {
139522 -               U32 n;
139523 -               for (n = 0; n < nbSymbols; n++) {
139524 -                       const U32 w = huffWeight[n];
139525 -                       CTable[n].nbBits = (BYTE)(tableLog + 1 - w);
139526 -               }
139527 -       }
139529 -       /* fill val */
139530 -       {
139531 -               U16 nbPerRank[HUF_TABLELOG_MAX + 2] = {0}; /* support w=0=>n=tableLog+1 */
139532 -               U16 valPerRank[HUF_TABLELOG_MAX + 2] = {0};
139533 -               {
139534 -                       U32 n;
139535 -                       for (n = 0; n < nbSymbols; n++)
139536 -                               nbPerRank[CTable[n].nbBits]++;
139537 -               }
139538 -               /* determine stating value per rank */
139539 -               valPerRank[tableLog + 1] = 0; /* for w==0 */
139540 -               {
139541 -                       U16 min = 0;
139542 -                       U32 n;
139543 -                       for (n = tableLog; n > 0; n--) { /* start at n=tablelog <-> w=1 */
139544 -                               valPerRank[n] = min;     /* get starting value within each rank */
139545 -                               min += nbPerRank[n];
139546 -                               min >>= 1;
139547 -                       }
139548 -               }
139549 -               /* assign value within rank, symbol order */
139550 -               {
139551 -                       U32 n;
139552 -                       for (n = 0; n <= maxSymbolValue; n++)
139553 -                               CTable[n].val = valPerRank[CTable[n].nbBits]++;
139554 -               }
139555 -       }
139557 -       return readSize;
139560 -typedef struct nodeElt_s {
139561 -       U32 count;
139562 -       U16 parent;
139563 -       BYTE byte;
139564 -       BYTE nbBits;
139565 -} nodeElt;
139567 -static U32 HUF_setMaxHeight(nodeElt *huffNode, U32 lastNonNull, U32 maxNbBits)
139569 -       const U32 largestBits = huffNode[lastNonNull].nbBits;
139570 -       if (largestBits <= maxNbBits)
139571 -               return largestBits; /* early exit : no elt > maxNbBits */
139573 -       /* there are several too large elements (at least >= 2) */
139574 -       {
139575 -               int totalCost = 0;
139576 -               const U32 baseCost = 1 << (largestBits - maxNbBits);
139577 -               U32 n = lastNonNull;
139579 -               while (huffNode[n].nbBits > maxNbBits) {
139580 -                       totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
139581 -                       huffNode[n].nbBits = (BYTE)maxNbBits;
139582 -                       n--;
139583 -               } /* n stops at huffNode[n].nbBits <= maxNbBits */
139584 -               while (huffNode[n].nbBits == maxNbBits)
139585 -                       n--; /* n end at index of smallest symbol using < maxNbBits */
139587 -               /* renorm totalCost */
139588 -               totalCost >>= (largestBits - maxNbBits); /* note : totalCost is necessarily a multiple of baseCost */
139590 -               /* repay normalized cost */
139591 -               {
139592 -                       U32 const noSymbol = 0xF0F0F0F0;
139593 -                       U32 rankLast[HUF_TABLELOG_MAX + 2];
139594 -                       int pos;
139596 -                       /* Get pos of last (smallest) symbol per rank */
139597 -                       memset(rankLast, 0xF0, sizeof(rankLast));
139598 -                       {
139599 -                               U32 currNbBits = maxNbBits;
139600 -                               for (pos = n; pos >= 0; pos--) {
139601 -                                       if (huffNode[pos].nbBits >= currNbBits)
139602 -                                               continue;
139603 -                                       currNbBits = huffNode[pos].nbBits; /* < maxNbBits */
139604 -                                       rankLast[maxNbBits - currNbBits] = pos;
139605 -                               }
139606 -                       }
139608 -                       while (totalCost > 0) {
139609 -                               U32 nBitsToDecrease = BIT_highbit32(totalCost) + 1;
139610 -                               for (; nBitsToDecrease > 1; nBitsToDecrease--) {
139611 -                                       U32 highPos = rankLast[nBitsToDecrease];
139612 -                                       U32 lowPos = rankLast[nBitsToDecrease - 1];
139613 -                                       if (highPos == noSymbol)
139614 -                                               continue;
139615 -                                       if (lowPos == noSymbol)
139616 -                                               break;
139617 -                                       {
139618 -                                               U32 const highTotal = huffNode[highPos].count;
139619 -                                               U32 const lowTotal = 2 * huffNode[lowPos].count;
139620 -                                               if (highTotal <= lowTotal)
139621 -                                                       break;
139622 -                                       }
139623 -                               }
139624 -                               /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) */
139625 -                               /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */
139626 -                               while ((nBitsToDecrease <= HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))
139627 -                                       nBitsToDecrease++;
139628 -                               totalCost -= 1 << (nBitsToDecrease - 1);
139629 -                               if (rankLast[nBitsToDecrease - 1] == noSymbol)
139630 -                                       rankLast[nBitsToDecrease - 1] = rankLast[nBitsToDecrease]; /* this rank is no longer empty */
139631 -                               huffNode[rankLast[nBitsToDecrease]].nbBits++;
139632 -                               if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */
139633 -                                       rankLast[nBitsToDecrease] = noSymbol;
139634 -                               else {
139635 -                                       rankLast[nBitsToDecrease]--;
139636 -                                       if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits - nBitsToDecrease)
139637 -                                               rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */
139638 -                               }
139639 -                       } /* while (totalCost > 0) */
139641 -                       while (totalCost < 0) {                /* Sometimes, cost correction overshoot */
139642 -                               if (rankLast[1] == noSymbol) { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0
139643 -                                                                 (using maxNbBits) */
139644 -                                       while (huffNode[n].nbBits == maxNbBits)
139645 -                                               n--;
139646 -                                       huffNode[n + 1].nbBits--;
139647 -                                       rankLast[1] = n + 1;
139648 -                                       totalCost++;
139649 -                                       continue;
139650 -                               }
139651 -                               huffNode[rankLast[1] + 1].nbBits--;
139652 -                               rankLast[1]++;
139653 -                               totalCost++;
139654 -                       }
139655 -               }
139656 -       } /* there are several too large elements (at least >= 2) */
139658 -       return maxNbBits;
139661 -typedef struct {
139662 -       U32 base;
139663 -       U32 curr;
139664 -} rankPos;
139666 -static void HUF_sort(nodeElt *huffNode, const U32 *count, U32 maxSymbolValue)
139668 -       rankPos rank[32];
139669 -       U32 n;
139671 -       memset(rank, 0, sizeof(rank));
139672 -       for (n = 0; n <= maxSymbolValue; n++) {
139673 -               U32 r = BIT_highbit32(count[n] + 1);
139674 -               rank[r].base++;
139675 -       }
139676 -       for (n = 30; n > 0; n--)
139677 -               rank[n - 1].base += rank[n].base;
139678 -       for (n = 0; n < 32; n++)
139679 -               rank[n].curr = rank[n].base;
139680 -       for (n = 0; n <= maxSymbolValue; n++) {
139681 -               U32 const c = count[n];
139682 -               U32 const r = BIT_highbit32(c + 1) + 1;
139683 -               U32 pos = rank[r].curr++;
139684 -               while ((pos > rank[r].base) && (c > huffNode[pos - 1].count))
139685 -                       huffNode[pos] = huffNode[pos - 1], pos--;
139686 -               huffNode[pos].count = c;
139687 -               huffNode[pos].byte = (BYTE)n;
139688 -       }
139691 -/** HUF_buildCTable_wksp() :
139692 - *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
139693 - *  `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned.
139694 - */
139695 -#define STARTNODE (HUF_SYMBOLVALUE_MAX + 1)
139696 -typedef nodeElt huffNodeTable[2 * HUF_SYMBOLVALUE_MAX + 1 + 1];
139697 -size_t HUF_buildCTable_wksp(HUF_CElt *tree, const U32 *count, U32 maxSymbolValue, U32 maxNbBits, void *workSpace, size_t wkspSize)
139699 -       nodeElt *const huffNode0 = (nodeElt *)workSpace;
139700 -       nodeElt *const huffNode = huffNode0 + 1;
139701 -       U32 n, nonNullRank;
139702 -       int lowS, lowN;
139703 -       U16 nodeNb = STARTNODE;
139704 -       U32 nodeRoot;
139706 -       /* safety checks */
139707 -       if (wkspSize < sizeof(huffNodeTable))
139708 -               return ERROR(GENERIC); /* workSpace is not large enough */
139709 -       if (maxNbBits == 0)
139710 -               maxNbBits = HUF_TABLELOG_DEFAULT;
139711 -       if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
139712 -               return ERROR(GENERIC);
139713 -       memset(huffNode0, 0, sizeof(huffNodeTable));
139715 -       /* sort, decreasing order */
139716 -       HUF_sort(huffNode, count, maxSymbolValue);
139718 -       /* init for parents */
139719 -       nonNullRank = maxSymbolValue;
139720 -       while (huffNode[nonNullRank].count == 0)
139721 -               nonNullRank--;
139722 -       lowS = nonNullRank;
139723 -       nodeRoot = nodeNb + lowS - 1;
139724 -       lowN = nodeNb;
139725 -       huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS - 1].count;
139726 -       huffNode[lowS].parent = huffNode[lowS - 1].parent = nodeNb;
139727 -       nodeNb++;
139728 -       lowS -= 2;
139729 -       for (n = nodeNb; n <= nodeRoot; n++)
139730 -               huffNode[n].count = (U32)(1U << 30);
139731 -       huffNode0[0].count = (U32)(1U << 31); /* fake entry, strong barrier */
139733 -       /* create parents */
139734 -       while (nodeNb <= nodeRoot) {
139735 -               U32 n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
139736 -               U32 n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
139737 -               huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;
139738 -               huffNode[n1].parent = huffNode[n2].parent = nodeNb;
139739 -               nodeNb++;
139740 -       }
139742 -       /* distribute weights (unlimited tree height) */
139743 -       huffNode[nodeRoot].nbBits = 0;
139744 -       for (n = nodeRoot - 1; n >= STARTNODE; n--)
139745 -               huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1;
139746 -       for (n = 0; n <= nonNullRank; n++)
139747 -               huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1;
139749 -       /* enforce maxTableLog */
139750 -       maxNbBits = HUF_setMaxHeight(huffNode, nonNullRank, maxNbBits);
139752 -       /* fill result into tree (val, nbBits) */
139753 -       {
139754 -               U16 nbPerRank[HUF_TABLELOG_MAX + 1] = {0};
139755 -               U16 valPerRank[HUF_TABLELOG_MAX + 1] = {0};
139756 -               if (maxNbBits > HUF_TABLELOG_MAX)
139757 -                       return ERROR(GENERIC); /* check fit into table */
139758 -               for (n = 0; n <= nonNullRank; n++)
139759 -                       nbPerRank[huffNode[n].nbBits]++;
139760 -               /* determine stating value per rank */
139761 -               {
139762 -                       U16 min = 0;
139763 -                       for (n = maxNbBits; n > 0; n--) {
139764 -                               valPerRank[n] = min; /* get starting value within each rank */
139765 -                               min += nbPerRank[n];
139766 -                               min >>= 1;
139767 -                       }
139768 -               }
139769 -               for (n = 0; n <= maxSymbolValue; n++)
139770 -                       tree[huffNode[n].byte].nbBits = huffNode[n].nbBits; /* push nbBits per symbol, symbol order */
139771 -               for (n = 0; n <= maxSymbolValue; n++)
139772 -                       tree[n].val = valPerRank[tree[n].nbBits]++; /* assign value within rank, symbol order */
139773 -       }
139775 -       return maxNbBits;
139778 -static size_t HUF_estimateCompressedSize(HUF_CElt *CTable, const unsigned *count, unsigned maxSymbolValue)
139780 -       size_t nbBits = 0;
139781 -       int s;
139782 -       for (s = 0; s <= (int)maxSymbolValue; ++s) {
139783 -               nbBits += CTable[s].nbBits * count[s];
139784 -       }
139785 -       return nbBits >> 3;
139788 -static int HUF_validateCTable(const HUF_CElt *CTable, const unsigned *count, unsigned maxSymbolValue)
139790 -       int bad = 0;
139791 -       int s;
139792 -       for (s = 0; s <= (int)maxSymbolValue; ++s) {
139793 -               bad |= (count[s] != 0) & (CTable[s].nbBits == 0);
139794 -       }
139795 -       return !bad;
139798 -static void HUF_encodeSymbol(BIT_CStream_t *bitCPtr, U32 symbol, const HUF_CElt *CTable)
139800 -       BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits);
139803 -size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
139805 -#define HUF_FLUSHBITS(s)  BIT_flushBits(s)
139807 -#define HUF_FLUSHBITS_1(stream)                                            \
139808 -       if (sizeof((stream)->bitContainer) * 8 < HUF_TABLELOG_MAX * 2 + 7) \
139809 -       HUF_FLUSHBITS(stream)
139811 -#define HUF_FLUSHBITS_2(stream)                                            \
139812 -       if (sizeof((stream)->bitContainer) * 8 < HUF_TABLELOG_MAX * 4 + 7) \
139813 -       HUF_FLUSHBITS(stream)
139815 -size_t HUF_compress1X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable)
139817 -       const BYTE *ip = (const BYTE *)src;
139818 -       BYTE *const ostart = (BYTE *)dst;
139819 -       BYTE *const oend = ostart + dstSize;
139820 -       BYTE *op = ostart;
139821 -       size_t n;
139822 -       BIT_CStream_t bitC;
139824 -       /* init */
139825 -       if (dstSize < 8)
139826 -               return 0; /* not enough space to compress */
139827 -       {
139828 -               size_t const initErr = BIT_initCStream(&bitC, op, oend - op);
139829 -               if (HUF_isError(initErr))
139830 -                       return 0;
139831 -       }
139833 -       n = srcSize & ~3; /* join to mod 4 */
139834 -       switch (srcSize & 3) {
139835 -       case 3: HUF_encodeSymbol(&bitC, ip[n + 2], CTable); HUF_FLUSHBITS_2(&bitC);
139836 -               fallthrough;
139837 -       case 2: HUF_encodeSymbol(&bitC, ip[n + 1], CTable); HUF_FLUSHBITS_1(&bitC);
139838 -               fallthrough;
139839 -       case 1: HUF_encodeSymbol(&bitC, ip[n + 0], CTable); HUF_FLUSHBITS(&bitC);
139840 -               fallthrough;
139841 -       case 0:
139842 -       default:;
139843 -       }
139845 -       for (; n > 0; n -= 4) { /* note : n&3==0 at this stage */
139846 -               HUF_encodeSymbol(&bitC, ip[n - 1], CTable);
139847 -               HUF_FLUSHBITS_1(&bitC);
139848 -               HUF_encodeSymbol(&bitC, ip[n - 2], CTable);
139849 -               HUF_FLUSHBITS_2(&bitC);
139850 -               HUF_encodeSymbol(&bitC, ip[n - 3], CTable);
139851 -               HUF_FLUSHBITS_1(&bitC);
139852 -               HUF_encodeSymbol(&bitC, ip[n - 4], CTable);
139853 -               HUF_FLUSHBITS(&bitC);
139854 -       }
139856 -       return BIT_closeCStream(&bitC);
139859 -size_t HUF_compress4X_usingCTable(void *dst, size_t dstSize, const void *src, size_t srcSize, const HUF_CElt *CTable)
139861 -       size_t const segmentSize = (srcSize + 3) / 4; /* first 3 segments */
139862 -       const BYTE *ip = (const BYTE *)src;
139863 -       const BYTE *const iend = ip + srcSize;
139864 -       BYTE *const ostart = (BYTE *)dst;
139865 -       BYTE *const oend = ostart + dstSize;
139866 -       BYTE *op = ostart;
139868 -       if (dstSize < 6 + 1 + 1 + 1 + 8)
139869 -               return 0; /* minimum space to compress successfully */
139870 -       if (srcSize < 12)
139871 -               return 0; /* no saving possible : too small input */
139872 -       op += 6;          /* jumpTable */
139874 -       {
139875 -               CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, segmentSize, CTable));
139876 -               if (cSize == 0)
139877 -                       return 0;
139878 -               ZSTD_writeLE16(ostart, (U16)cSize);
139879 -               op += cSize;
139880 -       }
139882 -       ip += segmentSize;
139883 -       {
139884 -               CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, segmentSize, CTable));
139885 -               if (cSize == 0)
139886 -                       return 0;
139887 -               ZSTD_writeLE16(ostart + 2, (U16)cSize);
139888 -               op += cSize;
139889 -       }
139891 -       ip += segmentSize;
139892 -       {
139893 -               CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, segmentSize, CTable));
139894 -               if (cSize == 0)
139895 -                       return 0;
139896 -               ZSTD_writeLE16(ostart + 4, (U16)cSize);
139897 -               op += cSize;
139898 -       }
139900 -       ip += segmentSize;
139901 -       {
139902 -               CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend - op, ip, iend - ip, CTable));
139903 -               if (cSize == 0)
139904 -                       return 0;
139905 -               op += cSize;
139906 -       }
139908 -       return op - ostart;
139911 -static size_t HUF_compressCTable_internal(BYTE *const ostart, BYTE *op, BYTE *const oend, const void *src, size_t srcSize, unsigned singleStream,
139912 -                                         const HUF_CElt *CTable)
139914 -       size_t const cSize =
139915 -           singleStream ? HUF_compress1X_usingCTable(op, oend - op, src, srcSize, CTable) : HUF_compress4X_usingCTable(op, oend - op, src, srcSize, CTable);
139916 -       if (HUF_isError(cSize)) {
139917 -               return cSize;
139918 -       }
139919 -       if (cSize == 0) {
139920 -               return 0;
139921 -       } /* uncompressible */
139922 -       op += cSize;
139923 -       /* check compressibility */
139924 -       if ((size_t)(op - ostart) >= srcSize - 1) {
139925 -               return 0;
139926 -       }
139927 -       return op - ostart;
139930 -/* `workSpace` must a table of at least 1024 unsigned */
139931 -static size_t HUF_compress_internal(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog,
139932 -                                   unsigned singleStream, void *workSpace, size_t wkspSize, HUF_CElt *oldHufTable, HUF_repeat *repeat, int preferRepeat)
139934 -       BYTE *const ostart = (BYTE *)dst;
139935 -       BYTE *const oend = ostart + dstSize;
139936 -       BYTE *op = ostart;
139938 -       U32 *count;
139939 -       size_t const countSize = sizeof(U32) * (HUF_SYMBOLVALUE_MAX + 1);
139940 -       HUF_CElt *CTable;
139941 -       size_t const CTableSize = sizeof(HUF_CElt) * (HUF_SYMBOLVALUE_MAX + 1);
139943 -       /* checks & inits */
139944 -       if (wkspSize < sizeof(huffNodeTable) + countSize + CTableSize)
139945 -               return ERROR(GENERIC);
139946 -       if (!srcSize)
139947 -               return 0; /* Uncompressed (note : 1 means rle, so first byte must be correct) */
139948 -       if (!dstSize)
139949 -               return 0; /* cannot fit within dst budget */
139950 -       if (srcSize > HUF_BLOCKSIZE_MAX)
139951 -               return ERROR(srcSize_wrong); /* curr block size limit */
139952 -       if (huffLog > HUF_TABLELOG_MAX)
139953 -               return ERROR(tableLog_tooLarge);
139954 -       if (!maxSymbolValue)
139955 -               maxSymbolValue = HUF_SYMBOLVALUE_MAX;
139956 -       if (!huffLog)
139957 -               huffLog = HUF_TABLELOG_DEFAULT;
139959 -       count = (U32 *)workSpace;
139960 -       workSpace = (BYTE *)workSpace + countSize;
139961 -       wkspSize -= countSize;
139962 -       CTable = (HUF_CElt *)workSpace;
139963 -       workSpace = (BYTE *)workSpace + CTableSize;
139964 -       wkspSize -= CTableSize;
139966 -       /* Heuristic : If we don't need to check the validity of the old table use the old table for small inputs */
139967 -       if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
139968 -               return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
139969 -       }
139971 -       /* Scan input and build symbol stats */
139972 -       {
139973 -               CHECK_V_F(largest, FSE_count_wksp(count, &maxSymbolValue, (const BYTE *)src, srcSize, (U32 *)workSpace));
139974 -               if (largest == srcSize) {
139975 -                       *ostart = ((const BYTE *)src)[0];
139976 -                       return 1;
139977 -               } /* single symbol, rle */
139978 -               if (largest <= (srcSize >> 7) + 1)
139979 -                       return 0; /* Fast heuristic : not compressible enough */
139980 -       }
139982 -       /* Check validity of previous table */
139983 -       if (repeat && *repeat == HUF_repeat_check && !HUF_validateCTable(oldHufTable, count, maxSymbolValue)) {
139984 -               *repeat = HUF_repeat_none;
139985 -       }
139986 -       /* Heuristic : use existing table for small inputs */
139987 -       if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
139988 -               return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
139989 -       }
139991 -       /* Build Huffman Tree */
139992 -       huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
139993 -       {
139994 -               CHECK_V_F(maxBits, HUF_buildCTable_wksp(CTable, count, maxSymbolValue, huffLog, workSpace, wkspSize));
139995 -               huffLog = (U32)maxBits;
139996 -               /* Zero the unused symbols so we can check it for validity */
139997 -               memset(CTable + maxSymbolValue + 1, 0, CTableSize - (maxSymbolValue + 1) * sizeof(HUF_CElt));
139998 -       }
140000 -       /* Write table description header */
140001 -       {
140002 -               CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, CTable, maxSymbolValue, huffLog, workSpace, wkspSize));
140003 -               /* Check if using the previous table will be beneficial */
140004 -               if (repeat && *repeat != HUF_repeat_none) {
140005 -                       size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, count, maxSymbolValue);
140006 -                       size_t const newSize = HUF_estimateCompressedSize(CTable, count, maxSymbolValue);
140007 -                       if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
140008 -                               return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
140009 -                       }
140010 -               }
140011 -               /* Use the new table */
140012 -               if (hSize + 12ul >= srcSize) {
140013 -                       return 0;
140014 -               }
140015 -               op += hSize;
140016 -               if (repeat) {
140017 -                       *repeat = HUF_repeat_none;
140018 -               }
140019 -               if (oldHufTable) {
140020 -                       memcpy(oldHufTable, CTable, CTableSize);
140021 -               } /* Save the new table */
140022 -       }
140023 -       return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, CTable);
140026 -size_t HUF_compress1X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace,
140027 -                          size_t wkspSize)
140029 -       return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, NULL, NULL, 0);
140032 -size_t HUF_compress1X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace,
140033 -                            size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat, int preferRepeat)
140035 -       return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, hufTable, repeat,
140036 -                                    preferRepeat);
140039 -size_t HUF_compress4X_wksp(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace,
140040 -                          size_t wkspSize)
140042 -       return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, NULL, NULL, 0);
140045 -size_t HUF_compress4X_repeat(void *dst, size_t dstSize, const void *src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void *workSpace,
140046 -                            size_t wkspSize, HUF_CElt *hufTable, HUF_repeat *repeat, int preferRepeat)
140048 -       return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, hufTable, repeat,
140049 -                                    preferRepeat);
140051 diff --git a/lib/zstd/huf_decompress.c b/lib/zstd/huf_decompress.c
140052 deleted file mode 100644
140053 index 6526482047dc..000000000000
140054 --- a/lib/zstd/huf_decompress.c
140055 +++ /dev/null
140056 @@ -1,960 +0,0 @@
140058 - * Huffman decoder, part of New Generation Entropy library
140059 - * Copyright (C) 2013-2016, Yann Collet.
140061 - * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
140063 - * Redistribution and use in source and binary forms, with or without
140064 - * modification, are permitted provided that the following conditions are
140065 - * met:
140067 - *   * Redistributions of source code must retain the above copyright
140068 - * notice, this list of conditions and the following disclaimer.
140069 - *   * Redistributions in binary form must reproduce the above
140070 - * copyright notice, this list of conditions and the following disclaimer
140071 - * in the documentation and/or other materials provided with the
140072 - * distribution.
140074 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
140075 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
140076 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
140077 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
140078 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
140079 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
140080 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
140081 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
140082 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
140083 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
140084 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
140086 - * This program is free software; you can redistribute it and/or modify it under
140087 - * the terms of the GNU General Public License version 2 as published by the
140088 - * Free Software Foundation. This program is dual-licensed; you may select
140089 - * either version 2 of the GNU General Public License ("GPL") or BSD license
140090 - * ("BSD").
140092 - * You can contact the author at :
140093 - * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
140094 - */
140096 -/* **************************************************************
140097 -*  Compiler specifics
140098 -****************************************************************/
140099 -#define FORCE_INLINE static __always_inline
140101 -/* **************************************************************
140102 -*  Dependencies
140103 -****************************************************************/
140104 -#include "bitstream.h" /* BIT_* */
140105 -#include "fse.h"       /* header compression */
140106 -#include "huf.h"
140107 -#include <linux/compiler.h>
140108 -#include <linux/kernel.h>
140109 -#include <linux/string.h> /* memcpy, memset */
140111 -/* **************************************************************
140112 -*  Error Management
140113 -****************************************************************/
140114 -#define HUF_STATIC_ASSERT(c)                                   \
140115 -       {                                                      \
140116 -               enum { HUF_static_assert = 1 / (int)(!!(c)) }; \
140117 -       } /* use only *after* variable declarations */
140119 -/*-***************************/
140120 -/*  generic DTableDesc       */
140121 -/*-***************************/
140123 -typedef struct {
140124 -       BYTE maxTableLog;
140125 -       BYTE tableType;
140126 -       BYTE tableLog;
140127 -       BYTE reserved;
140128 -} DTableDesc;
140130 -static DTableDesc HUF_getDTableDesc(const HUF_DTable *table)
140132 -       DTableDesc dtd;
140133 -       memcpy(&dtd, table, sizeof(dtd));
140134 -       return dtd;
140137 -/*-***************************/
140138 -/*  single-symbol decoding   */
140139 -/*-***************************/
140141 -typedef struct {
140142 -       BYTE byte;
140143 -       BYTE nbBits;
140144 -} HUF_DEltX2; /* single-symbol decoding */
140146 -size_t HUF_readDTableX2_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize)
140148 -       U32 tableLog = 0;
140149 -       U32 nbSymbols = 0;
140150 -       size_t iSize;
140151 -       void *const dtPtr = DTable + 1;
140152 -       HUF_DEltX2 *const dt = (HUF_DEltX2 *)dtPtr;
140154 -       U32 *rankVal;
140155 -       BYTE *huffWeight;
140156 -       size_t spaceUsed32 = 0;
140158 -       rankVal = (U32 *)workspace + spaceUsed32;
140159 -       spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1;
140160 -       huffWeight = (BYTE *)((U32 *)workspace + spaceUsed32);
140161 -       spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
140163 -       if ((spaceUsed32 << 2) > workspaceSize)
140164 -               return ERROR(tableLog_tooLarge);
140165 -       workspace = (U32 *)workspace + spaceUsed32;
140166 -       workspaceSize -= (spaceUsed32 << 2);
140168 -       HUF_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
140169 -       /* memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzer complain ... */
140171 -       iSize = HUF_readStats_wksp(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize, workspace, workspaceSize);
140172 -       if (HUF_isError(iSize))
140173 -               return iSize;
140175 -       /* Table header */
140176 -       {
140177 -               DTableDesc dtd = HUF_getDTableDesc(DTable);
140178 -               if (tableLog > (U32)(dtd.maxTableLog + 1))
140179 -                       return ERROR(tableLog_tooLarge); /* DTable too small, Huffman tree cannot fit in */
140180 -               dtd.tableType = 0;
140181 -               dtd.tableLog = (BYTE)tableLog;
140182 -               memcpy(DTable, &dtd, sizeof(dtd));
140183 -       }
140185 -       /* Calculate starting value for each rank */
140186 -       {
140187 -               U32 n, nextRankStart = 0;
140188 -               for (n = 1; n < tableLog + 1; n++) {
140189 -                       U32 const curr = nextRankStart;
140190 -                       nextRankStart += (rankVal[n] << (n - 1));
140191 -                       rankVal[n] = curr;
140192 -               }
140193 -       }
140195 -       /* fill DTable */
140196 -       {
140197 -               U32 n;
140198 -               for (n = 0; n < nbSymbols; n++) {
140199 -                       U32 const w = huffWeight[n];
140200 -                       U32 const length = (1 << w) >> 1;
140201 -                       U32 u;
140202 -                       HUF_DEltX2 D;
140203 -                       D.byte = (BYTE)n;
140204 -                       D.nbBits = (BYTE)(tableLog + 1 - w);
140205 -                       for (u = rankVal[w]; u < rankVal[w] + length; u++)
140206 -                               dt[u] = D;
140207 -                       rankVal[w] += length;
140208 -               }
140209 -       }
140211 -       return iSize;
140214 -static BYTE HUF_decodeSymbolX2(BIT_DStream_t *Dstream, const HUF_DEltX2 *dt, const U32 dtLog)
140216 -       size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
140217 -       BYTE const c = dt[val].byte;
140218 -       BIT_skipBits(Dstream, dt[val].nbBits);
140219 -       return c;
140222 -#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog)
140224 -#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr)         \
140225 -       if (ZSTD_64bits() || (HUF_TABLELOG_MAX <= 12)) \
140226 -       HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
140228 -#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
140229 -       if (ZSTD_64bits())                     \
140230 -       HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
140232 -FORCE_INLINE size_t HUF_decodeStreamX2(BYTE *p, BIT_DStream_t *const bitDPtr, BYTE *const pEnd, const HUF_DEltX2 *const dt, const U32 dtLog)
140234 -       BYTE *const pStart = p;
140236 -       /* up to 4 symbols at a time */
140237 -       while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd - 4)) {
140238 -               HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
140239 -               HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
140240 -               HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
140241 -               HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
140242 -       }
140244 -       /* closer to the end */
140245 -       while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd))
140246 -               HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
140248 -       /* no more data to retrieve from bitstream, hence no need to reload */
140249 -       while (p < pEnd)
140250 -               HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
140252 -       return pEnd - pStart;
140255 -static size_t HUF_decompress1X2_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
140257 -       BYTE *op = (BYTE *)dst;
140258 -       BYTE *const oend = op + dstSize;
140259 -       const void *dtPtr = DTable + 1;
140260 -       const HUF_DEltX2 *const dt = (const HUF_DEltX2 *)dtPtr;
140261 -       BIT_DStream_t bitD;
140262 -       DTableDesc const dtd = HUF_getDTableDesc(DTable);
140263 -       U32 const dtLog = dtd.tableLog;
140265 -       {
140266 -               size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);
140267 -               if (HUF_isError(errorCode))
140268 -                       return errorCode;
140269 -       }
140271 -       HUF_decodeStreamX2(op, &bitD, oend, dt, dtLog);
140273 -       /* check */
140274 -       if (!BIT_endOfDStream(&bitD))
140275 -               return ERROR(corruption_detected);
140277 -       return dstSize;
140280 -size_t HUF_decompress1X2_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
140282 -       DTableDesc dtd = HUF_getDTableDesc(DTable);
140283 -       if (dtd.tableType != 0)
140284 -               return ERROR(GENERIC);
140285 -       return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
140288 -size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable *DCtx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
140290 -       const BYTE *ip = (const BYTE *)cSrc;
140292 -       size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize, workspace, workspaceSize);
140293 -       if (HUF_isError(hSize))
140294 -               return hSize;
140295 -       if (hSize >= cSrcSize)
140296 -               return ERROR(srcSize_wrong);
140297 -       ip += hSize;
140298 -       cSrcSize -= hSize;
140300 -       return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx);
140303 -static size_t HUF_decompress4X2_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
140305 -       /* Check */
140306 -       if (cSrcSize < 10)
140307 -               return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
140309 -       {
140310 -               const BYTE *const istart = (const BYTE *)cSrc;
140311 -               BYTE *const ostart = (BYTE *)dst;
140312 -               BYTE *const oend = ostart + dstSize;
140313 -               const void *const dtPtr = DTable + 1;
140314 -               const HUF_DEltX2 *const dt = (const HUF_DEltX2 *)dtPtr;
140316 -               /* Init */
140317 -               BIT_DStream_t bitD1;
140318 -               BIT_DStream_t bitD2;
140319 -               BIT_DStream_t bitD3;
140320 -               BIT_DStream_t bitD4;
140321 -               size_t const length1 = ZSTD_readLE16(istart);
140322 -               size_t const length2 = ZSTD_readLE16(istart + 2);
140323 -               size_t const length3 = ZSTD_readLE16(istart + 4);
140324 -               size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
140325 -               const BYTE *const istart1 = istart + 6; /* jumpTable */
140326 -               const BYTE *const istart2 = istart1 + length1;
140327 -               const BYTE *const istart3 = istart2 + length2;
140328 -               const BYTE *const istart4 = istart3 + length3;
140329 -               const size_t segmentSize = (dstSize + 3) / 4;
140330 -               BYTE *const opStart2 = ostart + segmentSize;
140331 -               BYTE *const opStart3 = opStart2 + segmentSize;
140332 -               BYTE *const opStart4 = opStart3 + segmentSize;
140333 -               BYTE *op1 = ostart;
140334 -               BYTE *op2 = opStart2;
140335 -               BYTE *op3 = opStart3;
140336 -               BYTE *op4 = opStart4;
140337 -               U32 endSignal;
140338 -               DTableDesc const dtd = HUF_getDTableDesc(DTable);
140339 -               U32 const dtLog = dtd.tableLog;
140341 -               if (length4 > cSrcSize)
140342 -                       return ERROR(corruption_detected); /* overflow */
140343 -               {
140344 -                       size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1);
140345 -                       if (HUF_isError(errorCode))
140346 -                               return errorCode;
140347 -               }
140348 -               {
140349 -                       size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2);
140350 -                       if (HUF_isError(errorCode))
140351 -                               return errorCode;
140352 -               }
140353 -               {
140354 -                       size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3);
140355 -                       if (HUF_isError(errorCode))
140356 -                               return errorCode;
140357 -               }
140358 -               {
140359 -                       size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4);
140360 -                       if (HUF_isError(errorCode))
140361 -                               return errorCode;
140362 -               }
140364 -               /* 16-32 symbols per loop (4-8 symbols per stream) */
140365 -               endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
140366 -               for (; (endSignal == BIT_DStream_unfinished) && (op4 < (oend - 7));) {
140367 -                       HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
140368 -                       HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
140369 -                       HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
140370 -                       HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
140371 -                       HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
140372 -                       HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
140373 -                       HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
140374 -                       HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
140375 -                       HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
140376 -                       HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
140377 -                       HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
140378 -                       HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
140379 -                       HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
140380 -                       HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
140381 -                       HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
140382 -                       HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
140383 -                       endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
140384 -               }
140386 -               /* check corruption */
140387 -               if (op1 > opStart2)
140388 -                       return ERROR(corruption_detected);
140389 -               if (op2 > opStart3)
140390 -                       return ERROR(corruption_detected);
140391 -               if (op3 > opStart4)
140392 -                       return ERROR(corruption_detected);
140393 -               /* note : op4 supposed already verified within main loop */
140395 -               /* finish bitStreams one by one */
140396 -               HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
140397 -               HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
140398 -               HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
140399 -               HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog);
140401 -               /* check */
140402 -               endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
140403 -               if (!endSignal)
140404 -                       return ERROR(corruption_detected);
140406 -               /* decoded size */
140407 -               return dstSize;
140408 -       }
140411 -size_t HUF_decompress4X2_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
140413 -       DTableDesc dtd = HUF_getDTableDesc(DTable);
140414 -       if (dtd.tableType != 0)
140415 -               return ERROR(GENERIC);
140416 -       return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
140419 -size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
140421 -       const BYTE *ip = (const BYTE *)cSrc;
140423 -       size_t const hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize, workspace, workspaceSize);
140424 -       if (HUF_isError(hSize))
140425 -               return hSize;
140426 -       if (hSize >= cSrcSize)
140427 -               return ERROR(srcSize_wrong);
140428 -       ip += hSize;
140429 -       cSrcSize -= hSize;
140431 -       return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx);
140434 -/* *************************/
140435 -/* double-symbols decoding */
140436 -/* *************************/
140437 -typedef struct {
140438 -       U16 sequence;
140439 -       BYTE nbBits;
140440 -       BYTE length;
140441 -} HUF_DEltX4; /* double-symbols decoding */
140443 -typedef struct {
140444 -       BYTE symbol;
140445 -       BYTE weight;
140446 -} sortedSymbol_t;
140448 -/* HUF_fillDTableX4Level2() :
140449 - * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */
140450 -static void HUF_fillDTableX4Level2(HUF_DEltX4 *DTable, U32 sizeLog, const U32 consumed, const U32 *rankValOrigin, const int minWeight,
140451 -                                  const sortedSymbol_t *sortedSymbols, const U32 sortedListSize, U32 nbBitsBaseline, U16 baseSeq)
140453 -       HUF_DEltX4 DElt;
140454 -       U32 rankVal[HUF_TABLELOG_MAX + 1];
140456 -       /* get pre-calculated rankVal */
140457 -       memcpy(rankVal, rankValOrigin, sizeof(rankVal));
140459 -       /* fill skipped values */
140460 -       if (minWeight > 1) {
140461 -               U32 i, skipSize = rankVal[minWeight];
140462 -               ZSTD_writeLE16(&(DElt.sequence), baseSeq);
140463 -               DElt.nbBits = (BYTE)(consumed);
140464 -               DElt.length = 1;
140465 -               for (i = 0; i < skipSize; i++)
140466 -                       DTable[i] = DElt;
140467 -       }
140469 -       /* fill DTable */
140470 -       {
140471 -               U32 s;
140472 -               for (s = 0; s < sortedListSize; s++) { /* note : sortedSymbols already skipped */
140473 -                       const U32 symbol = sortedSymbols[s].symbol;
140474 -                       const U32 weight = sortedSymbols[s].weight;
140475 -                       const U32 nbBits = nbBitsBaseline - weight;
140476 -                       const U32 length = 1 << (sizeLog - nbBits);
140477 -                       const U32 start = rankVal[weight];
140478 -                       U32 i = start;
140479 -                       const U32 end = start + length;
140481 -                       ZSTD_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
140482 -                       DElt.nbBits = (BYTE)(nbBits + consumed);
140483 -                       DElt.length = 2;
140484 -                       do {
140485 -                               DTable[i++] = DElt;
140486 -                       } while (i < end); /* since length >= 1 */
140488 -                       rankVal[weight] += length;
140489 -               }
140490 -       }
140493 -typedef U32 rankVal_t[HUF_TABLELOG_MAX][HUF_TABLELOG_MAX + 1];
140494 -typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1];
140496 -static void HUF_fillDTableX4(HUF_DEltX4 *DTable, const U32 targetLog, const sortedSymbol_t *sortedList, const U32 sortedListSize, const U32 *rankStart,
140497 -                            rankVal_t rankValOrigin, const U32 maxWeight, const U32 nbBitsBaseline)
140499 -       U32 rankVal[HUF_TABLELOG_MAX + 1];
140500 -       const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */
140501 -       const U32 minBits = nbBitsBaseline - maxWeight;
140502 -       U32 s;
140504 -       memcpy(rankVal, rankValOrigin, sizeof(rankVal));
140506 -       /* fill DTable */
140507 -       for (s = 0; s < sortedListSize; s++) {
140508 -               const U16 symbol = sortedList[s].symbol;
140509 -               const U32 weight = sortedList[s].weight;
140510 -               const U32 nbBits = nbBitsBaseline - weight;
140511 -               const U32 start = rankVal[weight];
140512 -               const U32 length = 1 << (targetLog - nbBits);
140514 -               if (targetLog - nbBits >= minBits) { /* enough room for a second symbol */
140515 -                       U32 sortedRank;
140516 -                       int minWeight = nbBits + scaleLog;
140517 -                       if (minWeight < 1)
140518 -                               minWeight = 1;
140519 -                       sortedRank = rankStart[minWeight];
140520 -                       HUF_fillDTableX4Level2(DTable + start, targetLog - nbBits, nbBits, rankValOrigin[nbBits], minWeight, sortedList + sortedRank,
140521 -                                              sortedListSize - sortedRank, nbBitsBaseline, symbol);
140522 -               } else {
140523 -                       HUF_DEltX4 DElt;
140524 -                       ZSTD_writeLE16(&(DElt.sequence), symbol);
140525 -                       DElt.nbBits = (BYTE)(nbBits);
140526 -                       DElt.length = 1;
140527 -                       {
140528 -                               U32 const end = start + length;
140529 -                               U32 u;
140530 -                               for (u = start; u < end; u++)
140531 -                                       DTable[u] = DElt;
140532 -                       }
140533 -               }
140534 -               rankVal[weight] += length;
140535 -       }
140538 -size_t HUF_readDTableX4_wksp(HUF_DTable *DTable, const void *src, size_t srcSize, void *workspace, size_t workspaceSize)
140540 -       U32 tableLog, maxW, sizeOfSort, nbSymbols;
140541 -       DTableDesc dtd = HUF_getDTableDesc(DTable);
140542 -       U32 const maxTableLog = dtd.maxTableLog;
140543 -       size_t iSize;
140544 -       void *dtPtr = DTable + 1; /* force compiler to avoid strict-aliasing */
140545 -       HUF_DEltX4 *const dt = (HUF_DEltX4 *)dtPtr;
140546 -       U32 *rankStart;
140548 -       rankValCol_t *rankVal;
140549 -       U32 *rankStats;
140550 -       U32 *rankStart0;
140551 -       sortedSymbol_t *sortedSymbol;
140552 -       BYTE *weightList;
140553 -       size_t spaceUsed32 = 0;
140555 -       HUF_STATIC_ASSERT((sizeof(rankValCol_t) & 3) == 0);
140557 -       rankVal = (rankValCol_t *)((U32 *)workspace + spaceUsed32);
140558 -       spaceUsed32 += (sizeof(rankValCol_t) * HUF_TABLELOG_MAX) >> 2;
140559 -       rankStats = (U32 *)workspace + spaceUsed32;
140560 -       spaceUsed32 += HUF_TABLELOG_MAX + 1;
140561 -       rankStart0 = (U32 *)workspace + spaceUsed32;
140562 -       spaceUsed32 += HUF_TABLELOG_MAX + 2;
140563 -       sortedSymbol = (sortedSymbol_t *)((U32 *)workspace + spaceUsed32);
140564 -       spaceUsed32 += ALIGN(sizeof(sortedSymbol_t) * (HUF_SYMBOLVALUE_MAX + 1), sizeof(U32)) >> 2;
140565 -       weightList = (BYTE *)((U32 *)workspace + spaceUsed32);
140566 -       spaceUsed32 += ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
140568 -       if ((spaceUsed32 << 2) > workspaceSize)
140569 -               return ERROR(tableLog_tooLarge);
140570 -       workspace = (U32 *)workspace + spaceUsed32;
140571 -       workspaceSize -= (spaceUsed32 << 2);
140573 -       rankStart = rankStart0 + 1;
140574 -       memset(rankStats, 0, sizeof(U32) * (2 * HUF_TABLELOG_MAX + 2 + 1));
140576 -       HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(HUF_DTable)); /* if compiler fails here, assertion is wrong */
140577 -       if (maxTableLog > HUF_TABLELOG_MAX)
140578 -               return ERROR(tableLog_tooLarge);
140579 -       /* memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... */
140581 -       iSize = HUF_readStats_wksp(weightList, HUF_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize, workspace, workspaceSize);
140582 -       if (HUF_isError(iSize))
140583 -               return iSize;
140585 -       /* check result */
140586 -       if (tableLog > maxTableLog)
140587 -               return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */
140589 -       /* find maxWeight */
140590 -       for (maxW = tableLog; rankStats[maxW] == 0; maxW--) {
140591 -       } /* necessarily finds a solution before 0 */
140593 -       /* Get start index of each weight */
140594 -       {
140595 -               U32 w, nextRankStart = 0;
140596 -               for (w = 1; w < maxW + 1; w++) {
140597 -                       U32 curr = nextRankStart;
140598 -                       nextRankStart += rankStats[w];
140599 -                       rankStart[w] = curr;
140600 -               }
140601 -               rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/
140602 -               sizeOfSort = nextRankStart;
140603 -       }
140605 -       /* sort symbols by weight */
140606 -       {
140607 -               U32 s;
140608 -               for (s = 0; s < nbSymbols; s++) {
140609 -                       U32 const w = weightList[s];
140610 -                       U32 const r = rankStart[w]++;
140611 -                       sortedSymbol[r].symbol = (BYTE)s;
140612 -                       sortedSymbol[r].weight = (BYTE)w;
140613 -               }
140614 -               rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */
140615 -       }
140617 -       /* Build rankVal */
140618 -       {
140619 -               U32 *const rankVal0 = rankVal[0];
140620 -               {
140621 -                       int const rescale = (maxTableLog - tableLog) - 1; /* tableLog <= maxTableLog */
140622 -                       U32 nextRankVal = 0;
140623 -                       U32 w;
140624 -                       for (w = 1; w < maxW + 1; w++) {
140625 -                               U32 curr = nextRankVal;
140626 -                               nextRankVal += rankStats[w] << (w + rescale);
140627 -                               rankVal0[w] = curr;
140628 -                       }
140629 -               }
140630 -               {
140631 -                       U32 const minBits = tableLog + 1 - maxW;
140632 -                       U32 consumed;
140633 -                       for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {
140634 -                               U32 *const rankValPtr = rankVal[consumed];
140635 -                               U32 w;
140636 -                               for (w = 1; w < maxW + 1; w++) {
140637 -                                       rankValPtr[w] = rankVal0[w] >> consumed;
140638 -                               }
140639 -                       }
140640 -               }
140641 -       }
140643 -       HUF_fillDTableX4(dt, maxTableLog, sortedSymbol, sizeOfSort, rankStart0, rankVal, maxW, tableLog + 1);
140645 -       dtd.tableLog = (BYTE)maxTableLog;
140646 -       dtd.tableType = 1;
140647 -       memcpy(DTable, &dtd, sizeof(dtd));
140648 -       return iSize;
140651 -static U32 HUF_decodeSymbolX4(void *op, BIT_DStream_t *DStream, const HUF_DEltX4 *dt, const U32 dtLog)
140653 -       size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
140654 -       memcpy(op, dt + val, 2);
140655 -       BIT_skipBits(DStream, dt[val].nbBits);
140656 -       return dt[val].length;
140659 -static U32 HUF_decodeLastSymbolX4(void *op, BIT_DStream_t *DStream, const HUF_DEltX4 *dt, const U32 dtLog)
140661 -       size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
140662 -       memcpy(op, dt + val, 1);
140663 -       if (dt[val].length == 1)
140664 -               BIT_skipBits(DStream, dt[val].nbBits);
140665 -       else {
140666 -               if (DStream->bitsConsumed < (sizeof(DStream->bitContainer) * 8)) {
140667 -                       BIT_skipBits(DStream, dt[val].nbBits);
140668 -                       if (DStream->bitsConsumed > (sizeof(DStream->bitContainer) * 8))
140669 -                               /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
140670 -                               DStream->bitsConsumed = (sizeof(DStream->bitContainer) * 8);
140671 -               }
140672 -       }
140673 -       return 1;
140676 -#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
140678 -#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr)         \
140679 -       if (ZSTD_64bits() || (HUF_TABLELOG_MAX <= 12)) \
140680 -       ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
140682 -#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
140683 -       if (ZSTD_64bits())                     \
140684 -       ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
140686 -FORCE_INLINE size_t HUF_decodeStreamX4(BYTE *p, BIT_DStream_t *bitDPtr, BYTE *const pEnd, const HUF_DEltX4 *const dt, const U32 dtLog)
140688 -       BYTE *const pStart = p;
140690 -       /* up to 8 symbols at a time */
140691 -       while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd - (sizeof(bitDPtr->bitContainer) - 1))) {
140692 -               HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
140693 -               HUF_DECODE_SYMBOLX4_1(p, bitDPtr);
140694 -               HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
140695 -               HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
140696 -       }
140698 -       /* closer to end : up to 2 symbols at a time */
140699 -       while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd - 2))
140700 -               HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
140702 -       while (p <= pEnd - 2)
140703 -               HUF_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
140705 -       if (p < pEnd)
140706 -               p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
140708 -       return p - pStart;
140711 -static size_t HUF_decompress1X4_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
140713 -       BIT_DStream_t bitD;
140715 -       /* Init */
140716 -       {
140717 -               size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);
140718 -               if (HUF_isError(errorCode))
140719 -                       return errorCode;
140720 -       }
140722 -       /* decode */
140723 -       {
140724 -               BYTE *const ostart = (BYTE *)dst;
140725 -               BYTE *const oend = ostart + dstSize;
140726 -               const void *const dtPtr = DTable + 1; /* force compiler to not use strict-aliasing */
140727 -               const HUF_DEltX4 *const dt = (const HUF_DEltX4 *)dtPtr;
140728 -               DTableDesc const dtd = HUF_getDTableDesc(DTable);
140729 -               HUF_decodeStreamX4(ostart, &bitD, oend, dt, dtd.tableLog);
140730 -       }
140732 -       /* check */
140733 -       if (!BIT_endOfDStream(&bitD))
140734 -               return ERROR(corruption_detected);
140736 -       /* decoded size */
140737 -       return dstSize;
140740 -size_t HUF_decompress1X4_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
140742 -       DTableDesc dtd = HUF_getDTableDesc(DTable);
140743 -       if (dtd.tableType != 1)
140744 -               return ERROR(GENERIC);
140745 -       return HUF_decompress1X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
140748 -size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable *DCtx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
140750 -       const BYTE *ip = (const BYTE *)cSrc;
140752 -       size_t const hSize = HUF_readDTableX4_wksp(DCtx, cSrc, cSrcSize, workspace, workspaceSize);
140753 -       if (HUF_isError(hSize))
140754 -               return hSize;
140755 -       if (hSize >= cSrcSize)
140756 -               return ERROR(srcSize_wrong);
140757 -       ip += hSize;
140758 -       cSrcSize -= hSize;
140760 -       return HUF_decompress1X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx);
140763 -static size_t HUF_decompress4X4_usingDTable_internal(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
140765 -       if (cSrcSize < 10)
140766 -               return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
140768 -       {
140769 -               const BYTE *const istart = (const BYTE *)cSrc;
140770 -               BYTE *const ostart = (BYTE *)dst;
140771 -               BYTE *const oend = ostart + dstSize;
140772 -               const void *const dtPtr = DTable + 1;
140773 -               const HUF_DEltX4 *const dt = (const HUF_DEltX4 *)dtPtr;
140775 -               /* Init */
140776 -               BIT_DStream_t bitD1;
140777 -               BIT_DStream_t bitD2;
140778 -               BIT_DStream_t bitD3;
140779 -               BIT_DStream_t bitD4;
140780 -               size_t const length1 = ZSTD_readLE16(istart);
140781 -               size_t const length2 = ZSTD_readLE16(istart + 2);
140782 -               size_t const length3 = ZSTD_readLE16(istart + 4);
140783 -               size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
140784 -               const BYTE *const istart1 = istart + 6; /* jumpTable */
140785 -               const BYTE *const istart2 = istart1 + length1;
140786 -               const BYTE *const istart3 = istart2 + length2;
140787 -               const BYTE *const istart4 = istart3 + length3;
140788 -               size_t const segmentSize = (dstSize + 3) / 4;
140789 -               BYTE *const opStart2 = ostart + segmentSize;
140790 -               BYTE *const opStart3 = opStart2 + segmentSize;
140791 -               BYTE *const opStart4 = opStart3 + segmentSize;
140792 -               BYTE *op1 = ostart;
140793 -               BYTE *op2 = opStart2;
140794 -               BYTE *op3 = opStart3;
140795 -               BYTE *op4 = opStart4;
140796 -               U32 endSignal;
140797 -               DTableDesc const dtd = HUF_getDTableDesc(DTable);
140798 -               U32 const dtLog = dtd.tableLog;
140800 -               if (length4 > cSrcSize)
140801 -                       return ERROR(corruption_detected); /* overflow */
140802 -               {
140803 -                       size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1);
140804 -                       if (HUF_isError(errorCode))
140805 -                               return errorCode;
140806 -               }
140807 -               {
140808 -                       size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2);
140809 -                       if (HUF_isError(errorCode))
140810 -                               return errorCode;
140811 -               }
140812 -               {
140813 -                       size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3);
140814 -                       if (HUF_isError(errorCode))
140815 -                               return errorCode;
140816 -               }
140817 -               {
140818 -                       size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4);
140819 -                       if (HUF_isError(errorCode))
140820 -                               return errorCode;
140821 -               }
140823 -               /* 16-32 symbols per loop (4-8 symbols per stream) */
140824 -               endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
140825 -               for (; (endSignal == BIT_DStream_unfinished) & (op4 < (oend - (sizeof(bitD4.bitContainer) - 1)));) {
140826 -                       HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
140827 -                       HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
140828 -                       HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
140829 -                       HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
140830 -                       HUF_DECODE_SYMBOLX4_1(op1, &bitD1);
140831 -                       HUF_DECODE_SYMBOLX4_1(op2, &bitD2);
140832 -                       HUF_DECODE_SYMBOLX4_1(op3, &bitD3);
140833 -                       HUF_DECODE_SYMBOLX4_1(op4, &bitD4);
140834 -                       HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
140835 -                       HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
140836 -                       HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
140837 -                       HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
140838 -                       HUF_DECODE_SYMBOLX4_0(op1, &bitD1);
140839 -                       HUF_DECODE_SYMBOLX4_0(op2, &bitD2);
140840 -                       HUF_DECODE_SYMBOLX4_0(op3, &bitD3);
140841 -                       HUF_DECODE_SYMBOLX4_0(op4, &bitD4);
140843 -                       endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
140844 -               }
140846 -               /* check corruption */
140847 -               if (op1 > opStart2)
140848 -                       return ERROR(corruption_detected);
140849 -               if (op2 > opStart3)
140850 -                       return ERROR(corruption_detected);
140851 -               if (op3 > opStart4)
140852 -                       return ERROR(corruption_detected);
140853 -               /* note : op4 already verified within main loop */
140855 -               /* finish bitStreams one by one */
140856 -               HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
140857 -               HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
140858 -               HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
140859 -               HUF_decodeStreamX4(op4, &bitD4, oend, dt, dtLog);
140861 -               /* check */
140862 -               {
140863 -                       U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
140864 -                       if (!endCheck)
140865 -                               return ERROR(corruption_detected);
140866 -               }
140868 -               /* decoded size */
140869 -               return dstSize;
140870 -       }
140873 -size_t HUF_decompress4X4_usingDTable(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
140875 -       DTableDesc dtd = HUF_getDTableDesc(DTable);
140876 -       if (dtd.tableType != 1)
140877 -               return ERROR(GENERIC);
140878 -       return HUF_decompress4X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
140881 -size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
140883 -       const BYTE *ip = (const BYTE *)cSrc;
140885 -       size_t hSize = HUF_readDTableX4_wksp(dctx, cSrc, cSrcSize, workspace, workspaceSize);
140886 -       if (HUF_isError(hSize))
140887 -               return hSize;
140888 -       if (hSize >= cSrcSize)
140889 -               return ERROR(srcSize_wrong);
140890 -       ip += hSize;
140891 -       cSrcSize -= hSize;
140893 -       return HUF_decompress4X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx);
140896 -/* ********************************/
140897 -/* Generic decompression selector */
140898 -/* ********************************/
140900 -size_t HUF_decompress1X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
140902 -       DTableDesc const dtd = HUF_getDTableDesc(DTable);
140903 -       return dtd.tableType ? HUF_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable)
140904 -                            : HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable);
140907 -size_t HUF_decompress4X_usingDTable(void *dst, size_t maxDstSize, const void *cSrc, size_t cSrcSize, const HUF_DTable *DTable)
140909 -       DTableDesc const dtd = HUF_getDTableDesc(DTable);
140910 -       return dtd.tableType ? HUF_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable)
140911 -                            : HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable);
140914 -typedef struct {
140915 -       U32 tableTime;
140916 -       U32 decode256Time;
140917 -} algo_time_t;
140918 -static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] = {
140919 -    /* single, double, quad */
140920 -    {{0, 0}, {1, 1}, {2, 2}},               /* Q==0 : impossible */
140921 -    {{0, 0}, {1, 1}, {2, 2}},               /* Q==1 : impossible */
140922 -    {{38, 130}, {1313, 74}, {2151, 38}},     /* Q == 2 : 12-18% */
140923 -    {{448, 128}, {1353, 74}, {2238, 41}},    /* Q == 3 : 18-25% */
140924 -    {{556, 128}, {1353, 74}, {2238, 47}},    /* Q == 4 : 25-32% */
140925 -    {{714, 128}, {1418, 74}, {2436, 53}},    /* Q == 5 : 32-38% */
140926 -    {{883, 128}, {1437, 74}, {2464, 61}},    /* Q == 6 : 38-44% */
140927 -    {{897, 128}, {1515, 75}, {2622, 68}},    /* Q == 7 : 44-50% */
140928 -    {{926, 128}, {1613, 75}, {2730, 75}},    /* Q == 8 : 50-56% */
140929 -    {{947, 128}, {1729, 77}, {3359, 77}},    /* Q == 9 : 56-62% */
140930 -    {{1107, 128}, {2083, 81}, {4006, 84}},   /* Q ==10 : 62-69% */
140931 -    {{1177, 128}, {2379, 87}, {4785, 88}},   /* Q ==11 : 69-75% */
140932 -    {{1242, 128}, {2415, 93}, {5155, 84}},   /* Q ==12 : 75-81% */
140933 -    {{1349, 128}, {2644, 106}, {5260, 106}}, /* Q ==13 : 81-87% */
140934 -    {{1455, 128}, {2422, 124}, {4174, 124}}, /* Q ==14 : 87-93% */
140935 -    {{722, 128}, {1891, 145}, {1936, 146}},  /* Q ==15 : 93-99% */
140938 -/** HUF_selectDecoder() :
140939 -*   Tells which decoder is likely to decode faster,
140940 -*   based on a set of pre-determined metrics.
140941 -*   @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 .
140942 -*   Assumption : 0 < cSrcSize < dstSize <= 128 KB */
140943 -U32 HUF_selectDecoder(size_t dstSize, size_t cSrcSize)
140945 -       /* decoder timing evaluation */
140946 -       U32 const Q = (U32)(cSrcSize * 16 / dstSize); /* Q < 16 since dstSize > cSrcSize */
140947 -       U32 const D256 = (U32)(dstSize >> 8);
140948 -       U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
140949 -       U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
140950 -       DTime1 += DTime1 >> 3; /* advantage to algorithm using less memory, for cache eviction */
140952 -       return DTime1 < DTime0;
140955 -typedef size_t (*decompressionAlgo)(void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize);
140957 -size_t HUF_decompress4X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
140959 -       /* validation checks */
140960 -       if (dstSize == 0)
140961 -               return ERROR(dstSize_tooSmall);
140962 -       if (cSrcSize > dstSize)
140963 -               return ERROR(corruption_detected); /* invalid */
140964 -       if (cSrcSize == dstSize) {
140965 -               memcpy(dst, cSrc, dstSize);
140966 -               return dstSize;
140967 -       } /* not compressed */
140968 -       if (cSrcSize == 1) {
140969 -               memset(dst, *(const BYTE *)cSrc, dstSize);
140970 -               return dstSize;
140971 -       } /* RLE */
140973 -       {
140974 -               U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
140975 -               return algoNb ? HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize)
140976 -                             : HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize);
140977 -       }
140980 -size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
140982 -       /* validation checks */
140983 -       if (dstSize == 0)
140984 -               return ERROR(dstSize_tooSmall);
140985 -       if ((cSrcSize >= dstSize) || (cSrcSize <= 1))
140986 -               return ERROR(corruption_detected); /* invalid */
140988 -       {
140989 -               U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
140990 -               return algoNb ? HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize)
140991 -                             : HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize);
140992 -       }
140995 -size_t HUF_decompress1X_DCtx_wksp(HUF_DTable *dctx, void *dst, size_t dstSize, const void *cSrc, size_t cSrcSize, void *workspace, size_t workspaceSize)
140997 -       /* validation checks */
140998 -       if (dstSize == 0)
140999 -               return ERROR(dstSize_tooSmall);
141000 -       if (cSrcSize > dstSize)
141001 -               return ERROR(corruption_detected); /* invalid */
141002 -       if (cSrcSize == dstSize) {
141003 -               memcpy(dst, cSrc, dstSize);
141004 -               return dstSize;
141005 -       } /* not compressed */
141006 -       if (cSrcSize == 1) {
141007 -               memset(dst, *(const BYTE *)cSrc, dstSize);
141008 -               return dstSize;
141009 -       } /* RLE */
141011 -       {
141012 -               U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
141013 -               return algoNb ? HUF_decompress1X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize)
141014 -                             : HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workspace, workspaceSize);
141015 -       }
141017 diff --git a/lib/zstd/mem.h b/lib/zstd/mem.h
141018 deleted file mode 100644
141019 index 93d7a2c377fe..000000000000
141020 --- a/lib/zstd/mem.h
141021 +++ /dev/null
141022 @@ -1,151 +0,0 @@
141024 - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
141025 - * All rights reserved.
141027 - * This source code is licensed under the BSD-style license found in the
141028 - * LICENSE file in the root directory of https://github.com/facebook/zstd.
141029 - * An additional grant of patent rights can be found in the PATENTS file in the
141030 - * same directory.
141032 - * This program is free software; you can redistribute it and/or modify it under
141033 - * the terms of the GNU General Public License version 2 as published by the
141034 - * Free Software Foundation. This program is dual-licensed; you may select
141035 - * either version 2 of the GNU General Public License ("GPL") or BSD license
141036 - * ("BSD").
141037 - */
141039 -#ifndef MEM_H_MODULE
141040 -#define MEM_H_MODULE
141042 -/*-****************************************
141043 -*  Dependencies
141044 -******************************************/
141045 -#include <asm/unaligned.h>
141046 -#include <linux/string.h> /* memcpy */
141047 -#include <linux/types.h>  /* size_t, ptrdiff_t */
141049 -/*-****************************************
141050 -*  Compiler specifics
141051 -******************************************/
141052 -#define ZSTD_STATIC static inline
141054 -/*-**************************************************************
141055 -*  Basic Types
141056 -*****************************************************************/
141057 -typedef uint8_t BYTE;
141058 -typedef uint16_t U16;
141059 -typedef int16_t S16;
141060 -typedef uint32_t U32;
141061 -typedef int32_t S32;
141062 -typedef uint64_t U64;
141063 -typedef int64_t S64;
141064 -typedef ptrdiff_t iPtrDiff;
141065 -typedef uintptr_t uPtrDiff;
141067 -/*-**************************************************************
141068 -*  Memory I/O
141069 -*****************************************************************/
141070 -ZSTD_STATIC unsigned ZSTD_32bits(void) { return sizeof(size_t) == 4; }
141071 -ZSTD_STATIC unsigned ZSTD_64bits(void) { return sizeof(size_t) == 8; }
141073 -#if defined(__LITTLE_ENDIAN)
141074 -#define ZSTD_LITTLE_ENDIAN 1
141075 -#else
141076 -#define ZSTD_LITTLE_ENDIAN 0
141077 -#endif
141079 -ZSTD_STATIC unsigned ZSTD_isLittleEndian(void) { return ZSTD_LITTLE_ENDIAN; }
141081 -ZSTD_STATIC U16 ZSTD_read16(const void *memPtr) { return get_unaligned((const U16 *)memPtr); }
141083 -ZSTD_STATIC U32 ZSTD_read32(const void *memPtr) { return get_unaligned((const U32 *)memPtr); }
141085 -ZSTD_STATIC U64 ZSTD_read64(const void *memPtr) { return get_unaligned((const U64 *)memPtr); }
141087 -ZSTD_STATIC size_t ZSTD_readST(const void *memPtr) { return get_unaligned((const size_t *)memPtr); }
141089 -ZSTD_STATIC void ZSTD_write16(void *memPtr, U16 value) { put_unaligned(value, (U16 *)memPtr); }
141091 -ZSTD_STATIC void ZSTD_write32(void *memPtr, U32 value) { put_unaligned(value, (U32 *)memPtr); }
141093 -ZSTD_STATIC void ZSTD_write64(void *memPtr, U64 value) { put_unaligned(value, (U64 *)memPtr); }
141095 -/*=== Little endian r/w ===*/
141097 -ZSTD_STATIC U16 ZSTD_readLE16(const void *memPtr) { return get_unaligned_le16(memPtr); }
141099 -ZSTD_STATIC void ZSTD_writeLE16(void *memPtr, U16 val) { put_unaligned_le16(val, memPtr); }
141101 -ZSTD_STATIC U32 ZSTD_readLE24(const void *memPtr) { return ZSTD_readLE16(memPtr) + (((const BYTE *)memPtr)[2] << 16); }
141103 -ZSTD_STATIC void ZSTD_writeLE24(void *memPtr, U32 val)
141105 -       ZSTD_writeLE16(memPtr, (U16)val);
141106 -       ((BYTE *)memPtr)[2] = (BYTE)(val >> 16);
141109 -ZSTD_STATIC U32 ZSTD_readLE32(const void *memPtr) { return get_unaligned_le32(memPtr); }
141111 -ZSTD_STATIC void ZSTD_writeLE32(void *memPtr, U32 val32) { put_unaligned_le32(val32, memPtr); }
141113 -ZSTD_STATIC U64 ZSTD_readLE64(const void *memPtr) { return get_unaligned_le64(memPtr); }
141115 -ZSTD_STATIC void ZSTD_writeLE64(void *memPtr, U64 val64) { put_unaligned_le64(val64, memPtr); }
141117 -ZSTD_STATIC size_t ZSTD_readLEST(const void *memPtr)
141119 -       if (ZSTD_32bits())
141120 -               return (size_t)ZSTD_readLE32(memPtr);
141121 -       else
141122 -               return (size_t)ZSTD_readLE64(memPtr);
141125 -ZSTD_STATIC void ZSTD_writeLEST(void *memPtr, size_t val)
141127 -       if (ZSTD_32bits())
141128 -               ZSTD_writeLE32(memPtr, (U32)val);
141129 -       else
141130 -               ZSTD_writeLE64(memPtr, (U64)val);
141133 -/*=== Big endian r/w ===*/
141135 -ZSTD_STATIC U32 ZSTD_readBE32(const void *memPtr) { return get_unaligned_be32(memPtr); }
141137 -ZSTD_STATIC void ZSTD_writeBE32(void *memPtr, U32 val32) { put_unaligned_be32(val32, memPtr); }
141139 -ZSTD_STATIC U64 ZSTD_readBE64(const void *memPtr) { return get_unaligned_be64(memPtr); }
141141 -ZSTD_STATIC void ZSTD_writeBE64(void *memPtr, U64 val64) { put_unaligned_be64(val64, memPtr); }
141143 -ZSTD_STATIC size_t ZSTD_readBEST(const void *memPtr)
141145 -       if (ZSTD_32bits())
141146 -               return (size_t)ZSTD_readBE32(memPtr);
141147 -       else
141148 -               return (size_t)ZSTD_readBE64(memPtr);
141151 -ZSTD_STATIC void ZSTD_writeBEST(void *memPtr, size_t val)
141153 -       if (ZSTD_32bits())
141154 -               ZSTD_writeBE32(memPtr, (U32)val);
141155 -       else
141156 -               ZSTD_writeBE64(memPtr, (U64)val);
141159 -/* function safe only for comparisons */
141160 -ZSTD_STATIC U32 ZSTD_readMINMATCH(const void *memPtr, U32 length)
141162 -       switch (length) {
141163 -       default:
141164 -       case 4: return ZSTD_read32(memPtr);
141165 -       case 3:
141166 -               if (ZSTD_isLittleEndian())
141167 -                       return ZSTD_read32(memPtr) << 8;
141168 -               else
141169 -                       return ZSTD_read32(memPtr) >> 8;
141170 -       }
141173 -#endif /* MEM_H_MODULE */
141174 diff --git a/lib/zstd/zstd_common.c b/lib/zstd/zstd_common.c
141175 deleted file mode 100644
141176 index a282624ee155..000000000000
141177 --- a/lib/zstd/zstd_common.c
141178 +++ /dev/null
141179 @@ -1,75 +0,0 @@
141181 - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
141182 - * All rights reserved.
141184 - * This source code is licensed under the BSD-style license found in the
141185 - * LICENSE file in the root directory of https://github.com/facebook/zstd.
141186 - * An additional grant of patent rights can be found in the PATENTS file in the
141187 - * same directory.
141189 - * This program is free software; you can redistribute it and/or modify it under
141190 - * the terms of the GNU General Public License version 2 as published by the
141191 - * Free Software Foundation. This program is dual-licensed; you may select
141192 - * either version 2 of the GNU General Public License ("GPL") or BSD license
141193 - * ("BSD").
141194 - */
141196 -/*-*************************************
141197 -*  Dependencies
141198 -***************************************/
141199 -#include "error_private.h"
141200 -#include "zstd_internal.h" /* declaration of ZSTD_isError, ZSTD_getErrorName, ZSTD_getErrorCode, ZSTD_getErrorString, ZSTD_versionNumber */
141201 -#include <linux/kernel.h>
141203 -/*=**************************************************************
141204 -*  Custom allocator
141205 -****************************************************************/
141207 -#define stack_push(stack, size)                                 \
141208 -       ({                                                      \
141209 -               void *const ptr = ZSTD_PTR_ALIGN((stack)->ptr); \
141210 -               (stack)->ptr = (char *)ptr + (size);            \
141211 -               (stack)->ptr <= (stack)->end ? ptr : NULL;      \
141212 -       })
141214 -ZSTD_customMem ZSTD_initStack(void *workspace, size_t workspaceSize)
141216 -       ZSTD_customMem stackMem = {ZSTD_stackAlloc, ZSTD_stackFree, workspace};
141217 -       ZSTD_stack *stack = (ZSTD_stack *)workspace;
141218 -       /* Verify preconditions */
141219 -       if (!workspace || workspaceSize < sizeof(ZSTD_stack) || workspace != ZSTD_PTR_ALIGN(workspace)) {
141220 -               ZSTD_customMem error = {NULL, NULL, NULL};
141221 -               return error;
141222 -       }
141223 -       /* Initialize the stack */
141224 -       stack->ptr = workspace;
141225 -       stack->end = (char *)workspace + workspaceSize;
141226 -       stack_push(stack, sizeof(ZSTD_stack));
141227 -       return stackMem;
141230 -void *ZSTD_stackAllocAll(void *opaque, size_t *size)
141232 -       ZSTD_stack *stack = (ZSTD_stack *)opaque;
141233 -       *size = (BYTE const *)stack->end - (BYTE *)ZSTD_PTR_ALIGN(stack->ptr);
141234 -       return stack_push(stack, *size);
141237 -void *ZSTD_stackAlloc(void *opaque, size_t size)
141239 -       ZSTD_stack *stack = (ZSTD_stack *)opaque;
141240 -       return stack_push(stack, size);
141242 -void ZSTD_stackFree(void *opaque, void *address)
141244 -       (void)opaque;
141245 -       (void)address;
141248 -void *ZSTD_malloc(size_t size, ZSTD_customMem customMem) { return customMem.customAlloc(customMem.opaque, size); }
141250 -void ZSTD_free(void *ptr, ZSTD_customMem customMem)
141252 -       if (ptr != NULL)
141253 -               customMem.customFree(customMem.opaque, ptr);
141255 diff --git a/lib/zstd/zstd_compress_module.c b/lib/zstd/zstd_compress_module.c
141256 new file mode 100644
141257 index 000000000000..37d08ff43e6e
141258 --- /dev/null
141259 +++ b/lib/zstd/zstd_compress_module.c
141260 @@ -0,0 +1,124 @@
141261 +// SPDX-License-Identifier: GPL-2.0-only
141263 + * Copyright (c) Facebook, Inc.
141264 + * All rights reserved.
141266 + * This source code is licensed under both the BSD-style license (found in the
141267 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
141268 + * in the COPYING file in the root directory of this source tree).
141269 + * You may select, at your option, one of the above-listed licenses.
141270 + */
141272 +#include <linux/kernel.h>
141273 +#include <linux/module.h>
141274 +#include <linux/string.h>
141275 +#include <linux/zstd.h>
141277 +#include "common/zstd_deps.h"
141278 +#include "common/zstd_internal.h"
141280 +int zstd_min_clevel(void)
141282 +       return ZSTD_minCLevel();
141284 +EXPORT_SYMBOL(zstd_min_clevel);
141286 +int zstd_max_clevel(void)
141288 +       return ZSTD_maxCLevel();
141290 +EXPORT_SYMBOL(zstd_max_clevel);
141292 +size_t zstd_compress_bound(size_t src_size)
141294 +       return ZSTD_compressBound(src_size);
141296 +EXPORT_SYMBOL(zstd_compress_bound);
141298 +zstd_parameters zstd_get_params(int level,
141299 +       unsigned long long estimated_src_size)
141301 +       return ZSTD_getParams(level, estimated_src_size, 0);
141303 +EXPORT_SYMBOL(zstd_get_params);
141305 +size_t zstd_cctx_workspace_bound(const zstd_compression_parameters *cparams)
141307 +       return ZSTD_estimateCCtxSize_usingCParams(*cparams);
141309 +EXPORT_SYMBOL(zstd_cctx_workspace_bound);
141311 +zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size)
141313 +       if (workspace == NULL)
141314 +               return NULL;
141315 +       return ZSTD_initStaticCCtx(workspace, workspace_size);
141317 +EXPORT_SYMBOL(zstd_init_cctx);
141319 +size_t zstd_compress_cctx(zstd_cctx *cctx, void *dst, size_t dst_capacity,
141320 +       const void *src, size_t src_size, const zstd_parameters *parameters)
141322 +       return ZSTD_compress_advanced(cctx, dst, dst_capacity, src, src_size, NULL, 0, *parameters);
141324 +EXPORT_SYMBOL(zstd_compress_cctx);
141326 +size_t zstd_cstream_workspace_bound(const zstd_compression_parameters *cparams)
141328 +       return ZSTD_estimateCStreamSize_usingCParams(*cparams);
141330 +EXPORT_SYMBOL(zstd_cstream_workspace_bound);
141332 +zstd_cstream *zstd_init_cstream(const zstd_parameters *parameters,
141333 +       unsigned long long pledged_src_size, void *workspace, size_t workspace_size)
141335 +       zstd_cstream *cstream;
141336 +       size_t ret;
141338 +       if (workspace == NULL)
141339 +               return NULL;
141341 +       cstream = ZSTD_initStaticCStream(workspace, workspace_size);
141342 +       if (cstream == NULL)
141343 +               return NULL;
141345 +       /* 0 means unknown in linux zstd API but means 0 in new zstd API */
141346 +       if (pledged_src_size == 0)
141347 +               pledged_src_size = ZSTD_CONTENTSIZE_UNKNOWN;
141349 +       ret = ZSTD_initCStream_advanced(cstream, NULL, 0, *parameters, pledged_src_size);
141350 +       if (ZSTD_isError(ret))
141351 +               return NULL;
141353 +       return cstream;
141355 +EXPORT_SYMBOL(zstd_init_cstream);
141357 +size_t zstd_reset_cstream(zstd_cstream *cstream,
141358 +       unsigned long long pledged_src_size)
141360 +       return ZSTD_resetCStream(cstream, pledged_src_size);
141362 +EXPORT_SYMBOL(zstd_reset_cstream);
141364 +size_t zstd_compress_stream(zstd_cstream *cstream, zstd_out_buffer *output,
141365 +       zstd_in_buffer *input)
141367 +       return ZSTD_compressStream(cstream, output, input);
141369 +EXPORT_SYMBOL(zstd_compress_stream);
141371 +size_t zstd_flush_stream(zstd_cstream *cstream, zstd_out_buffer *output)
141373 +       return ZSTD_flushStream(cstream, output);
141375 +EXPORT_SYMBOL(zstd_flush_stream);
141377 +size_t zstd_end_stream(zstd_cstream *cstream, zstd_out_buffer *output)
141379 +       return ZSTD_endStream(cstream, output);
141381 +EXPORT_SYMBOL(zstd_end_stream);
141383 +MODULE_LICENSE("Dual BSD/GPL");
141384 +MODULE_DESCRIPTION("Zstd Compressor");
141385 diff --git a/lib/zstd/zstd_decompress_module.c b/lib/zstd/zstd_decompress_module.c
141386 new file mode 100644
141387 index 000000000000..15005cdb9eca
141388 --- /dev/null
141389 +++ b/lib/zstd/zstd_decompress_module.c
141390 @@ -0,0 +1,105 @@
141391 +// SPDX-License-Identifier: GPL-2.0-only
141393 + * Copyright (c) Facebook, Inc.
141394 + * All rights reserved.
141396 + * This source code is licensed under both the BSD-style license (found in the
141397 + * LICENSE file in the root directory of this source tree) and the GPLv2 (found
141398 + * in the COPYING file in the root directory of this source tree).
141399 + * You may select, at your option, one of the above-listed licenses.
141400 + */
141402 +#include <linux/kernel.h>
141403 +#include <linux/module.h>
141404 +#include <linux/string.h>
141405 +#include <linux/zstd.h>
141407 +#include "common/zstd_deps.h"
141409 +/* Common symbols. zstd_compress must depend on zstd_decompress. */
141411 +unsigned int zstd_is_error(size_t code)
141413 +       return ZSTD_isError(code);
141415 +EXPORT_SYMBOL(zstd_is_error);
141417 +zstd_error_code zstd_get_error_code(size_t code)
141419 +       return ZSTD_getErrorCode(code);
141421 +EXPORT_SYMBOL(zstd_get_error_code);
141423 +const char *zstd_get_error_name(size_t code)
141425 +       return ZSTD_getErrorName(code);
141427 +EXPORT_SYMBOL(zstd_get_error_name);
141429 +/* Decompression symbols. */
141431 +size_t zstd_dctx_workspace_bound(void)
141433 +       return ZSTD_estimateDCtxSize();
141435 +EXPORT_SYMBOL(zstd_dctx_workspace_bound);
141437 +zstd_dctx *zstd_init_dctx(void *workspace, size_t workspace_size)
141439 +       if (workspace == NULL)
141440 +               return NULL;
141441 +       return ZSTD_initStaticDCtx(workspace, workspace_size);
141443 +EXPORT_SYMBOL(zstd_init_dctx);
141445 +size_t zstd_decompress_dctx(zstd_dctx *dctx, void *dst, size_t dst_capacity,
141446 +       const void *src, size_t src_size)
141448 +       return ZSTD_decompressDCtx(dctx, dst, dst_capacity, src, src_size);
141450 +EXPORT_SYMBOL(zstd_decompress_dctx);
141452 +size_t zstd_dstream_workspace_bound(size_t max_window_size)
141454 +       return ZSTD_estimateDStreamSize(max_window_size);
141456 +EXPORT_SYMBOL(zstd_dstream_workspace_bound);
141458 +zstd_dstream *zstd_init_dstream(size_t max_window_size, void *workspace,
141459 +       size_t workspace_size)
141461 +       if (workspace == NULL)
141462 +               return NULL;
141463 +       (void)max_window_size;
141464 +       return ZSTD_initStaticDStream(workspace, workspace_size);
141466 +EXPORT_SYMBOL(zstd_init_dstream);
141468 +size_t zstd_reset_dstream(zstd_dstream *dstream)
141470 +       return ZSTD_resetDStream(dstream);
141472 +EXPORT_SYMBOL(zstd_reset_dstream);
141474 +size_t zstd_decompress_stream(zstd_dstream *dstream, zstd_out_buffer *output,
141475 +       zstd_in_buffer *input)
141477 +       return ZSTD_decompressStream(dstream, output, input);
141479 +EXPORT_SYMBOL(zstd_decompress_stream);
141481 +size_t zstd_find_frame_compressed_size(const void *src, size_t src_size)
141483 +       return ZSTD_findFrameCompressedSize(src, src_size);
141485 +EXPORT_SYMBOL(zstd_find_frame_compressed_size);
141487 +size_t zstd_get_frame_header(zstd_frame_header *header, const void *src,
141488 +       size_t src_size)
141490 +       return ZSTD_getFrameHeader(header, src, src_size);
141492 +EXPORT_SYMBOL(zstd_get_frame_header);
141494 +MODULE_LICENSE("Dual BSD/GPL");
141495 +MODULE_DESCRIPTION("Zstd Decompressor");
141496 diff --git a/lib/zstd/zstd_internal.h b/lib/zstd/zstd_internal.h
141497 deleted file mode 100644
141498 index dac753397f86..000000000000
141499 --- a/lib/zstd/zstd_internal.h
141500 +++ /dev/null
141501 @@ -1,273 +0,0 @@
141503 - * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
141504 - * All rights reserved.
141506 - * This source code is licensed under the BSD-style license found in the
141507 - * LICENSE file in the root directory of https://github.com/facebook/zstd.
141508 - * An additional grant of patent rights can be found in the PATENTS file in the
141509 - * same directory.
141511 - * This program is free software; you can redistribute it and/or modify it under
141512 - * the terms of the GNU General Public License version 2 as published by the
141513 - * Free Software Foundation. This program is dual-licensed; you may select
141514 - * either version 2 of the GNU General Public License ("GPL") or BSD license
141515 - * ("BSD").
141516 - */
141518 -#ifndef ZSTD_CCOMMON_H_MODULE
141519 -#define ZSTD_CCOMMON_H_MODULE
141521 -/*-*******************************************************
141522 -*  Compiler specifics
141523 -*********************************************************/
141524 -#define FORCE_INLINE static __always_inline
141525 -#define FORCE_NOINLINE static noinline
141527 -/*-*************************************
141528 -*  Dependencies
141529 -***************************************/
141530 -#include "error_private.h"
141531 -#include "mem.h"
141532 -#include <linux/compiler.h>
141533 -#include <linux/kernel.h>
141534 -#include <linux/xxhash.h>
141535 -#include <linux/zstd.h>
141537 -/*-*************************************
141538 -*  shared macros
141539 -***************************************/
141540 -#define MIN(a, b) ((a) < (b) ? (a) : (b))
141541 -#define MAX(a, b) ((a) > (b) ? (a) : (b))
141542 -#define CHECK_F(f)                       \
141543 -       {                                \
141544 -               size_t const errcod = f; \
141545 -               if (ERR_isError(errcod)) \
141546 -                       return errcod;   \
141547 -       } /* check and Forward error code */
141548 -#define CHECK_E(f, e)                    \
141549 -       {                                \
141550 -               size_t const errcod = f; \
141551 -               if (ERR_isError(errcod)) \
141552 -                       return ERROR(e); \
141553 -       } /* check and send Error code */
141554 -#define ZSTD_STATIC_ASSERT(c)                                   \
141555 -       {                                                       \
141556 -               enum { ZSTD_static_assert = 1 / (int)(!!(c)) }; \
141557 -       }
141559 -/*-*************************************
141560 -*  Common constants
141561 -***************************************/
141562 -#define ZSTD_OPT_NUM (1 << 12)
141563 -#define ZSTD_DICT_MAGIC 0xEC30A437 /* v0.7+ */
141565 -#define ZSTD_REP_NUM 3               /* number of repcodes */
141566 -#define ZSTD_REP_CHECK (ZSTD_REP_NUM) /* number of repcodes to check by the optimal parser */
141567 -#define ZSTD_REP_MOVE (ZSTD_REP_NUM - 1)
141568 -#define ZSTD_REP_MOVE_OPT (ZSTD_REP_NUM)
141569 -static const U32 repStartValue[ZSTD_REP_NUM] = {1, 4, 8};
141571 -#define KB *(1 << 10)
141572 -#define MB *(1 << 20)
141573 -#define GB *(1U << 30)
141575 -#define BIT7 128
141576 -#define BIT6 64
141577 -#define BIT5 32
141578 -#define BIT4 16
141579 -#define BIT1 2
141580 -#define BIT0 1
141582 -#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
141583 -static const size_t ZSTD_fcs_fieldSize[4] = {0, 2, 4, 8};
141584 -static const size_t ZSTD_did_fieldSize[4] = {0, 1, 2, 4};
141586 -#define ZSTD_BLOCKHEADERSIZE 3 /* C standard doesn't allow `static const` variable to be init using another `static const` variable */
141587 -static const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
141588 -typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;
141590 -#define MIN_SEQUENCES_SIZE 1                                                                     /* nbSeq==0 */
141591 -#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */
141593 -#define HufLog 12
141594 -typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e;
141596 -#define LONGNBSEQ 0x7F00
141598 -#define MINMATCH 3
141599 -#define EQUAL_READ32 4
141601 -#define Litbits 8
141602 -#define MaxLit ((1 << Litbits) - 1)
141603 -#define MaxML 52
141604 -#define MaxLL 35
141605 -#define MaxOff 28
141606 -#define MaxSeq MAX(MaxLL, MaxML) /* Assumption : MaxOff < MaxLL,MaxML */
141607 -#define MLFSELog 9
141608 -#define LLFSELog 9
141609 -#define OffFSELog 8
141611 -static const U32 LL_bits[MaxLL + 1] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
141612 -static const S16 LL_defaultNorm[MaxLL + 1] = {4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, -1, -1, -1, -1};
141613 -#define LL_DEFAULTNORMLOG 6 /* for static allocation */
141614 -static const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;
141616 -static const U32 ML_bits[MaxML + 1] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,  0,  0,  0,  0,  0,  0, 0,
141617 -                                      0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
141618 -static const S16 ML_defaultNorm[MaxML + 1] = {1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,  1,  1,  1,  1,  1,  1, 1,
141619 -                                             1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1, -1};
141620 -#define ML_DEFAULTNORMLOG 6 /* for static allocation */
141621 -static const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;
141623 -static const S16 OF_defaultNorm[MaxOff + 1] = {1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1};
141624 -#define OF_DEFAULTNORMLOG 5 /* for static allocation */
141625 -static const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;
141627 -/*-*******************************************
141628 -*  Shared functions to include for inlining
141629 -*********************************************/
141630 -ZSTD_STATIC void ZSTD_copy8(void *dst, const void *src) {
141631 -       /*
141632 -        * zstd relies heavily on gcc being able to analyze and inline this
141633 -        * memcpy() call, since it is called in a tight loop. Preboot mode
141634 -        * is compiled in freestanding mode, which stops gcc from analyzing
141635 -        * memcpy(). Use __builtin_memcpy() to tell gcc to analyze this as a
141636 -        * regular memcpy().
141637 -        */
141638 -       __builtin_memcpy(dst, src, 8);
141640 -/*! ZSTD_wildcopy() :
141641 -*   custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */
141642 -#define WILDCOPY_OVERLENGTH 8
141643 -ZSTD_STATIC void ZSTD_wildcopy(void *dst, const void *src, ptrdiff_t length)
141645 -       const BYTE* ip = (const BYTE*)src;
141646 -       BYTE* op = (BYTE*)dst;
141647 -       BYTE* const oend = op + length;
141648 -#if defined(GCC_VERSION) && GCC_VERSION >= 70000 && GCC_VERSION < 70200
141649 -       /*
141650 -        * Work around https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81388.
141651 -        * Avoid the bad case where the loop only runs once by handling the
141652 -        * special case separately. This doesn't trigger the bug because it
141653 -        * doesn't involve pointer/integer overflow.
141654 -        */
141655 -       if (length <= 8)
141656 -               return ZSTD_copy8(dst, src);
141657 -#endif
141658 -       do {
141659 -               ZSTD_copy8(op, ip);
141660 -               op += 8;
141661 -               ip += 8;
141662 -       } while (op < oend);
141665 -/*-*******************************************
141666 -*  Private interfaces
141667 -*********************************************/
141668 -typedef struct ZSTD_stats_s ZSTD_stats_t;
141670 -typedef struct {
141671 -       U32 off;
141672 -       U32 len;
141673 -} ZSTD_match_t;
141675 -typedef struct {
141676 -       U32 price;
141677 -       U32 off;
141678 -       U32 mlen;
141679 -       U32 litlen;
141680 -       U32 rep[ZSTD_REP_NUM];
141681 -} ZSTD_optimal_t;
141683 -typedef struct seqDef_s {
141684 -       U32 offset;
141685 -       U16 litLength;
141686 -       U16 matchLength;
141687 -} seqDef;
141689 -typedef struct {
141690 -       seqDef *sequencesStart;
141691 -       seqDef *sequences;
141692 -       BYTE *litStart;
141693 -       BYTE *lit;
141694 -       BYTE *llCode;
141695 -       BYTE *mlCode;
141696 -       BYTE *ofCode;
141697 -       U32 longLengthID; /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */
141698 -       U32 longLengthPos;
141699 -       /* opt */
141700 -       ZSTD_optimal_t *priceTable;
141701 -       ZSTD_match_t *matchTable;
141702 -       U32 *matchLengthFreq;
141703 -       U32 *litLengthFreq;
141704 -       U32 *litFreq;
141705 -       U32 *offCodeFreq;
141706 -       U32 matchLengthSum;
141707 -       U32 matchSum;
141708 -       U32 litLengthSum;
141709 -       U32 litSum;
141710 -       U32 offCodeSum;
141711 -       U32 log2matchLengthSum;
141712 -       U32 log2matchSum;
141713 -       U32 log2litLengthSum;
141714 -       U32 log2litSum;
141715 -       U32 log2offCodeSum;
141716 -       U32 factor;
141717 -       U32 staticPrices;
141718 -       U32 cachedPrice;
141719 -       U32 cachedLitLength;
141720 -       const BYTE *cachedLiterals;
141721 -} seqStore_t;
141723 -const seqStore_t *ZSTD_getSeqStore(const ZSTD_CCtx *ctx);
141724 -void ZSTD_seqToCodes(const seqStore_t *seqStorePtr);
141725 -int ZSTD_isSkipFrame(ZSTD_DCtx *dctx);
141727 -/*= Custom memory allocation functions */
141728 -typedef void *(*ZSTD_allocFunction)(void *opaque, size_t size);
141729 -typedef void (*ZSTD_freeFunction)(void *opaque, void *address);
141730 -typedef struct {
141731 -       ZSTD_allocFunction customAlloc;
141732 -       ZSTD_freeFunction customFree;
141733 -       void *opaque;
141734 -} ZSTD_customMem;
141736 -void *ZSTD_malloc(size_t size, ZSTD_customMem customMem);
141737 -void ZSTD_free(void *ptr, ZSTD_customMem customMem);
141739 -/*====== stack allocation  ======*/
141741 -typedef struct {
141742 -       void *ptr;
141743 -       const void *end;
141744 -} ZSTD_stack;
141746 -#define ZSTD_ALIGN(x) ALIGN(x, sizeof(size_t))
141747 -#define ZSTD_PTR_ALIGN(p) PTR_ALIGN(p, sizeof(size_t))
141749 -ZSTD_customMem ZSTD_initStack(void *workspace, size_t workspaceSize);
141751 -void *ZSTD_stackAllocAll(void *opaque, size_t *size);
141752 -void *ZSTD_stackAlloc(void *opaque, size_t size);
141753 -void ZSTD_stackFree(void *opaque, void *address);
141755 -/*======  common function  ======*/
141757 -ZSTD_STATIC U32 ZSTD_highbit32(U32 val) { return 31 - __builtin_clz(val); }
141759 -/* hidden functions */
141761 -/* ZSTD_invalidateRepCodes() :
141762 - * ensures next compression will not use repcodes from previous block.
141763 - * Note : only works with regular variant;
141764 - *        do not use with extDict variant ! */
141765 -void ZSTD_invalidateRepCodes(ZSTD_CCtx *cctx);
141767 -size_t ZSTD_freeCCtx(ZSTD_CCtx *cctx);
141768 -size_t ZSTD_freeDCtx(ZSTD_DCtx *dctx);
141769 -size_t ZSTD_freeCDict(ZSTD_CDict *cdict);
141770 -size_t ZSTD_freeDDict(ZSTD_DDict *cdict);
141771 -size_t ZSTD_freeCStream(ZSTD_CStream *zcs);
141772 -size_t ZSTD_freeDStream(ZSTD_DStream *zds);
141774 -#endif /* ZSTD_CCOMMON_H_MODULE */
141775 diff --git a/lib/zstd/zstd_opt.h b/lib/zstd/zstd_opt.h
141776 deleted file mode 100644
141777 index 55e1b4cba808..000000000000
141778 --- a/lib/zstd/zstd_opt.h
141779 +++ /dev/null
141780 @@ -1,1014 +0,0 @@
141782 - * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
141783 - * All rights reserved.
141785 - * This source code is licensed under the BSD-style license found in the
141786 - * LICENSE file in the root directory of https://github.com/facebook/zstd.
141787 - * An additional grant of patent rights can be found in the PATENTS file in the
141788 - * same directory.
141790 - * This program is free software; you can redistribute it and/or modify it under
141791 - * the terms of the GNU General Public License version 2 as published by the
141792 - * Free Software Foundation. This program is dual-licensed; you may select
141793 - * either version 2 of the GNU General Public License ("GPL") or BSD license
141794 - * ("BSD").
141795 - */
141797 -/* Note : this file is intended to be included within zstd_compress.c */
141799 -#ifndef ZSTD_OPT_H_91842398743
141800 -#define ZSTD_OPT_H_91842398743
141802 -#define ZSTD_LITFREQ_ADD 2
141803 -#define ZSTD_FREQ_DIV 4
141804 -#define ZSTD_MAX_PRICE (1 << 30)
141806 -/*-*************************************
141807 -*  Price functions for optimal parser
141808 -***************************************/
141809 -FORCE_INLINE void ZSTD_setLog2Prices(seqStore_t *ssPtr)
141811 -       ssPtr->log2matchLengthSum = ZSTD_highbit32(ssPtr->matchLengthSum + 1);
141812 -       ssPtr->log2litLengthSum = ZSTD_highbit32(ssPtr->litLengthSum + 1);
141813 -       ssPtr->log2litSum = ZSTD_highbit32(ssPtr->litSum + 1);
141814 -       ssPtr->log2offCodeSum = ZSTD_highbit32(ssPtr->offCodeSum + 1);
141815 -       ssPtr->factor = 1 + ((ssPtr->litSum >> 5) / ssPtr->litLengthSum) + ((ssPtr->litSum << 1) / (ssPtr->litSum + ssPtr->matchSum));
141818 -ZSTD_STATIC void ZSTD_rescaleFreqs(seqStore_t *ssPtr, const BYTE *src, size_t srcSize)
141820 -       unsigned u;
141822 -       ssPtr->cachedLiterals = NULL;
141823 -       ssPtr->cachedPrice = ssPtr->cachedLitLength = 0;
141824 -       ssPtr->staticPrices = 0;
141826 -       if (ssPtr->litLengthSum == 0) {
141827 -               if (srcSize <= 1024)
141828 -                       ssPtr->staticPrices = 1;
141830 -               for (u = 0; u <= MaxLit; u++)
141831 -                       ssPtr->litFreq[u] = 0;
141832 -               for (u = 0; u < srcSize; u++)
141833 -                       ssPtr->litFreq[src[u]]++;
141835 -               ssPtr->litSum = 0;
141836 -               ssPtr->litLengthSum = MaxLL + 1;
141837 -               ssPtr->matchLengthSum = MaxML + 1;
141838 -               ssPtr->offCodeSum = (MaxOff + 1);
141839 -               ssPtr->matchSum = (ZSTD_LITFREQ_ADD << Litbits);
141841 -               for (u = 0; u <= MaxLit; u++) {
141842 -                       ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u] >> ZSTD_FREQ_DIV);
141843 -                       ssPtr->litSum += ssPtr->litFreq[u];
141844 -               }
141845 -               for (u = 0; u <= MaxLL; u++)
141846 -                       ssPtr->litLengthFreq[u] = 1;
141847 -               for (u = 0; u <= MaxML; u++)
141848 -                       ssPtr->matchLengthFreq[u] = 1;
141849 -               for (u = 0; u <= MaxOff; u++)
141850 -                       ssPtr->offCodeFreq[u] = 1;
141851 -       } else {
141852 -               ssPtr->matchLengthSum = 0;
141853 -               ssPtr->litLengthSum = 0;
141854 -               ssPtr->offCodeSum = 0;
141855 -               ssPtr->matchSum = 0;
141856 -               ssPtr->litSum = 0;
141858 -               for (u = 0; u <= MaxLit; u++) {
141859 -                       ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u] >> (ZSTD_FREQ_DIV + 1));
141860 -                       ssPtr->litSum += ssPtr->litFreq[u];
141861 -               }
141862 -               for (u = 0; u <= MaxLL; u++) {
141863 -                       ssPtr->litLengthFreq[u] = 1 + (ssPtr->litLengthFreq[u] >> (ZSTD_FREQ_DIV + 1));
141864 -                       ssPtr->litLengthSum += ssPtr->litLengthFreq[u];
141865 -               }
141866 -               for (u = 0; u <= MaxML; u++) {
141867 -                       ssPtr->matchLengthFreq[u] = 1 + (ssPtr->matchLengthFreq[u] >> ZSTD_FREQ_DIV);
141868 -                       ssPtr->matchLengthSum += ssPtr->matchLengthFreq[u];
141869 -                       ssPtr->matchSum += ssPtr->matchLengthFreq[u] * (u + 3);
141870 -               }
141871 -               ssPtr->matchSum *= ZSTD_LITFREQ_ADD;
141872 -               for (u = 0; u <= MaxOff; u++) {
141873 -                       ssPtr->offCodeFreq[u] = 1 + (ssPtr->offCodeFreq[u] >> ZSTD_FREQ_DIV);
141874 -                       ssPtr->offCodeSum += ssPtr->offCodeFreq[u];
141875 -               }
141876 -       }
141878 -       ZSTD_setLog2Prices(ssPtr);
141881 -FORCE_INLINE U32 ZSTD_getLiteralPrice(seqStore_t *ssPtr, U32 litLength, const BYTE *literals)
141883 -       U32 price, u;
141885 -       if (ssPtr->staticPrices)
141886 -               return ZSTD_highbit32((U32)litLength + 1) + (litLength * 6);
141888 -       if (litLength == 0)
141889 -               return ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[0] + 1);
141891 -       /* literals */
141892 -       if (ssPtr->cachedLiterals == literals) {
141893 -               U32 const additional = litLength - ssPtr->cachedLitLength;
141894 -               const BYTE *literals2 = ssPtr->cachedLiterals + ssPtr->cachedLitLength;
141895 -               price = ssPtr->cachedPrice + additional * ssPtr->log2litSum;
141896 -               for (u = 0; u < additional; u++)
141897 -                       price -= ZSTD_highbit32(ssPtr->litFreq[literals2[u]] + 1);
141898 -               ssPtr->cachedPrice = price;
141899 -               ssPtr->cachedLitLength = litLength;
141900 -       } else {
141901 -               price = litLength * ssPtr->log2litSum;
141902 -               for (u = 0; u < litLength; u++)
141903 -                       price -= ZSTD_highbit32(ssPtr->litFreq[literals[u]] + 1);
141905 -               if (litLength >= 12) {
141906 -                       ssPtr->cachedLiterals = literals;
141907 -                       ssPtr->cachedPrice = price;
141908 -                       ssPtr->cachedLitLength = litLength;
141909 -               }
141910 -       }
141912 -       /* literal Length */
141913 -       {
141914 -               const BYTE LL_deltaCode = 19;
141915 -               const BYTE llCode = (litLength > 63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
141916 -               price += LL_bits[llCode] + ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[llCode] + 1);
141917 -       }
141919 -       return price;
141922 -FORCE_INLINE U32 ZSTD_getPrice(seqStore_t *seqStorePtr, U32 litLength, const BYTE *literals, U32 offset, U32 matchLength, const int ultra)
141924 -       /* offset */
141925 -       U32 price;
141926 -       BYTE const offCode = (BYTE)ZSTD_highbit32(offset + 1);
141928 -       if (seqStorePtr->staticPrices)
141929 -               return ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + ZSTD_highbit32((U32)matchLength + 1) + 16 + offCode;
141931 -       price = offCode + seqStorePtr->log2offCodeSum - ZSTD_highbit32(seqStorePtr->offCodeFreq[offCode] + 1);
141932 -       if (!ultra && offCode >= 20)
141933 -               price += (offCode - 19) * 2;
141935 -       /* match Length */
141936 -       {
141937 -               const BYTE ML_deltaCode = 36;
141938 -               const BYTE mlCode = (matchLength > 127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength];
141939 -               price += ML_bits[mlCode] + seqStorePtr->log2matchLengthSum - ZSTD_highbit32(seqStorePtr->matchLengthFreq[mlCode] + 1);
141940 -       }
141942 -       return price + ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + seqStorePtr->factor;
141945 -ZSTD_STATIC void ZSTD_updatePrice(seqStore_t *seqStorePtr, U32 litLength, const BYTE *literals, U32 offset, U32 matchLength)
141947 -       U32 u;
141949 -       /* literals */
141950 -       seqStorePtr->litSum += litLength * ZSTD_LITFREQ_ADD;
141951 -       for (u = 0; u < litLength; u++)
141952 -               seqStorePtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
141954 -       /* literal Length */
141955 -       {
141956 -               const BYTE LL_deltaCode = 19;
141957 -               const BYTE llCode = (litLength > 63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
141958 -               seqStorePtr->litLengthFreq[llCode]++;
141959 -               seqStorePtr->litLengthSum++;
141960 -       }
141962 -       /* match offset */
141963 -       {
141964 -               BYTE const offCode = (BYTE)ZSTD_highbit32(offset + 1);
141965 -               seqStorePtr->offCodeSum++;
141966 -               seqStorePtr->offCodeFreq[offCode]++;
141967 -       }
141969 -       /* match Length */
141970 -       {
141971 -               const BYTE ML_deltaCode = 36;
141972 -               const BYTE mlCode = (matchLength > 127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength];
141973 -               seqStorePtr->matchLengthFreq[mlCode]++;
141974 -               seqStorePtr->matchLengthSum++;
141975 -       }
141977 -       ZSTD_setLog2Prices(seqStorePtr);
141980 -#define SET_PRICE(pos, mlen_, offset_, litlen_, price_)           \
141981 -       {                                                         \
141982 -               while (last_pos < pos) {                          \
141983 -                       opt[last_pos + 1].price = ZSTD_MAX_PRICE; \
141984 -                       last_pos++;                               \
141985 -               }                                                 \
141986 -               opt[pos].mlen = mlen_;                            \
141987 -               opt[pos].off = offset_;                           \
141988 -               opt[pos].litlen = litlen_;                        \
141989 -               opt[pos].price = price_;                          \
141990 -       }
141992 -/* Update hashTable3 up to ip (excluded)
141993 -   Assumption : always within prefix (i.e. not within extDict) */
141994 -FORCE_INLINE
141995 -U32 ZSTD_insertAndFindFirstIndexHash3(ZSTD_CCtx *zc, const BYTE *ip)
141997 -       U32 *const hashTable3 = zc->hashTable3;
141998 -       U32 const hashLog3 = zc->hashLog3;
141999 -       const BYTE *const base = zc->base;
142000 -       U32 idx = zc->nextToUpdate3;
142001 -       const U32 target = zc->nextToUpdate3 = (U32)(ip - base);
142002 -       const size_t hash3 = ZSTD_hash3Ptr(ip, hashLog3);
142004 -       while (idx < target) {
142005 -               hashTable3[ZSTD_hash3Ptr(base + idx, hashLog3)] = idx;
142006 -               idx++;
142007 -       }
142009 -       return hashTable3[hash3];
142012 -/*-*************************************
142013 -*  Binary Tree search
142014 -***************************************/
142015 -static U32 ZSTD_insertBtAndGetAllMatches(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, U32 nbCompares, const U32 mls, U32 extDict,
142016 -                                        ZSTD_match_t *matches, const U32 minMatchLen)
142018 -       const BYTE *const base = zc->base;
142019 -       const U32 curr = (U32)(ip - base);
142020 -       const U32 hashLog = zc->params.cParams.hashLog;
142021 -       const size_t h = ZSTD_hashPtr(ip, hashLog, mls);
142022 -       U32 *const hashTable = zc->hashTable;
142023 -       U32 matchIndex = hashTable[h];
142024 -       U32 *const bt = zc->chainTable;
142025 -       const U32 btLog = zc->params.cParams.chainLog - 1;
142026 -       const U32 btMask = (1U << btLog) - 1;
142027 -       size_t commonLengthSmaller = 0, commonLengthLarger = 0;
142028 -       const BYTE *const dictBase = zc->dictBase;
142029 -       const U32 dictLimit = zc->dictLimit;
142030 -       const BYTE *const dictEnd = dictBase + dictLimit;
142031 -       const BYTE *const prefixStart = base + dictLimit;
142032 -       const U32 btLow = btMask >= curr ? 0 : curr - btMask;
142033 -       const U32 windowLow = zc->lowLimit;
142034 -       U32 *smallerPtr = bt + 2 * (curr & btMask);
142035 -       U32 *largerPtr = bt + 2 * (curr & btMask) + 1;
142036 -       U32 matchEndIdx = curr + 8;
142037 -       U32 dummy32; /* to be nullified at the end */
142038 -       U32 mnum = 0;
142040 -       const U32 minMatch = (mls == 3) ? 3 : 4;
142041 -       size_t bestLength = minMatchLen - 1;
142043 -       if (minMatch == 3) { /* HC3 match finder */
142044 -               U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(zc, ip);
142045 -               if (matchIndex3 > windowLow && (curr - matchIndex3 < (1 << 18))) {
142046 -                       const BYTE *match;
142047 -                       size_t currMl = 0;
142048 -                       if ((!extDict) || matchIndex3 >= dictLimit) {
142049 -                               match = base + matchIndex3;
142050 -                               if (match[bestLength] == ip[bestLength])
142051 -                                       currMl = ZSTD_count(ip, match, iLimit);
142052 -                       } else {
142053 -                               match = dictBase + matchIndex3;
142054 -                               if (ZSTD_readMINMATCH(match, MINMATCH) ==
142055 -                                   ZSTD_readMINMATCH(ip, MINMATCH)) /* assumption : matchIndex3 <= dictLimit-4 (by table construction) */
142056 -                                       currMl = ZSTD_count_2segments(ip + MINMATCH, match + MINMATCH, iLimit, dictEnd, prefixStart) + MINMATCH;
142057 -                       }
142059 -                       /* save best solution */
142060 -                       if (currMl > bestLength) {
142061 -                               bestLength = currMl;
142062 -                               matches[mnum].off = ZSTD_REP_MOVE_OPT + curr - matchIndex3;
142063 -                               matches[mnum].len = (U32)currMl;
142064 -                               mnum++;
142065 -                               if (currMl > ZSTD_OPT_NUM)
142066 -                                       goto update;
142067 -                               if (ip + currMl == iLimit)
142068 -                                       goto update; /* best possible, and avoid read overflow*/
142069 -                       }
142070 -               }
142071 -       }
142073 -       hashTable[h] = curr; /* Update Hash Table */
142075 -       while (nbCompares-- && (matchIndex > windowLow)) {
142076 -               U32 *nextPtr = bt + 2 * (matchIndex & btMask);
142077 -               size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
142078 -               const BYTE *match;
142080 -               if ((!extDict) || (matchIndex + matchLength >= dictLimit)) {
142081 -                       match = base + matchIndex;
142082 -                       if (match[matchLength] == ip[matchLength]) {
142083 -                               matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iLimit) + 1;
142084 -                       }
142085 -               } else {
142086 -                       match = dictBase + matchIndex;
142087 -                       matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iLimit, dictEnd, prefixStart);
142088 -                       if (matchIndex + matchLength >= dictLimit)
142089 -                               match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
142090 -               }
142092 -               if (matchLength > bestLength) {
142093 -                       if (matchLength > matchEndIdx - matchIndex)
142094 -                               matchEndIdx = matchIndex + (U32)matchLength;
142095 -                       bestLength = matchLength;
142096 -                       matches[mnum].off = ZSTD_REP_MOVE_OPT + curr - matchIndex;
142097 -                       matches[mnum].len = (U32)matchLength;
142098 -                       mnum++;
142099 -                       if (matchLength > ZSTD_OPT_NUM)
142100 -                               break;
142101 -                       if (ip + matchLength == iLimit) /* equal : no way to know if inf or sup */
142102 -                               break;                  /* drop, to guarantee consistency (miss a little bit of compression) */
142103 -               }
142105 -               if (match[matchLength] < ip[matchLength]) {
142106 -                       /* match is smaller than curr */
142107 -                       *smallerPtr = matchIndex;         /* update smaller idx */
142108 -                       commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
142109 -                       if (matchIndex <= btLow) {
142110 -                               smallerPtr = &dummy32;
142111 -                               break;
142112 -                       }                         /* beyond tree size, stop the search */
142113 -                       smallerPtr = nextPtr + 1; /* new "smaller" => larger of match */
142114 -                       matchIndex = nextPtr[1];  /* new matchIndex larger than previous (closer to curr) */
142115 -               } else {
142116 -                       /* match is larger than curr */
142117 -                       *largerPtr = matchIndex;
142118 -                       commonLengthLarger = matchLength;
142119 -                       if (matchIndex <= btLow) {
142120 -                               largerPtr = &dummy32;
142121 -                               break;
142122 -                       } /* beyond tree size, stop the search */
142123 -                       largerPtr = nextPtr;
142124 -                       matchIndex = nextPtr[0];
142125 -               }
142126 -       }
142128 -       *smallerPtr = *largerPtr = 0;
142130 -update:
142131 -       zc->nextToUpdate = (matchEndIdx > curr + 8) ? matchEndIdx - 8 : curr + 1;
142132 -       return mnum;
142135 -/** Tree updater, providing best match */
142136 -static U32 ZSTD_BtGetAllMatches(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, const U32 maxNbAttempts, const U32 mls, ZSTD_match_t *matches,
142137 -                               const U32 minMatchLen)
142139 -       if (ip < zc->base + zc->nextToUpdate)
142140 -               return 0; /* skipped area */
142141 -       ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls);
142142 -       return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 0, matches, minMatchLen);
142145 -static U32 ZSTD_BtGetAllMatches_selectMLS(ZSTD_CCtx *zc, /* Index table will be updated */
142146 -                                         const BYTE *ip, const BYTE *const iHighLimit, const U32 maxNbAttempts, const U32 matchLengthSearch,
142147 -                                         ZSTD_match_t *matches, const U32 minMatchLen)
142149 -       switch (matchLengthSearch) {
142150 -       case 3: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen);
142151 -       default:
142152 -       case 4: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen);
142153 -       case 5: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen);
142154 -       case 7:
142155 -       case 6: return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen);
142156 -       }
142159 -/** Tree updater, providing best match */
142160 -static U32 ZSTD_BtGetAllMatches_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, const U32 maxNbAttempts, const U32 mls,
142161 -                                       ZSTD_match_t *matches, const U32 minMatchLen)
142163 -       if (ip < zc->base + zc->nextToUpdate)
142164 -               return 0; /* skipped area */
142165 -       ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls);
142166 -       return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 1, matches, minMatchLen);
142169 -static U32 ZSTD_BtGetAllMatches_selectMLS_extDict(ZSTD_CCtx *zc, /* Index table will be updated */
142170 -                                                 const BYTE *ip, const BYTE *const iHighLimit, const U32 maxNbAttempts, const U32 matchLengthSearch,
142171 -                                                 ZSTD_match_t *matches, const U32 minMatchLen)
142173 -       switch (matchLengthSearch) {
142174 -       case 3: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen);
142175 -       default:
142176 -       case 4: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen);
142177 -       case 5: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen);
142178 -       case 7:
142179 -       case 6: return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen);
142180 -       }
142183 -/*-*******************************
142184 -*  Optimal parser
142185 -*********************************/
142186 -FORCE_INLINE
142187 -void ZSTD_compressBlock_opt_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const int ultra)
142189 -       seqStore_t *seqStorePtr = &(ctx->seqStore);
142190 -       const BYTE *const istart = (const BYTE *)src;
142191 -       const BYTE *ip = istart;
142192 -       const BYTE *anchor = istart;
142193 -       const BYTE *const iend = istart + srcSize;
142194 -       const BYTE *const ilimit = iend - 8;
142195 -       const BYTE *const base = ctx->base;
142196 -       const BYTE *const prefixStart = base + ctx->dictLimit;
142198 -       const U32 maxSearches = 1U << ctx->params.cParams.searchLog;
142199 -       const U32 sufficient_len = ctx->params.cParams.targetLength;
142200 -       const U32 mls = ctx->params.cParams.searchLength;
142201 -       const U32 minMatch = (ctx->params.cParams.searchLength == 3) ? 3 : 4;
142203 -       ZSTD_optimal_t *opt = seqStorePtr->priceTable;
142204 -       ZSTD_match_t *matches = seqStorePtr->matchTable;
142205 -       const BYTE *inr;
142206 -       U32 offset, rep[ZSTD_REP_NUM];
142208 -       /* init */
142209 -       ctx->nextToUpdate3 = ctx->nextToUpdate;
142210 -       ZSTD_rescaleFreqs(seqStorePtr, (const BYTE *)src, srcSize);
142211 -       ip += (ip == prefixStart);
142212 -       {
142213 -               U32 i;
142214 -               for (i = 0; i < ZSTD_REP_NUM; i++)
142215 -                       rep[i] = ctx->rep[i];
142216 -       }
142218 -       /* Match Loop */
142219 -       while (ip < ilimit) {
142220 -               U32 cur, match_num, last_pos, litlen, price;
142221 -               U32 u, mlen, best_mlen, best_off, litLength;
142222 -               memset(opt, 0, sizeof(ZSTD_optimal_t));
142223 -               last_pos = 0;
142224 -               litlen = (U32)(ip - anchor);
142226 -               /* check repCode */
142227 -               {
142228 -                       U32 i, last_i = ZSTD_REP_CHECK + (ip == anchor);
142229 -                       for (i = (ip == anchor); i < last_i; i++) {
142230 -                               const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i];
142231 -                               if ((repCur > 0) && (repCur < (S32)(ip - prefixStart)) &&
142232 -                                   (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repCur, minMatch))) {
142233 -                                       mlen = (U32)ZSTD_count(ip + minMatch, ip + minMatch - repCur, iend) + minMatch;
142234 -                                       if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) {
142235 -                                               best_mlen = mlen;
142236 -                                               best_off = i;
142237 -                                               cur = 0;
142238 -                                               last_pos = 1;
142239 -                                               goto _storeSequence;
142240 -                                       }
142241 -                                       best_off = i - (ip == anchor);
142242 -                                       do {
142243 -                                               price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
142244 -                                               if (mlen > last_pos || price < opt[mlen].price)
142245 -                                                       SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */
142246 -                                               mlen--;
142247 -                                       } while (mlen >= minMatch);
142248 -                               }
142249 -                       }
142250 -               }
142252 -               match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, ip, iend, maxSearches, mls, matches, minMatch);
142254 -               if (!last_pos && !match_num) {
142255 -                       ip++;
142256 -                       continue;
142257 -               }
142259 -               if (match_num && (matches[match_num - 1].len > sufficient_len || matches[match_num - 1].len >= ZSTD_OPT_NUM)) {
142260 -                       best_mlen = matches[match_num - 1].len;
142261 -                       best_off = matches[match_num - 1].off;
142262 -                       cur = 0;
142263 -                       last_pos = 1;
142264 -                       goto _storeSequence;
142265 -               }
142267 -               /* set prices using matches at position = 0 */
142268 -               best_mlen = (last_pos) ? last_pos : minMatch;
142269 -               for (u = 0; u < match_num; u++) {
142270 -                       mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen;
142271 -                       best_mlen = matches[u].len;
142272 -                       while (mlen <= best_mlen) {
142273 -                               price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra);
142274 -                               if (mlen > last_pos || price < opt[mlen].price)
142275 -                                       SET_PRICE(mlen, mlen, matches[u].off, litlen, price); /* note : macro modifies last_pos */
142276 -                               mlen++;
142277 -                       }
142278 -               }
142280 -               if (last_pos < minMatch) {
142281 -                       ip++;
142282 -                       continue;
142283 -               }
142285 -               /* initialize opt[0] */
142286 -               {
142287 -                       U32 i;
142288 -                       for (i = 0; i < ZSTD_REP_NUM; i++)
142289 -                               opt[0].rep[i] = rep[i];
142290 -               }
142291 -               opt[0].mlen = 1;
142292 -               opt[0].litlen = litlen;
142294 -               /* check further positions */
142295 -               for (cur = 1; cur <= last_pos; cur++) {
142296 -                       inr = ip + cur;
142298 -                       if (opt[cur - 1].mlen == 1) {
142299 -                               litlen = opt[cur - 1].litlen + 1;
142300 -                               if (cur > litlen) {
142301 -                                       price = opt[cur - litlen].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - litlen);
142302 -                               } else
142303 -                                       price = ZSTD_getLiteralPrice(seqStorePtr, litlen, anchor);
142304 -                       } else {
142305 -                               litlen = 1;
142306 -                               price = opt[cur - 1].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - 1);
142307 -                       }
142309 -                       if (cur > last_pos || price <= opt[cur].price)
142310 -                               SET_PRICE(cur, 1, 0, litlen, price);
142312 -                       if (cur == last_pos)
142313 -                               break;
142315 -                       if (inr > ilimit) /* last match must start at a minimum distance of 8 from oend */
142316 -                               continue;
142318 -                       mlen = opt[cur].mlen;
142319 -                       if (opt[cur].off > ZSTD_REP_MOVE_OPT) {
142320 -                               opt[cur].rep[2] = opt[cur - mlen].rep[1];
142321 -                               opt[cur].rep[1] = opt[cur - mlen].rep[0];
142322 -                               opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT;
142323 -                       } else {
142324 -                               opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur - mlen].rep[1] : opt[cur - mlen].rep[2];
142325 -                               opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur - mlen].rep[0] : opt[cur - mlen].rep[1];
142326 -                               opt[cur].rep[0] =
142327 -                                   ((opt[cur].off == ZSTD_REP_MOVE_OPT) && (mlen != 1)) ? (opt[cur - mlen].rep[0] - 1) : (opt[cur - mlen].rep[opt[cur].off]);
142328 -                       }
142330 -                       best_mlen = minMatch;
142331 -                       {
142332 -                               U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1);
142333 -                               for (i = (opt[cur].mlen != 1); i < last_i; i++) { /* check rep */
142334 -                                       const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i];
142335 -                                       if ((repCur > 0) && (repCur < (S32)(inr - prefixStart)) &&
142336 -                                           (ZSTD_readMINMATCH(inr, minMatch) == ZSTD_readMINMATCH(inr - repCur, minMatch))) {
142337 -                                               mlen = (U32)ZSTD_count(inr + minMatch, inr + minMatch - repCur, iend) + minMatch;
142339 -                                               if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) {
142340 -                                                       best_mlen = mlen;
142341 -                                                       best_off = i;
142342 -                                                       last_pos = cur + 1;
142343 -                                                       goto _storeSequence;
142344 -                                               }
142346 -                                               best_off = i - (opt[cur].mlen != 1);
142347 -                                               if (mlen > best_mlen)
142348 -                                                       best_mlen = mlen;
142350 -                                               do {
142351 -                                                       if (opt[cur].mlen == 1) {
142352 -                                                               litlen = opt[cur].litlen;
142353 -                                                               if (cur > litlen) {
142354 -                                                                       price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, inr - litlen,
142355 -                                                                                                                       best_off, mlen - MINMATCH, ultra);
142356 -                                                               } else
142357 -                                                                       price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
142358 -                                                       } else {
142359 -                                                               litlen = 0;
142360 -                                                               price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, best_off, mlen - MINMATCH, ultra);
142361 -                                                       }
142363 -                                                       if (cur + mlen > last_pos || price <= opt[cur + mlen].price)
142364 -                                                               SET_PRICE(cur + mlen, mlen, i, litlen, price);
142365 -                                                       mlen--;
142366 -                                               } while (mlen >= minMatch);
142367 -                                       }
142368 -                               }
142369 -                       }
142371 -                       match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, inr, iend, maxSearches, mls, matches, best_mlen);
142373 -                       if (match_num > 0 && (matches[match_num - 1].len > sufficient_len || cur + matches[match_num - 1].len >= ZSTD_OPT_NUM)) {
142374 -                               best_mlen = matches[match_num - 1].len;
142375 -                               best_off = matches[match_num - 1].off;
142376 -                               last_pos = cur + 1;
142377 -                               goto _storeSequence;
142378 -                       }
142380 -                       /* set prices using matches at position = cur */
142381 -                       for (u = 0; u < match_num; u++) {
142382 -                               mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen;
142383 -                               best_mlen = matches[u].len;
142385 -                               while (mlen <= best_mlen) {
142386 -                                       if (opt[cur].mlen == 1) {
142387 -                                               litlen = opt[cur].litlen;
142388 -                                               if (cur > litlen)
142389 -                                                       price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, ip + cur - litlen,
142390 -                                                                                                       matches[u].off - 1, mlen - MINMATCH, ultra);
142391 -                                               else
142392 -                                                       price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra);
142393 -                                       } else {
142394 -                                               litlen = 0;
142395 -                                               price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, matches[u].off - 1, mlen - MINMATCH, ultra);
142396 -                                       }
142398 -                                       if (cur + mlen > last_pos || (price < opt[cur + mlen].price))
142399 -                                               SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price);
142401 -                                       mlen++;
142402 -                               }
142403 -                       }
142404 -               }
142406 -               best_mlen = opt[last_pos].mlen;
142407 -               best_off = opt[last_pos].off;
142408 -               cur = last_pos - best_mlen;
142410 -       /* store sequence */
142411 -_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */
142412 -               opt[0].mlen = 1;
142414 -               while (1) {
142415 -                       mlen = opt[cur].mlen;
142416 -                       offset = opt[cur].off;
142417 -                       opt[cur].mlen = best_mlen;
142418 -                       opt[cur].off = best_off;
142419 -                       best_mlen = mlen;
142420 -                       best_off = offset;
142421 -                       if (mlen > cur)
142422 -                               break;
142423 -                       cur -= mlen;
142424 -               }
142426 -               for (u = 0; u <= last_pos;) {
142427 -                       u += opt[u].mlen;
142428 -               }
142430 -               for (cur = 0; cur < last_pos;) {
142431 -                       mlen = opt[cur].mlen;
142432 -                       if (mlen == 1) {
142433 -                               ip++;
142434 -                               cur++;
142435 -                               continue;
142436 -                       }
142437 -                       offset = opt[cur].off;
142438 -                       cur += mlen;
142439 -                       litLength = (U32)(ip - anchor);
142441 -                       if (offset > ZSTD_REP_MOVE_OPT) {
142442 -                               rep[2] = rep[1];
142443 -                               rep[1] = rep[0];
142444 -                               rep[0] = offset - ZSTD_REP_MOVE_OPT;
142445 -                               offset--;
142446 -                       } else {
142447 -                               if (offset != 0) {
142448 -                                       best_off = (offset == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : (rep[offset]);
142449 -                                       if (offset != 1)
142450 -                                               rep[2] = rep[1];
142451 -                                       rep[1] = rep[0];
142452 -                                       rep[0] = best_off;
142453 -                               }
142454 -                               if (litLength == 0)
142455 -                                       offset--;
142456 -                       }
142458 -                       ZSTD_updatePrice(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH);
142459 -                       ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH);
142460 -                       anchor = ip = ip + mlen;
142461 -               }
142462 -       } /* for (cur=0; cur < last_pos; ) */
142464 -       /* Save reps for next block */
142465 -       {
142466 -               int i;
142467 -               for (i = 0; i < ZSTD_REP_NUM; i++)
142468 -                       ctx->repToConfirm[i] = rep[i];
142469 -       }
142471 -       /* Last Literals */
142472 -       {
142473 -               size_t const lastLLSize = iend - anchor;
142474 -               memcpy(seqStorePtr->lit, anchor, lastLLSize);
142475 -               seqStorePtr->lit += lastLLSize;
142476 -       }
142479 -FORCE_INLINE
142480 -void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const int ultra)
142482 -       seqStore_t *seqStorePtr = &(ctx->seqStore);
142483 -       const BYTE *const istart = (const BYTE *)src;
142484 -       const BYTE *ip = istart;
142485 -       const BYTE *anchor = istart;
142486 -       const BYTE *const iend = istart + srcSize;
142487 -       const BYTE *const ilimit = iend - 8;
142488 -       const BYTE *const base = ctx->base;
142489 -       const U32 lowestIndex = ctx->lowLimit;
142490 -       const U32 dictLimit = ctx->dictLimit;
142491 -       const BYTE *const prefixStart = base + dictLimit;
142492 -       const BYTE *const dictBase = ctx->dictBase;
142493 -       const BYTE *const dictEnd = dictBase + dictLimit;
142495 -       const U32 maxSearches = 1U << ctx->params.cParams.searchLog;
142496 -       const U32 sufficient_len = ctx->params.cParams.targetLength;
142497 -       const U32 mls = ctx->params.cParams.searchLength;
142498 -       const U32 minMatch = (ctx->params.cParams.searchLength == 3) ? 3 : 4;
142500 -       ZSTD_optimal_t *opt = seqStorePtr->priceTable;
142501 -       ZSTD_match_t *matches = seqStorePtr->matchTable;
142502 -       const BYTE *inr;
142504 -       /* init */
142505 -       U32 offset, rep[ZSTD_REP_NUM];
142506 -       {
142507 -               U32 i;
142508 -               for (i = 0; i < ZSTD_REP_NUM; i++)
142509 -                       rep[i] = ctx->rep[i];
142510 -       }
142512 -       ctx->nextToUpdate3 = ctx->nextToUpdate;
142513 -       ZSTD_rescaleFreqs(seqStorePtr, (const BYTE *)src, srcSize);
142514 -       ip += (ip == prefixStart);
142516 -       /* Match Loop */
142517 -       while (ip < ilimit) {
142518 -               U32 cur, match_num, last_pos, litlen, price;
142519 -               U32 u, mlen, best_mlen, best_off, litLength;
142520 -               U32 curr = (U32)(ip - base);
142521 -               memset(opt, 0, sizeof(ZSTD_optimal_t));
142522 -               last_pos = 0;
142523 -               opt[0].litlen = (U32)(ip - anchor);
142525 -               /* check repCode */
142526 -               {
142527 -                       U32 i, last_i = ZSTD_REP_CHECK + (ip == anchor);
142528 -                       for (i = (ip == anchor); i < last_i; i++) {
142529 -                               const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i];
142530 -                               const U32 repIndex = (U32)(curr - repCur);
142531 -                               const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
142532 -                               const BYTE *const repMatch = repBase + repIndex;
142533 -                               if ((repCur > 0 && repCur <= (S32)curr) &&
142534 -                                   (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
142535 -                                   && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch))) {
142536 -                                       /* repcode detected we should take it */
142537 -                                       const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
142538 -                                       mlen = (U32)ZSTD_count_2segments(ip + minMatch, repMatch + minMatch, iend, repEnd, prefixStart) + minMatch;
142540 -                                       if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) {
142541 -                                               best_mlen = mlen;
142542 -                                               best_off = i;
142543 -                                               cur = 0;
142544 -                                               last_pos = 1;
142545 -                                               goto _storeSequence;
142546 -                                       }
142548 -                                       best_off = i - (ip == anchor);
142549 -                                       litlen = opt[0].litlen;
142550 -                                       do {
142551 -                                               price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
142552 -                                               if (mlen > last_pos || price < opt[mlen].price)
142553 -                                                       SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */
142554 -                                               mlen--;
142555 -                                       } while (mlen >= minMatch);
142556 -                               }
142557 -                       }
142558 -               }
142560 -               match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, ip, iend, maxSearches, mls, matches, minMatch); /* first search (depth 0) */
142562 -               if (!last_pos && !match_num) {
142563 -                       ip++;
142564 -                       continue;
142565 -               }
142567 -               {
142568 -                       U32 i;
142569 -                       for (i = 0; i < ZSTD_REP_NUM; i++)
142570 -                               opt[0].rep[i] = rep[i];
142571 -               }
142572 -               opt[0].mlen = 1;
142574 -               if (match_num && (matches[match_num - 1].len > sufficient_len || matches[match_num - 1].len >= ZSTD_OPT_NUM)) {
142575 -                       best_mlen = matches[match_num - 1].len;
142576 -                       best_off = matches[match_num - 1].off;
142577 -                       cur = 0;
142578 -                       last_pos = 1;
142579 -                       goto _storeSequence;
142580 -               }
142582 -               best_mlen = (last_pos) ? last_pos : minMatch;
142584 -               /* set prices using matches at position = 0 */
142585 -               for (u = 0; u < match_num; u++) {
142586 -                       mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen;
142587 -                       best_mlen = matches[u].len;
142588 -                       litlen = opt[0].litlen;
142589 -                       while (mlen <= best_mlen) {
142590 -                               price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra);
142591 -                               if (mlen > last_pos || price < opt[mlen].price)
142592 -                                       SET_PRICE(mlen, mlen, matches[u].off, litlen, price);
142593 -                               mlen++;
142594 -                       }
142595 -               }
142597 -               if (last_pos < minMatch) {
142598 -                       ip++;
142599 -                       continue;
142600 -               }
142602 -               /* check further positions */
142603 -               for (cur = 1; cur <= last_pos; cur++) {
142604 -                       inr = ip + cur;
142606 -                       if (opt[cur - 1].mlen == 1) {
142607 -                               litlen = opt[cur - 1].litlen + 1;
142608 -                               if (cur > litlen) {
142609 -                                       price = opt[cur - litlen].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - litlen);
142610 -                               } else
142611 -                                       price = ZSTD_getLiteralPrice(seqStorePtr, litlen, anchor);
142612 -                       } else {
142613 -                               litlen = 1;
142614 -                               price = opt[cur - 1].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr - 1);
142615 -                       }
142617 -                       if (cur > last_pos || price <= opt[cur].price)
142618 -                               SET_PRICE(cur, 1, 0, litlen, price);
142620 -                       if (cur == last_pos)
142621 -                               break;
142623 -                       if (inr > ilimit) /* last match must start at a minimum distance of 8 from oend */
142624 -                               continue;
142626 -                       mlen = opt[cur].mlen;
142627 -                       if (opt[cur].off > ZSTD_REP_MOVE_OPT) {
142628 -                               opt[cur].rep[2] = opt[cur - mlen].rep[1];
142629 -                               opt[cur].rep[1] = opt[cur - mlen].rep[0];
142630 -                               opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT;
142631 -                       } else {
142632 -                               opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur - mlen].rep[1] : opt[cur - mlen].rep[2];
142633 -                               opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur - mlen].rep[0] : opt[cur - mlen].rep[1];
142634 -                               opt[cur].rep[0] =
142635 -                                   ((opt[cur].off == ZSTD_REP_MOVE_OPT) && (mlen != 1)) ? (opt[cur - mlen].rep[0] - 1) : (opt[cur - mlen].rep[opt[cur].off]);
142636 -                       }
142638 -                       best_mlen = minMatch;
142639 -                       {
142640 -                               U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1);
142641 -                               for (i = (mlen != 1); i < last_i; i++) {
142642 -                                       const S32 repCur = (i == ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i];
142643 -                                       const U32 repIndex = (U32)(curr + cur - repCur);
142644 -                                       const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
142645 -                                       const BYTE *const repMatch = repBase + repIndex;
142646 -                                       if ((repCur > 0 && repCur <= (S32)(curr + cur)) &&
142647 -                                           (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
142648 -                                           && (ZSTD_readMINMATCH(inr, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch))) {
142649 -                                               /* repcode detected */
142650 -                                               const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
142651 -                                               mlen = (U32)ZSTD_count_2segments(inr + minMatch, repMatch + minMatch, iend, repEnd, prefixStart) + minMatch;
142653 -                                               if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) {
142654 -                                                       best_mlen = mlen;
142655 -                                                       best_off = i;
142656 -                                                       last_pos = cur + 1;
142657 -                                                       goto _storeSequence;
142658 -                                               }
142660 -                                               best_off = i - (opt[cur].mlen != 1);
142661 -                                               if (mlen > best_mlen)
142662 -                                                       best_mlen = mlen;
142664 -                                               do {
142665 -                                                       if (opt[cur].mlen == 1) {
142666 -                                                               litlen = opt[cur].litlen;
142667 -                                                               if (cur > litlen) {
142668 -                                                                       price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, inr - litlen,
142669 -                                                                                                                       best_off, mlen - MINMATCH, ultra);
142670 -                                                               } else
142671 -                                                                       price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
142672 -                                                       } else {
142673 -                                                               litlen = 0;
142674 -                                                               price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, best_off, mlen - MINMATCH, ultra);
142675 -                                                       }
142677 -                                                       if (cur + mlen > last_pos || price <= opt[cur + mlen].price)
142678 -                                                               SET_PRICE(cur + mlen, mlen, i, litlen, price);
142679 -                                                       mlen--;
142680 -                                               } while (mlen >= minMatch);
142681 -                                       }
142682 -                               }
142683 -                       }
142685 -                       match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, inr, iend, maxSearches, mls, matches, minMatch);
142687 -                       if (match_num > 0 && (matches[match_num - 1].len > sufficient_len || cur + matches[match_num - 1].len >= ZSTD_OPT_NUM)) {
142688 -                               best_mlen = matches[match_num - 1].len;
142689 -                               best_off = matches[match_num - 1].off;
142690 -                               last_pos = cur + 1;
142691 -                               goto _storeSequence;
142692 -                       }
142694 -                       /* set prices using matches at position = cur */
142695 -                       for (u = 0; u < match_num; u++) {
142696 -                               mlen = (u > 0) ? matches[u - 1].len + 1 : best_mlen;
142697 -                               best_mlen = matches[u].len;
142699 -                               while (mlen <= best_mlen) {
142700 -                                       if (opt[cur].mlen == 1) {
142701 -                                               litlen = opt[cur].litlen;
142702 -                                               if (cur > litlen)
142703 -                                                       price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, ip + cur - litlen,
142704 -                                                                                                       matches[u].off - 1, mlen - MINMATCH, ultra);
142705 -                                               else
142706 -                                                       price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off - 1, mlen - MINMATCH, ultra);
142707 -                                       } else {
142708 -                                               litlen = 0;
142709 -                                               price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, matches[u].off - 1, mlen - MINMATCH, ultra);
142710 -                                       }
142712 -                                       if (cur + mlen > last_pos || (price < opt[cur + mlen].price))
142713 -                                               SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price);
142715 -                                       mlen++;
142716 -                               }
142717 -                       }
142718 -               } /* for (cur = 1; cur <= last_pos; cur++) */
142720 -               best_mlen = opt[last_pos].mlen;
142721 -               best_off = opt[last_pos].off;
142722 -               cur = last_pos - best_mlen;
142724 -       /* store sequence */
142725 -_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */
142726 -               opt[0].mlen = 1;
142728 -               while (1) {
142729 -                       mlen = opt[cur].mlen;
142730 -                       offset = opt[cur].off;
142731 -                       opt[cur].mlen = best_mlen;
142732 -                       opt[cur].off = best_off;
142733 -                       best_mlen = mlen;
142734 -                       best_off = offset;
142735 -                       if (mlen > cur)
142736 -                               break;
142737 -                       cur -= mlen;
142738 -               }
142740 -               for (u = 0; u <= last_pos;) {
142741 -                       u += opt[u].mlen;
142742 -               }
142744 -               for (cur = 0; cur < last_pos;) {
142745 -                       mlen = opt[cur].mlen;
142746 -                       if (mlen == 1) {
142747 -                               ip++;
142748 -                               cur++;
142749 -                               continue;
142750 -                       }
142751 -                       offset = opt[cur].off;
142752 -                       cur += mlen;
142753 -                       litLength = (U32)(ip - anchor);
142755 -                       if (offset > ZSTD_REP_MOVE_OPT) {
142756 -                               rep[2] = rep[1];
142757 -                               rep[1] = rep[0];
142758 -                               rep[0] = offset - ZSTD_REP_MOVE_OPT;
142759 -                               offset--;
142760 -                       } else {
142761 -                               if (offset != 0) {
142762 -                                       best_off = (offset == ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : (rep[offset]);
142763 -                                       if (offset != 1)
142764 -                                               rep[2] = rep[1];
142765 -                                       rep[1] = rep[0];
142766 -                                       rep[0] = best_off;
142767 -                               }
142769 -                               if (litLength == 0)
142770 -                                       offset--;
142771 -                       }
142773 -                       ZSTD_updatePrice(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH);
142774 -                       ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen - MINMATCH);
142775 -                       anchor = ip = ip + mlen;
142776 -               }
142777 -       } /* for (cur=0; cur < last_pos; ) */
142779 -       /* Save reps for next block */
142780 -       {
142781 -               int i;
142782 -               for (i = 0; i < ZSTD_REP_NUM; i++)
142783 -                       ctx->repToConfirm[i] = rep[i];
142784 -       }
142786 -       /* Last Literals */
142787 -       {
142788 -               size_t lastLLSize = iend - anchor;
142789 -               memcpy(seqStorePtr->lit, anchor, lastLLSize);
142790 -               seqStorePtr->lit += lastLLSize;
142791 -       }
142794 -#endif /* ZSTD_OPT_H_91842398743 */
142795 diff --git a/localversion b/localversion
142796 new file mode 100644
142797 index 000000000000..c21af2f75ee0
142798 --- /dev/null
142799 +++ b/localversion
142800 @@ -0,0 +1 @@
142801 +-xanmod1
142802 diff --git a/localversion-cacule b/localversion-cacule
142803 new file mode 100644
142804 index 000000000000..585f177f9bba
142805 --- /dev/null
142806 +++ b/localversion-cacule
142807 @@ -0,0 +1 @@
142808 +-cacule
142809 diff --git a/mm/Kconfig b/mm/Kconfig
142810 index 24c045b24b95..5650c2d3c9c2 100644
142811 --- a/mm/Kconfig
142812 +++ b/mm/Kconfig
142813 @@ -122,6 +122,41 @@ config SPARSEMEM_VMEMMAP
142814           pfn_to_page and page_to_pfn operations.  This is the most
142815           efficient option when sufficient kernel resources are available.
142817 +config CLEAN_LOW_KBYTES
142818 +       int "Default value for vm.clean_low_kbytes"
142819 +       depends on SYSCTL
142820 +       default "150000"
142821 +       help
142822 +         The vm.clean_low_kbytes sysctl knob provides *best-effort*
142823 +         protection of clean file pages. The clean file pages on the current
142824 +         node won't be reclaimed under memory pressure when their amount is
142825 +         below vm.clean_low_kbytes *unless* we threaten to OOM or have
142826 +         no free swap space or vm.swappiness=0.
142828 +         Protection of clean file pages may be used to prevent thrashing and
142829 +         reducing I/O under low-memory conditions.
142831 +         Setting it to a high value may result in a early eviction of anonymous
142832 +         pages into the swap space by attempting to hold the protected amount of
142833 +         clean file pages in memory.
142835 +config CLEAN_MIN_KBYTES
142836 +       int "Default value for vm.clean_min_kbytes"
142837 +       depends on SYSCTL
142838 +       default "0"
142839 +       help
142840 +         The vm.clean_min_kbytes sysctl knob provides *hard* protection
142841 +         of clean file pages. The clean file pages on the current node won't be
142842 +         reclaimed under memory pressure when their amount is below
142843 +         vm.clean_min_kbytes.
142845 +         Hard protection of clean file pages may be used to avoid high latency and
142846 +         prevent livelock in near-OOM conditions.
142848 +         Setting it to a high value may result in a early out-of-memory condition
142849 +         due to the inability to reclaim the protected amount of clean file pages
142850 +         when other types of pages cannot be reclaimed.
142852  config HAVE_MEMBLOCK_PHYS_MAP
142853         bool
142855 @@ -872,4 +907,59 @@ config MAPPING_DIRTY_HELPERS
142856  config KMAP_LOCAL
142857         bool
142859 +config LRU_GEN
142860 +       bool "Multigenerational LRU"
142861 +       depends on MMU
142862 +       help
142863 +         A high performance LRU implementation to heavily overcommit workloads
142864 +         that are not IO bound. See Documentation/vm/multigen_lru.rst for
142865 +         details.
142867 +         Warning: do not enable this option unless you plan to use it because
142868 +         it introduces a small per-process and per-memcg and per-node memory
142869 +         overhead.
142871 +config NR_LRU_GENS
142872 +       int "Max number of generations"
142873 +       depends on LRU_GEN
142874 +       range 4 31
142875 +       default 7
142876 +       help
142877 +         This will use order_base_2(N+1) spare bits from page flags.
142879 +         Warning: do not use numbers larger than necessary because each
142880 +         generation introduces a small per-node and per-memcg memory overhead.
142882 +config TIERS_PER_GEN
142883 +       int "Number of tiers per generation"
142884 +       depends on LRU_GEN
142885 +       range 2 5
142886 +       default 4
142887 +       help
142888 +         This will use N-2 spare bits from page flags.
142890 +         Higher values generally offer better protection to active pages under
142891 +         heavy buffered I/O workloads.
142893 +config LRU_GEN_ENABLED
142894 +       bool "Turn on by default"
142895 +       depends on LRU_GEN
142896 +       help
142897 +         The default value of /sys/kernel/mm/lru_gen/enabled is 0. This option
142898 +         changes it to 1.
142900 +         Warning: the default value is the fast path. See
142901 +         Documentation/static-keys.txt for details.
142903 +config LRU_GEN_STATS
142904 +       bool "Full stats for debugging"
142905 +       depends on LRU_GEN
142906 +       help
142907 +         This option keeps full stats for each generation, which can be read
142908 +         from /sys/kernel/debug/lru_gen_full.
142910 +         Warning: do not enable this option unless you plan to use it because
142911 +         it introduces an additional small per-process and per-memcg and
142912 +         per-node memory overhead.
142914  endmenu
142915 diff --git a/mm/gup.c b/mm/gup.c
142916 index ef7d2da9f03f..333f5dfd8942 100644
142917 --- a/mm/gup.c
142918 +++ b/mm/gup.c
142919 @@ -1551,54 +1551,60 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
142920                                         struct vm_area_struct **vmas,
142921                                         unsigned int gup_flags)
142923 -       unsigned long i;
142924 -       unsigned long step;
142925 -       bool drain_allow = true;
142926 -       bool migrate_allow = true;
142927 +       unsigned long i, isolation_error_count;
142928 +       bool drain_allow;
142929         LIST_HEAD(cma_page_list);
142930         long ret = nr_pages;
142931 +       struct page *prev_head, *head;
142932         struct migration_target_control mtc = {
142933                 .nid = NUMA_NO_NODE,
142934                 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_NOWARN,
142935         };
142937  check_again:
142938 -       for (i = 0; i < nr_pages;) {
142940 -               struct page *head = compound_head(pages[i]);
142942 -               /*
142943 -                * gup may start from a tail page. Advance step by the left
142944 -                * part.
142945 -                */
142946 -               step = compound_nr(head) - (pages[i] - head);
142947 +       prev_head = NULL;
142948 +       isolation_error_count = 0;
142949 +       drain_allow = true;
142950 +       for (i = 0; i < nr_pages; i++) {
142951 +               head = compound_head(pages[i]);
142952 +               if (head == prev_head)
142953 +                       continue;
142954 +               prev_head = head;
142955                 /*
142956                  * If we get a page from the CMA zone, since we are going to
142957                  * be pinning these entries, we might as well move them out
142958                  * of the CMA zone if possible.
142959                  */
142960                 if (is_migrate_cma_page(head)) {
142961 -                       if (PageHuge(head))
142962 -                               isolate_huge_page(head, &cma_page_list);
142963 -                       else {
142964 +                       if (PageHuge(head)) {
142965 +                               if (!isolate_huge_page(head, &cma_page_list))
142966 +                                       isolation_error_count++;
142967 +                       } else {
142968                                 if (!PageLRU(head) && drain_allow) {
142969                                         lru_add_drain_all();
142970                                         drain_allow = false;
142971                                 }
142973 -                               if (!isolate_lru_page(head)) {
142974 -                                       list_add_tail(&head->lru, &cma_page_list);
142975 -                                       mod_node_page_state(page_pgdat(head),
142976 -                                                           NR_ISOLATED_ANON +
142977 -                                                           page_is_file_lru(head),
142978 -                                                           thp_nr_pages(head));
142979 +                               if (isolate_lru_page(head)) {
142980 +                                       isolation_error_count++;
142981 +                                       continue;
142982                                 }
142983 +                               list_add_tail(&head->lru, &cma_page_list);
142984 +                               mod_node_page_state(page_pgdat(head),
142985 +                                                   NR_ISOLATED_ANON +
142986 +                                                   page_is_file_lru(head),
142987 +                                                   thp_nr_pages(head));
142988                         }
142989                 }
142991 -               i += step;
142992         }
142994 +       /*
142995 +        * If list is empty, and no isolation errors, means that all pages are
142996 +        * in the correct zone.
142997 +        */
142998 +       if (list_empty(&cma_page_list) && !isolation_error_count)
142999 +               return ret;
143001         if (!list_empty(&cma_page_list)) {
143002                 /*
143003                  * drop the above get_user_pages reference.
143004 @@ -1609,34 +1615,28 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
143005                         for (i = 0; i < nr_pages; i++)
143006                                 put_page(pages[i]);
143008 -               if (migrate_pages(&cma_page_list, alloc_migration_target, NULL,
143009 -                       (unsigned long)&mtc, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
143010 -                       /*
143011 -                        * some of the pages failed migration. Do get_user_pages
143012 -                        * without migration.
143013 -                        */
143014 -                       migrate_allow = false;
143016 +               ret = migrate_pages(&cma_page_list, alloc_migration_target,
143017 +                                   NULL, (unsigned long)&mtc, MIGRATE_SYNC,
143018 +                                   MR_CONTIG_RANGE);
143019 +               if (ret) {
143020                         if (!list_empty(&cma_page_list))
143021                                 putback_movable_pages(&cma_page_list);
143022 +                       return ret > 0 ? -ENOMEM : ret;
143023                 }
143024 -               /*
143025 -                * We did migrate all the pages, Try to get the page references
143026 -                * again migrating any new CMA pages which we failed to isolate
143027 -                * earlier.
143028 -                */
143029 -               ret = __get_user_pages_locked(mm, start, nr_pages,
143030 -                                                  pages, vmas, NULL,
143031 -                                                  gup_flags);
143033 -               if ((ret > 0) && migrate_allow) {
143034 -                       nr_pages = ret;
143035 -                       drain_allow = true;
143036 -                       goto check_again;
143037 -               }
143039 +               /* We unpinned pages before migration, pin them again */
143040 +               ret = __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
143041 +                                             NULL, gup_flags);
143042 +               if (ret <= 0)
143043 +                       return ret;
143044 +               nr_pages = ret;
143045         }
143047 -       return ret;
143048 +       /*
143049 +        * check again because pages were unpinned, and we also might have
143050 +        * had isolation errors and need more pages to migrate.
143051 +        */
143052 +       goto check_again;
143054  #else
143055  static long check_and_migrate_cma_pages(struct mm_struct *mm,
143056 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
143057 index ae907a9c2050..2cf46270c84b 100644
143058 --- a/mm/huge_memory.c
143059 +++ b/mm/huge_memory.c
143060 @@ -637,7 +637,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
143061                 entry = mk_huge_pmd(page, vma->vm_page_prot);
143062                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
143063                 page_add_new_anon_rmap(page, vma, haddr, true);
143064 -               lru_cache_add_inactive_or_unevictable(page, vma);
143065 +               lru_cache_add_page_vma(page, vma, true);
143066                 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
143067                 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
143068                 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
143069 @@ -2418,7 +2418,8 @@ static void __split_huge_page_tail(struct page *head, int tail,
143070  #ifdef CONFIG_64BIT
143071                          (1L << PG_arch_2) |
143072  #endif
143073 -                        (1L << PG_dirty)));
143074 +                        (1L << PG_dirty) |
143075 +                        LRU_GEN_MASK | LRU_USAGE_MASK));
143077         /* ->mapping in first tail page is compound_mapcount */
143078         VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
143079 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
143080 index a86a58ef132d..96b722af092e 100644
143081 --- a/mm/hugetlb.c
143082 +++ b/mm/hugetlb.c
143083 @@ -743,13 +743,20 @@ void hugetlb_fix_reserve_counts(struct inode *inode)
143085         struct hugepage_subpool *spool = subpool_inode(inode);
143086         long rsv_adjust;
143087 +       bool reserved = false;
143089         rsv_adjust = hugepage_subpool_get_pages(spool, 1);
143090 -       if (rsv_adjust) {
143091 +       if (rsv_adjust > 0) {
143092                 struct hstate *h = hstate_inode(inode);
143094 -               hugetlb_acct_memory(h, 1);
143095 +               if (!hugetlb_acct_memory(h, 1))
143096 +                       reserved = true;
143097 +       } else if (!rsv_adjust) {
143098 +               reserved = true;
143099         }
143101 +       if (!reserved)
143102 +               pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
143106 @@ -3898,6 +3905,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
143107                                  * See Documentation/vm/mmu_notifier.rst
143108                                  */
143109                                 huge_ptep_set_wrprotect(src, addr, src_pte);
143110 +                               entry = huge_pte_wrprotect(entry);
143111                         }
143113                         page_dup_rmap(ptepage, true);
143114 diff --git a/mm/kfence/core.c b/mm/kfence/core.c
143115 index d53c91f881a4..f0be2c5038b5 100644
143116 --- a/mm/kfence/core.c
143117 +++ b/mm/kfence/core.c
143118 @@ -10,6 +10,7 @@
143119  #include <linux/atomic.h>
143120  #include <linux/bug.h>
143121  #include <linux/debugfs.h>
143122 +#include <linux/irq_work.h>
143123  #include <linux/kcsan-checks.h>
143124  #include <linux/kfence.h>
143125  #include <linux/kmemleak.h>
143126 @@ -586,6 +587,17 @@ late_initcall(kfence_debugfs_init);
143128  /* === Allocation Gate Timer ================================================ */
143130 +#ifdef CONFIG_KFENCE_STATIC_KEYS
143131 +/* Wait queue to wake up allocation-gate timer task. */
143132 +static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);
143134 +static void wake_up_kfence_timer(struct irq_work *work)
143136 +       wake_up(&allocation_wait);
143138 +static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer);
143139 +#endif
143142   * Set up delayed work, which will enable and disable the static key. We need to
143143   * use a work queue (rather than a simple timer), since enabling and disabling a
143144 @@ -603,25 +615,13 @@ static void toggle_allocation_gate(struct work_struct *work)
143145         if (!READ_ONCE(kfence_enabled))
143146                 return;
143148 -       /* Enable static key, and await allocation to happen. */
143149         atomic_set(&kfence_allocation_gate, 0);
143150  #ifdef CONFIG_KFENCE_STATIC_KEYS
143151 +       /* Enable static key, and await allocation to happen. */
143152         static_branch_enable(&kfence_allocation_key);
143153 -       /*
143154 -        * Await an allocation. Timeout after 1 second, in case the kernel stops
143155 -        * doing allocations, to avoid stalling this worker task for too long.
143156 -        */
143157 -       {
143158 -               unsigned long end_wait = jiffies + HZ;
143160 -               do {
143161 -                       set_current_state(TASK_UNINTERRUPTIBLE);
143162 -                       if (atomic_read(&kfence_allocation_gate) != 0)
143163 -                               break;
143164 -                       schedule_timeout(1);
143165 -               } while (time_before(jiffies, end_wait));
143166 -               __set_current_state(TASK_RUNNING);
143167 -       }
143169 +       wait_event_timeout(allocation_wait, atomic_read(&kfence_allocation_gate), HZ);
143171         /* Disable static key and reset timer. */
143172         static_branch_disable(&kfence_allocation_key);
143173  #endif
143174 @@ -728,6 +728,19 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
143175          */
143176         if (atomic_read(&kfence_allocation_gate) || atomic_inc_return(&kfence_allocation_gate) > 1)
143177                 return NULL;
143178 +#ifdef CONFIG_KFENCE_STATIC_KEYS
143179 +       /*
143180 +        * waitqueue_active() is fully ordered after the update of
143181 +        * kfence_allocation_gate per atomic_inc_return().
143182 +        */
143183 +       if (waitqueue_active(&allocation_wait)) {
143184 +               /*
143185 +                * Calling wake_up() here may deadlock when allocations happen
143186 +                * from within timer code. Use an irq_work to defer it.
143187 +                */
143188 +               irq_work_queue(&wake_up_kfence_timer_work);
143189 +       }
143190 +#endif
143192         if (!READ_ONCE(kfence_enabled))
143193                 return NULL;
143194 diff --git a/mm/khugepaged.c b/mm/khugepaged.c
143195 index a7d6cb912b05..fd14b1e3c6f1 100644
143196 --- a/mm/khugepaged.c
143197 +++ b/mm/khugepaged.c
143198 @@ -716,17 +716,17 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
143199                 if (pte_write(pteval))
143200                         writable = true;
143201         }
143202 -       if (likely(writable)) {
143203 -               if (likely(referenced)) {
143204 -                       result = SCAN_SUCCEED;
143205 -                       trace_mm_collapse_huge_page_isolate(page, none_or_zero,
143206 -                                                           referenced, writable, result);
143207 -                       return 1;
143208 -               }
143209 -       } else {
143211 +       if (unlikely(!writable)) {
143212                 result = SCAN_PAGE_RO;
143213 +       } else if (unlikely(!referenced)) {
143214 +               result = SCAN_LACK_REFERENCED_PAGE;
143215 +       } else {
143216 +               result = SCAN_SUCCEED;
143217 +               trace_mm_collapse_huge_page_isolate(page, none_or_zero,
143218 +                                                   referenced, writable, result);
143219 +               return 1;
143220         }
143222  out:
143223         release_pte_pages(pte, _pte, compound_pagelist);
143224         trace_mm_collapse_huge_page_isolate(page, none_or_zero,
143225 @@ -1199,7 +1199,7 @@ static void collapse_huge_page(struct mm_struct *mm,
143226         spin_lock(pmd_ptl);
143227         BUG_ON(!pmd_none(*pmd));
143228         page_add_new_anon_rmap(new_page, vma, address, true);
143229 -       lru_cache_add_inactive_or_unevictable(new_page, vma);
143230 +       lru_cache_add_page_vma(new_page, vma, true);
143231         pgtable_trans_huge_deposit(mm, pmd, pgtable);
143232         set_pmd_at(mm, address, pmd, _pmd);
143233         update_mmu_cache_pmd(vma, address, pmd);
143234 diff --git a/mm/ksm.c b/mm/ksm.c
143235 index 9694ee2c71de..b32391ccf6d5 100644
143236 --- a/mm/ksm.c
143237 +++ b/mm/ksm.c
143238 @@ -794,6 +794,7 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
143239                 stable_node->rmap_hlist_len--;
143241                 put_anon_vma(rmap_item->anon_vma);
143242 +               rmap_item->head = NULL;
143243                 rmap_item->address &= PAGE_MASK;
143245         } else if (rmap_item->address & UNSTABLE_FLAG) {
143246 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
143247 index e064ac0d850a..594f99eba9c0 100644
143248 --- a/mm/memcontrol.c
143249 +++ b/mm/memcontrol.c
143250 @@ -3181,9 +3181,17 @@ static void drain_obj_stock(struct memcg_stock_pcp *stock)
143251                 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
143253                 if (nr_pages) {
143254 +                       struct mem_cgroup *memcg;
143256                         rcu_read_lock();
143257 -                       __memcg_kmem_uncharge(obj_cgroup_memcg(old), nr_pages);
143258 +retry:
143259 +                       memcg = obj_cgroup_memcg(old);
143260 +                       if (unlikely(!css_tryget(&memcg->css)))
143261 +                               goto retry;
143262                         rcu_read_unlock();
143264 +                       __memcg_kmem_uncharge(memcg, nr_pages);
143265 +                       css_put(&memcg->css);
143266                 }
143268                 /*
143269 @@ -5206,6 +5214,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
143270                 free_mem_cgroup_per_node_info(memcg, node);
143271         free_percpu(memcg->vmstats_percpu);
143272         free_percpu(memcg->vmstats_local);
143273 +       lru_gen_free_mm_list(memcg);
143274         kfree(memcg);
143277 @@ -5258,6 +5267,9 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
143278                 if (alloc_mem_cgroup_per_node_info(memcg, node))
143279                         goto fail;
143281 +       if (lru_gen_alloc_mm_list(memcg))
143282 +               goto fail;
143284         if (memcg_wb_domain_init(memcg, GFP_KERNEL))
143285                 goto fail;
143287 @@ -6162,6 +6174,29 @@ static void mem_cgroup_move_task(void)
143289  #endif
143291 +#ifdef CONFIG_LRU_GEN
143292 +static void mem_cgroup_attach(struct cgroup_taskset *tset)
143294 +       struct cgroup_subsys_state *css;
143295 +       struct task_struct *task = NULL;
143297 +       cgroup_taskset_for_each_leader(task, css, tset)
143298 +               ;
143300 +       if (!task)
143301 +               return;
143303 +       task_lock(task);
143304 +       if (task->mm && task->mm->owner == task)
143305 +               lru_gen_migrate_mm(task->mm);
143306 +       task_unlock(task);
143308 +#else
143309 +static void mem_cgroup_attach(struct cgroup_taskset *tset)
143312 +#endif
143314  static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
143316         if (value == PAGE_COUNTER_MAX)
143317 @@ -6502,6 +6537,7 @@ struct cgroup_subsys memory_cgrp_subsys = {
143318         .css_free = mem_cgroup_css_free,
143319         .css_reset = mem_cgroup_css_reset,
143320         .can_attach = mem_cgroup_can_attach,
143321 +       .attach = mem_cgroup_attach,
143322         .cancel_attach = mem_cgroup_cancel_attach,
143323         .post_attach = mem_cgroup_move_task,
143324         .dfl_cftypes = memory_files,
143325 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
143326 index 24210c9bd843..bd3945446d47 100644
143327 --- a/mm/memory-failure.c
143328 +++ b/mm/memory-failure.c
143329 @@ -1368,7 +1368,7 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
143330                  * communicated in siginfo, see kill_proc()
143331                  */
143332                 start = (page->index << PAGE_SHIFT) & ~(size - 1);
143333 -               unmap_mapping_range(page->mapping, start, start + size, 0);
143334 +               unmap_mapping_range(page->mapping, start, size, 0);
143335         }
143336         kill_procs(&tokill, flags & MF_MUST_KILL, !unmap_success, pfn, flags);
143337         rc = 0;
143338 diff --git a/mm/memory.c b/mm/memory.c
143339 index 550405fc3b5e..a1332ba9c0da 100644
143340 --- a/mm/memory.c
143341 +++ b/mm/memory.c
143342 @@ -73,6 +73,7 @@
143343  #include <linux/perf_event.h>
143344  #include <linux/ptrace.h>
143345  #include <linux/vmalloc.h>
143346 +#include <linux/mm_inline.h>
143348  #include <trace/events/kmem.h>
143350 @@ -839,7 +840,7 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
143351         copy_user_highpage(new_page, page, addr, src_vma);
143352         __SetPageUptodate(new_page);
143353         page_add_new_anon_rmap(new_page, dst_vma, addr, false);
143354 -       lru_cache_add_inactive_or_unevictable(new_page, dst_vma);
143355 +       lru_cache_add_page_vma(new_page, dst_vma, false);
143356         rss[mm_counter(new_page)]++;
143358         /* All done, just insert the new page copy in the child */
143359 @@ -1548,6 +1549,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
143360         mmu_notifier_invalidate_range_end(&range);
143361         tlb_finish_mmu(&tlb);
143363 +EXPORT_SYMBOL(zap_page_range);
143365  /**
143366   * zap_page_range_single - remove user pages in a given range
143367 @@ -2907,7 +2909,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
143368                  */
143369                 ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
143370                 page_add_new_anon_rmap(new_page, vma, vmf->address, false);
143371 -               lru_cache_add_inactive_or_unevictable(new_page, vma);
143372 +               lru_cache_add_page_vma(new_page, vma, true);
143373                 /*
143374                  * We call the notify macro here because, when using secondary
143375                  * mmu page tables (such as kvm shadow page tables), we want the
143376 @@ -3438,9 +3440,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
143377         /* ksm created a completely new copy */
143378         if (unlikely(page != swapcache && swapcache)) {
143379                 page_add_new_anon_rmap(page, vma, vmf->address, false);
143380 -               lru_cache_add_inactive_or_unevictable(page, vma);
143381 +               lru_cache_add_page_vma(page, vma, true);
143382         } else {
143383                 do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
143384 +               lru_gen_activation(page, vma);
143385         }
143387         swap_free(entry);
143388 @@ -3584,7 +3587,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
143390         inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
143391         page_add_new_anon_rmap(page, vma, vmf->address, false);
143392 -       lru_cache_add_inactive_or_unevictable(page, vma);
143393 +       lru_cache_add_page_vma(page, vma, true);
143394  setpte:
143395         set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
143397 @@ -3709,6 +3712,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
143399         add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
143400         page_add_file_rmap(page, true);
143401 +       lru_gen_activation(page, vma);
143402         /*
143403          * deposit and withdraw with pmd lock held
143404          */
143405 @@ -3752,10 +3756,11 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
143406         if (write && !(vma->vm_flags & VM_SHARED)) {
143407                 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
143408                 page_add_new_anon_rmap(page, vma, addr, false);
143409 -               lru_cache_add_inactive_or_unevictable(page, vma);
143410 +               lru_cache_add_page_vma(page, vma, true);
143411         } else {
143412                 inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
143413                 page_add_file_rmap(page, false);
143414 +               lru_gen_activation(page, vma);
143415         }
143416         set_pte_at(vma->vm_mm, addr, vmf->pte, entry);
143418 diff --git a/mm/migrate.c b/mm/migrate.c
143419 index 62b81d5257aa..9a50fd026236 100644
143420 --- a/mm/migrate.c
143421 +++ b/mm/migrate.c
143422 @@ -2973,6 +2973,13 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
143424                         swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
143425                         entry = swp_entry_to_pte(swp_entry);
143426 +               } else {
143427 +                       /*
143428 +                        * For now we only support migrating to un-addressable
143429 +                        * device memory.
143430 +                        */
143431 +                       pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
143432 +                       goto abort;
143433                 }
143434         } else {
143435                 entry = mk_pte(page, vma->vm_page_prot);
143436 @@ -3004,7 +3011,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
143437         inc_mm_counter(mm, MM_ANONPAGES);
143438         page_add_new_anon_rmap(page, vma, addr, false);
143439         if (!is_zone_device_page(page))
143440 -               lru_cache_add_inactive_or_unevictable(page, vma);
143441 +               lru_cache_add_page_vma(page, vma, false);
143442         get_page(page);
143444         if (flush) {
143445 diff --git a/mm/mm_init.c b/mm/mm_init.c
143446 index 8e02e865cc65..6303ed7aa511 100644
143447 --- a/mm/mm_init.c
143448 +++ b/mm/mm_init.c
143449 @@ -71,27 +71,33 @@ void __init mminit_verify_pageflags_layout(void)
143450         width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
143451                 - LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH;
143452         mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
143453 -               "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Flags %d\n",
143454 +               "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d lru gen %d tier %d Flags %d\n",
143455                 SECTIONS_WIDTH,
143456                 NODES_WIDTH,
143457                 ZONES_WIDTH,
143458                 LAST_CPUPID_WIDTH,
143459                 KASAN_TAG_WIDTH,
143460 +               LRU_GEN_WIDTH,
143461 +               LRU_USAGE_WIDTH,
143462                 NR_PAGEFLAGS);
143463         mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
143464 -               "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n",
143465 +               "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d lru gen %d tier %d\n",
143466                 SECTIONS_SHIFT,
143467                 NODES_SHIFT,
143468                 ZONES_SHIFT,
143469                 LAST_CPUPID_SHIFT,
143470 -               KASAN_TAG_WIDTH);
143471 +               KASAN_TAG_WIDTH,
143472 +               LRU_GEN_WIDTH,
143473 +               LRU_USAGE_WIDTH);
143474         mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
143475 -               "Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n",
143476 +               "Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu lru gen %lu tier %lu\n",
143477                 (unsigned long)SECTIONS_PGSHIFT,
143478                 (unsigned long)NODES_PGSHIFT,
143479                 (unsigned long)ZONES_PGSHIFT,
143480                 (unsigned long)LAST_CPUPID_PGSHIFT,
143481 -               (unsigned long)KASAN_TAG_PGSHIFT);
143482 +               (unsigned long)KASAN_TAG_PGSHIFT,
143483 +               (unsigned long)LRU_GEN_PGOFF,
143484 +               (unsigned long)LRU_USAGE_PGOFF);
143485         mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
143486                 "Node/Zone ID: %lu -> %lu\n",
143487                 (unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
143488 diff --git a/mm/mmzone.c b/mm/mmzone.c
143489 index eb89d6e018e2..2ec0d7793424 100644
143490 --- a/mm/mmzone.c
143491 +++ b/mm/mmzone.c
143492 @@ -81,6 +81,8 @@ void lruvec_init(struct lruvec *lruvec)
143494         for_each_lru(lru)
143495                 INIT_LIST_HEAD(&lruvec->lists[lru]);
143497 +       lru_gen_init_lruvec(lruvec);
143500  #if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
143501 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
143502 index cfc72873961d..4bb3cdfc47f8 100644
143503 --- a/mm/page_alloc.c
143504 +++ b/mm/page_alloc.c
143505 @@ -764,32 +764,36 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
143506   */
143507  void init_mem_debugging_and_hardening(void)
143509 +       bool page_poisoning_requested = false;
143511 +#ifdef CONFIG_PAGE_POISONING
143512 +       /*
143513 +        * Page poisoning is debug page alloc for some arches. If
143514 +        * either of those options are enabled, enable poisoning.
143515 +        */
143516 +       if (page_poisoning_enabled() ||
143517 +            (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
143518 +             debug_pagealloc_enabled())) {
143519 +               static_branch_enable(&_page_poisoning_enabled);
143520 +               page_poisoning_requested = true;
143521 +       }
143522 +#endif
143524         if (_init_on_alloc_enabled_early) {
143525 -               if (page_poisoning_enabled())
143526 +               if (page_poisoning_requested)
143527                         pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
143528                                 "will take precedence over init_on_alloc\n");
143529                 else
143530                         static_branch_enable(&init_on_alloc);
143531         }
143532         if (_init_on_free_enabled_early) {
143533 -               if (page_poisoning_enabled())
143534 +               if (page_poisoning_requested)
143535                         pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
143536                                 "will take precedence over init_on_free\n");
143537                 else
143538                         static_branch_enable(&init_on_free);
143539         }
143541 -#ifdef CONFIG_PAGE_POISONING
143542 -       /*
143543 -        * Page poisoning is debug page alloc for some arches. If
143544 -        * either of those options are enabled, enable poisoning.
143545 -        */
143546 -       if (page_poisoning_enabled() ||
143547 -            (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
143548 -             debug_pagealloc_enabled()))
143549 -               static_branch_enable(&_page_poisoning_enabled);
143550 -#endif
143552  #ifdef CONFIG_DEBUG_PAGEALLOC
143553         if (!debug_pagealloc_enabled())
143554                 return;
143555 diff --git a/mm/rmap.c b/mm/rmap.c
143556 index b0fc27e77d6d..d600b282ced5 100644
143557 --- a/mm/rmap.c
143558 +++ b/mm/rmap.c
143559 @@ -72,6 +72,7 @@
143560  #include <linux/page_idle.h>
143561  #include <linux/memremap.h>
143562  #include <linux/userfaultfd_k.h>
143563 +#include <linux/mm_inline.h>
143565  #include <asm/tlbflush.h>
143567 @@ -792,6 +793,11 @@ static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
143568                 }
143570                 if (pvmw.pte) {
143571 +                       /* the multigenerational lru exploits the spatial locality */
143572 +                       if (lru_gen_enabled() && pte_young(*pvmw.pte)) {
143573 +                               lru_gen_scan_around(&pvmw);
143574 +                               referenced++;
143575 +                       }
143576                         if (ptep_clear_flush_young_notify(vma, address,
143577                                                 pvmw.pte)) {
143578                                 /*
143579 diff --git a/mm/shmem.c b/mm/shmem.c
143580 index b2db4ed0fbc7..9dd24a2f0b7a 100644
143581 --- a/mm/shmem.c
143582 +++ b/mm/shmem.c
143583 @@ -2258,25 +2258,11 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
143584  static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
143586         struct shmem_inode_info *info = SHMEM_I(file_inode(file));
143587 +       int ret;
143589 -       if (info->seals & F_SEAL_FUTURE_WRITE) {
143590 -               /*
143591 -                * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
143592 -                * "future write" seal active.
143593 -                */
143594 -               if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
143595 -                       return -EPERM;
143597 -               /*
143598 -                * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
143599 -                * MAP_SHARED and read-only, take care to not allow mprotect to
143600 -                * revert protections on such mappings. Do this only for shared
143601 -                * mappings. For private mappings, don't need to mask
143602 -                * VM_MAYWRITE as we still want them to be COW-writable.
143603 -                */
143604 -               if (vma->vm_flags & VM_SHARED)
143605 -                       vma->vm_flags &= ~(VM_MAYWRITE);
143606 -       }
143607 +       ret = seal_check_future_write(info->seals, vma);
143608 +       if (ret)
143609 +               return ret;
143611         /* arm64 - allow memory tagging on RAM-based files */
143612         vma->vm_flags |= VM_MTE_ALLOWED;
143613 @@ -2375,8 +2361,18 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
143614         pgoff_t offset, max_off;
143616         ret = -ENOMEM;
143617 -       if (!shmem_inode_acct_block(inode, 1))
143618 +       if (!shmem_inode_acct_block(inode, 1)) {
143619 +               /*
143620 +                * We may have got a page, returned -ENOENT triggering a retry,
143621 +                * and now we find ourselves with -ENOMEM. Release the page, to
143622 +                * avoid a BUG_ON in our caller.
143623 +                */
143624 +               if (unlikely(*pagep)) {
143625 +                       put_page(*pagep);
143626 +                       *pagep = NULL;
143627 +               }
143628                 goto out;
143629 +       }
143631         if (!*pagep) {
143632                 page = shmem_alloc_page(gfp, info, pgoff);
143633 @@ -4233,6 +4229,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
143635         return 0;
143637 +EXPORT_SYMBOL_GPL(shmem_zero_setup);
143639  /**
143640   * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
143641 diff --git a/mm/sparse.c b/mm/sparse.c
143642 index 7bd23f9d6cef..33406ea2ecc4 100644
143643 --- a/mm/sparse.c
143644 +++ b/mm/sparse.c
143645 @@ -547,6 +547,7 @@ static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
143646                         pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
143647                                __func__, nid);
143648                         pnum_begin = pnum;
143649 +                       sparse_buffer_fini();
143650                         goto failed;
143651                 }
143652                 check_usemap_section_nr(nid, usage);
143653 diff --git a/mm/swap.c b/mm/swap.c
143654 index 31b844d4ed94..d6458ee1e9f8 100644
143655 --- a/mm/swap.c
143656 +++ b/mm/swap.c
143657 @@ -306,7 +306,7 @@ void lru_note_cost_page(struct page *page)
143659  static void __activate_page(struct page *page, struct lruvec *lruvec)
143661 -       if (!PageActive(page) && !PageUnevictable(page)) {
143662 +       if (!PageUnevictable(page) && !page_is_active(page, lruvec)) {
143663                 int nr_pages = thp_nr_pages(page);
143665                 del_page_from_lru_list(page, lruvec);
143666 @@ -334,10 +334,10 @@ static bool need_activate_page_drain(int cpu)
143667         return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0;
143670 -static void activate_page(struct page *page)
143671 +static void activate_page_on_lru(struct page *page)
143673         page = compound_head(page);
143674 -       if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
143675 +       if (PageLRU(page) && !PageUnevictable(page) && !page_is_active(page, NULL)) {
143676                 struct pagevec *pvec;
143678                 local_lock(&lru_pvecs.lock);
143679 @@ -354,7 +354,7 @@ static inline void activate_page_drain(int cpu)
143683 -static void activate_page(struct page *page)
143684 +static void activate_page_on_lru(struct page *page)
143686         struct lruvec *lruvec;
143688 @@ -368,11 +368,22 @@ static void activate_page(struct page *page)
143690  #endif
143692 -static void __lru_cache_activate_page(struct page *page)
143694 + * If the page is on the LRU, queue it for activation via
143695 + * lru_pvecs.activate_page. Otherwise, assume the page is on a
143696 + * pagevec, mark it active and it'll be moved to the active
143697 + * LRU on the next drain.
143698 + */
143699 +void activate_page(struct page *page)
143701         struct pagevec *pvec;
143702         int i;
143704 +       if (PageLRU(page)) {
143705 +               activate_page_on_lru(page);
143706 +               return;
143707 +       }
143709         local_lock(&lru_pvecs.lock);
143710         pvec = this_cpu_ptr(&lru_pvecs.lru_add);
143712 @@ -420,17 +431,8 @@ void mark_page_accessed(struct page *page)
143713                  * this list is never rotated or maintained, so marking an
143714                  * evictable page accessed has no effect.
143715                  */
143716 -       } else if (!PageActive(page)) {
143717 -               /*
143718 -                * If the page is on the LRU, queue it for activation via
143719 -                * lru_pvecs.activate_page. Otherwise, assume the page is on a
143720 -                * pagevec, mark it active and it'll be moved to the active
143721 -                * LRU on the next drain.
143722 -                */
143723 -               if (PageLRU(page))
143724 -                       activate_page(page);
143725 -               else
143726 -                       __lru_cache_activate_page(page);
143727 +       } else if (!page_inc_usage(page)) {
143728 +               activate_page(page);
143729                 ClearPageReferenced(page);
143730                 workingset_activation(page);
143731         }
143732 @@ -465,15 +467,14 @@ void lru_cache_add(struct page *page)
143733  EXPORT_SYMBOL(lru_cache_add);
143735  /**
143736 - * lru_cache_add_inactive_or_unevictable
143737 + * lru_cache_add_page_vma
143738   * @page:  the page to be added to LRU
143739   * @vma:   vma in which page is mapped for determining reclaimability
143740   *
143741 - * Place @page on the inactive or unevictable LRU list, depending on its
143742 - * evictability.
143743 + * Place @page on an LRU list, depending on its evictability.
143744   */
143745 -void lru_cache_add_inactive_or_unevictable(struct page *page,
143746 -                                        struct vm_area_struct *vma)
143747 +void lru_cache_add_page_vma(struct page *page, struct vm_area_struct *vma,
143748 +                           bool faulting)
143750         bool unevictable;
143752 @@ -490,6 +491,11 @@ void lru_cache_add_inactive_or_unevictable(struct page *page,
143753                 __mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
143754                 count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
143755         }
143757 +       /* tell the multigenerational lru that the page is being faulted in */
143758 +       if (lru_gen_enabled() && !unevictable && faulting)
143759 +               SetPageActive(page);
143761         lru_cache_add(page);
143764 @@ -516,7 +522,7 @@ void lru_cache_add_inactive_or_unevictable(struct page *page,
143765   */
143766  static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
143768 -       bool active = PageActive(page);
143769 +       bool active = page_is_active(page, lruvec);
143770         int nr_pages = thp_nr_pages(page);
143772         if (PageUnevictable(page))
143773 @@ -556,7 +562,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
143775  static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec)
143777 -       if (PageActive(page) && !PageUnevictable(page)) {
143778 +       if (!PageUnevictable(page) && page_is_active(page, lruvec)) {
143779                 int nr_pages = thp_nr_pages(page);
143781                 del_page_from_lru_list(page, lruvec);
143782 @@ -670,7 +676,7 @@ void deactivate_file_page(struct page *page)
143783   */
143784  void deactivate_page(struct page *page)
143786 -       if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
143787 +       if (PageLRU(page) && !PageUnevictable(page) && page_is_active(page, NULL)) {
143788                 struct pagevec *pvec;
143790                 local_lock(&lru_pvecs.lock);
143791 diff --git a/mm/swapfile.c b/mm/swapfile.c
143792 index 084a5b9a18e5..ab3b5ca404fd 100644
143793 --- a/mm/swapfile.c
143794 +++ b/mm/swapfile.c
143795 @@ -1936,7 +1936,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
143796                 page_add_anon_rmap(page, vma, addr, false);
143797         } else { /* ksm created a completely new copy */
143798                 page_add_new_anon_rmap(page, vma, addr, false);
143799 -               lru_cache_add_inactive_or_unevictable(page, vma);
143800 +               lru_cache_add_page_vma(page, vma, false);
143801         }
143802         swap_free(entry);
143803  out:
143804 @@ -2702,6 +2702,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
143805         err = 0;
143806         atomic_inc(&proc_poll_event);
143807         wake_up_interruptible(&proc_poll_wait);
143808 +       /* stop tracking anon if the multigenerational lru is enabled */
143809 +       lru_gen_set_state(false, false, true);
143811  out_dput:
143812         filp_close(victim, NULL);
143813 @@ -3348,6 +3350,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
143814         mutex_unlock(&swapon_mutex);
143815         atomic_inc(&proc_poll_event);
143816         wake_up_interruptible(&proc_poll_wait);
143817 +       /* start tracking anon if the multigenerational lru is enabled */
143818 +       lru_gen_set_state(true, false, true);
143820         error = 0;
143821         goto out;
143822 diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
143823 index 9a3d451402d7..e1d4cd3103b8 100644
143824 --- a/mm/userfaultfd.c
143825 +++ b/mm/userfaultfd.c
143826 @@ -123,7 +123,7 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
143828         inc_mm_counter(dst_mm, MM_ANONPAGES);
143829         page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
143830 -       lru_cache_add_inactive_or_unevictable(page, dst_vma);
143831 +       lru_cache_add_page_vma(page, dst_vma, true);
143833         set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
143835 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
143836 index 4f5f8c907897..64ab133ee816 100644
143837 --- a/mm/vmalloc.c
143838 +++ b/mm/vmalloc.c
143839 @@ -316,6 +316,7 @@ int map_kernel_range_noflush(unsigned long addr, unsigned long size,
143841         return 0;
143843 +EXPORT_SYMBOL(map_kernel_range_noflush);
143845  int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
143846                 struct page **pages)
143847 @@ -2131,6 +2132,7 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
143848                                   NUMA_NO_NODE, GFP_KERNEL,
143849                                   __builtin_return_address(0));
143851 +EXPORT_SYMBOL(get_vm_area);
143853  struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
143854                                 const void *caller)
143855 diff --git a/mm/vmscan.c b/mm/vmscan.c
143856 index 562e87cbd7a1..4a34cc622681 100644
143857 --- a/mm/vmscan.c
143858 +++ b/mm/vmscan.c
143859 @@ -49,6 +49,11 @@
143860  #include <linux/printk.h>
143861  #include <linux/dax.h>
143862  #include <linux/psi.h>
143863 +#include <linux/memory.h>
143864 +#include <linux/pagewalk.h>
143865 +#include <linux/shmem_fs.h>
143866 +#include <linux/ctype.h>
143867 +#include <linux/debugfs.h>
143869  #include <asm/tlbflush.h>
143870  #include <asm/div64.h>
143871 @@ -118,6 +123,19 @@ struct scan_control {
143872         /* The file pages on the current node are dangerously low */
143873         unsigned int file_is_tiny:1;
143875 +       /*
143876 +        * The clean file pages on the current node won't be reclaimed when
143877 +        * their amount is below vm.clean_low_kbytes *unless* we threaten
143878 +        * to OOM or have no free swap space or vm.swappiness=0.
143879 +        */
143880 +       unsigned int clean_below_low:1;
143882 +       /*
143883 +        * The clean file pages on the current node won't be reclaimed when
143884 +        * their amount is below vm.clean_min_kbytes.
143885 +        */
143886 +       unsigned int clean_below_min:1;
143888         /* Allocation order */
143889         s8 order;
143891 @@ -164,10 +182,21 @@ struct scan_control {
143892  #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
143893  #endif
143895 +#if CONFIG_CLEAN_LOW_KBYTES < 0
143896 +#error "CONFIG_CLEAN_LOW_KBYTES must be >= 0"
143897 +#endif
143899 +#if CONFIG_CLEAN_MIN_KBYTES < 0
143900 +#error "CONFIG_CLEAN_MIN_KBYTES must be >= 0"
143901 +#endif
143903 +unsigned long sysctl_clean_low_kbytes __read_mostly = CONFIG_CLEAN_LOW_KBYTES;
143904 +unsigned long sysctl_clean_min_kbytes __read_mostly = CONFIG_CLEAN_MIN_KBYTES;
143907   * From 0 .. 200.  Higher means more swappy.
143908   */
143909 -int vm_swappiness = 60;
143910 +int vm_swappiness = 30;
143912  static void set_task_reclaim_state(struct task_struct *task,
143913                                    struct reclaim_state *rs)
143914 @@ -897,9 +926,11 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
143916         if (PageSwapCache(page)) {
143917                 swp_entry_t swap = { .val = page_private(page) };
143918 -               mem_cgroup_swapout(page, swap);
143920 +               /* get a shadow entry before page_memcg() is cleared */
143921                 if (reclaimed && !mapping_exiting(mapping))
143922                         shadow = workingset_eviction(page, target_memcg);
143923 +               mem_cgroup_swapout(page, swap);
143924                 __delete_from_swap_cache(page, swap, shadow);
143925                 xa_unlock_irqrestore(&mapping->i_pages, flags);
143926                 put_swap_page(page, swap);
143927 @@ -1110,6 +1141,10 @@ static unsigned int shrink_page_list(struct list_head *page_list,
143928                 if (!sc->may_unmap && page_mapped(page))
143929                         goto keep_locked;
143931 +               /* in case the page was found accessed by lru_gen_scan_around() */
143932 +               if (lru_gen_enabled() && !ignore_references && PageReferenced(page))
143933 +                       goto keep_locked;
143935                 may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
143936                         (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
143938 @@ -2224,6 +2259,135 @@ enum scan_balance {
143939         SCAN_FILE,
143942 +static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc)
143944 +       unsigned long file;
143945 +       struct lruvec *target_lruvec;
143947 +       /* the multigenerational lru doesn't use these counters */
143948 +       if (lru_gen_enabled())
143949 +               return;
143951 +       target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
143953 +       /*
143954 +        * Determine the scan balance between anon and file LRUs.
143955 +        */
143956 +       spin_lock_irq(&target_lruvec->lru_lock);
143957 +       sc->anon_cost = target_lruvec->anon_cost;
143958 +       sc->file_cost = target_lruvec->file_cost;
143959 +       spin_unlock_irq(&target_lruvec->lru_lock);
143961 +       /*
143962 +        * Target desirable inactive:active list ratios for the anon
143963 +        * and file LRU lists.
143964 +        */
143965 +       if (!sc->force_deactivate) {
143966 +               unsigned long refaults;
143968 +               refaults = lruvec_page_state(target_lruvec,
143969 +                               WORKINGSET_ACTIVATE_ANON);
143970 +               if (refaults != target_lruvec->refaults[0] ||
143971 +                       inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
143972 +                       sc->may_deactivate |= DEACTIVATE_ANON;
143973 +               else
143974 +                       sc->may_deactivate &= ~DEACTIVATE_ANON;
143976 +               /*
143977 +                * When refaults are being observed, it means a new
143978 +                * workingset is being established. Deactivate to get
143979 +                * rid of any stale active pages quickly.
143980 +                */
143981 +               refaults = lruvec_page_state(target_lruvec,
143982 +                               WORKINGSET_ACTIVATE_FILE);
143983 +               if (refaults != target_lruvec->refaults[1] ||
143984 +                   inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
143985 +                       sc->may_deactivate |= DEACTIVATE_FILE;
143986 +               else
143987 +                       sc->may_deactivate &= ~DEACTIVATE_FILE;
143988 +       } else
143989 +               sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
143991 +       /*
143992 +        * If we have plenty of inactive file pages that aren't
143993 +        * thrashing, try to reclaim those first before touching
143994 +        * anonymous pages.
143995 +        */
143996 +       file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
143997 +       if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
143998 +               sc->cache_trim_mode = 1;
143999 +       else
144000 +               sc->cache_trim_mode = 0;
144002 +       /*
144003 +        * Prevent the reclaimer from falling into the cache trap: as
144004 +        * cache pages start out inactive, every cache fault will tip
144005 +        * the scan balance towards the file LRU.  And as the file LRU
144006 +        * shrinks, so does the window for rotation from references.
144007 +        * This means we have a runaway feedback loop where a tiny
144008 +        * thrashing file LRU becomes infinitely more attractive than
144009 +        * anon pages.  Try to detect this based on file LRU size.
144010 +        */
144011 +       if (!cgroup_reclaim(sc)) {
144012 +               unsigned long total_high_wmark = 0;
144013 +               unsigned long free, anon;
144014 +               int z;
144016 +               free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
144017 +               file = node_page_state(pgdat, NR_ACTIVE_FILE) +
144018 +                          node_page_state(pgdat, NR_INACTIVE_FILE);
144020 +               for (z = 0; z < MAX_NR_ZONES; z++) {
144021 +                       struct zone *zone = &pgdat->node_zones[z];
144023 +                       if (!managed_zone(zone))
144024 +                               continue;
144026 +                       total_high_wmark += high_wmark_pages(zone);
144027 +               }
144029 +               /*
144030 +                * Consider anon: if that's low too, this isn't a
144031 +                * runaway file reclaim problem, but rather just
144032 +                * extreme pressure. Reclaim as per usual then.
144033 +                */
144034 +               anon = node_page_state(pgdat, NR_INACTIVE_ANON);
144036 +               sc->file_is_tiny =
144037 +                       file + free <= total_high_wmark &&
144038 +                       !(sc->may_deactivate & DEACTIVATE_ANON) &&
144039 +                       anon >> sc->priority;
144041 +               /*
144042 +               * Check the number of clean file pages to protect them from
144043 +               * reclaiming if their amount is below the specified.
144044 +               */
144045 +               if (sysctl_clean_low_kbytes || sysctl_clean_min_kbytes) {
144046 +                       unsigned long reclaimable_file, dirty, clean;
144048 +                       reclaimable_file =
144049 +                               node_page_state(pgdat, NR_ACTIVE_FILE) +
144050 +                               node_page_state(pgdat, NR_INACTIVE_FILE) +
144051 +                               node_page_state(pgdat, NR_ISOLATED_FILE);
144052 +                       dirty = node_page_state(pgdat, NR_FILE_DIRTY);
144053 +                       /*
144054 +                       * node_page_state() sum can go out of sync since
144055 +                       * all the values are not read at once.
144056 +                       */
144057 +                       if (likely(reclaimable_file > dirty))
144058 +                               clean = (reclaimable_file - dirty) << (PAGE_SHIFT - 10);
144059 +                       else
144060 +                               clean = 0;
144062 +                       sc->clean_below_low = clean < sysctl_clean_low_kbytes;
144063 +                       sc->clean_below_min = clean < sysctl_clean_min_kbytes;
144064 +               } else {
144065 +                       sc->clean_below_low = false;
144066 +                       sc->clean_below_min = false;
144067 +               }
144068 +       }
144072   * Determine how aggressively the anon and file LRU lists should be
144073   * scanned.  The relative value of each set of LRU lists is determined
144074 @@ -2281,6 +2445,16 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
144075                 goto out;
144076         }
144078 +       /*
144079 +        * Force-scan anon if clean file pages is under vm.clean_min_kbytes
144080 +        * or vm.clean_low_kbytes (unless the swappiness setting
144081 +        * disagrees with swapping).
144082 +        */
144083 +       if ((sc->clean_below_low || sc->clean_below_min) && swappiness) {
144084 +               scan_balance = SCAN_ANON;
144085 +               goto out;
144086 +       }
144088         /*
144089          * If there is enough inactive page cache, we do not reclaim
144090          * anything from the anonymous working right now.
144091 @@ -2417,10 +2591,30 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
144092                         BUG();
144093                 }
144095 +               /*
144096 +                * Don't reclaim clean file pages when their amount is below
144097 +                * vm.clean_min_kbytes.
144098 +                */
144099 +               if (file && sc->clean_below_min)
144100 +                       scan = 0;
144102                 nr[lru] = scan;
144103         }
144106 +#ifdef CONFIG_LRU_GEN
144107 +static void age_lru_gens(struct pglist_data *pgdat, struct scan_control *sc);
144108 +static void shrink_lru_gens(struct lruvec *lruvec, struct scan_control *sc);
144109 +#else
144110 +static void age_lru_gens(struct pglist_data *pgdat, struct scan_control *sc)
144114 +static void shrink_lru_gens(struct lruvec *lruvec, struct scan_control *sc)
144117 +#endif
144119  static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
144121         unsigned long nr[NR_LRU_LISTS];
144122 @@ -2432,6 +2626,11 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
144123         struct blk_plug plug;
144124         bool scan_adjusted;
144126 +       if (lru_gen_enabled()) {
144127 +               shrink_lru_gens(lruvec, sc);
144128 +               return;
144129 +       }
144131         get_scan_count(lruvec, sc, nr);
144133         /* Record the original scan target for proportional adjustments later */
144134 @@ -2669,7 +2868,6 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
144135         unsigned long nr_reclaimed, nr_scanned;
144136         struct lruvec *target_lruvec;
144137         bool reclaimable = false;
144138 -       unsigned long file;
144140         target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
144142 @@ -2679,93 +2877,7 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
144143         nr_reclaimed = sc->nr_reclaimed;
144144         nr_scanned = sc->nr_scanned;
144146 -       /*
144147 -        * Determine the scan balance between anon and file LRUs.
144148 -        */
144149 -       spin_lock_irq(&target_lruvec->lru_lock);
144150 -       sc->anon_cost = target_lruvec->anon_cost;
144151 -       sc->file_cost = target_lruvec->file_cost;
144152 -       spin_unlock_irq(&target_lruvec->lru_lock);
144154 -       /*
144155 -        * Target desirable inactive:active list ratios for the anon
144156 -        * and file LRU lists.
144157 -        */
144158 -       if (!sc->force_deactivate) {
144159 -               unsigned long refaults;
144161 -               refaults = lruvec_page_state(target_lruvec,
144162 -                               WORKINGSET_ACTIVATE_ANON);
144163 -               if (refaults != target_lruvec->refaults[0] ||
144164 -                       inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
144165 -                       sc->may_deactivate |= DEACTIVATE_ANON;
144166 -               else
144167 -                       sc->may_deactivate &= ~DEACTIVATE_ANON;
144169 -               /*
144170 -                * When refaults are being observed, it means a new
144171 -                * workingset is being established. Deactivate to get
144172 -                * rid of any stale active pages quickly.
144173 -                */
144174 -               refaults = lruvec_page_state(target_lruvec,
144175 -                               WORKINGSET_ACTIVATE_FILE);
144176 -               if (refaults != target_lruvec->refaults[1] ||
144177 -                   inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
144178 -                       sc->may_deactivate |= DEACTIVATE_FILE;
144179 -               else
144180 -                       sc->may_deactivate &= ~DEACTIVATE_FILE;
144181 -       } else
144182 -               sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
144184 -       /*
144185 -        * If we have plenty of inactive file pages that aren't
144186 -        * thrashing, try to reclaim those first before touching
144187 -        * anonymous pages.
144188 -        */
144189 -       file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
144190 -       if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
144191 -               sc->cache_trim_mode = 1;
144192 -       else
144193 -               sc->cache_trim_mode = 0;
144195 -       /*
144196 -        * Prevent the reclaimer from falling into the cache trap: as
144197 -        * cache pages start out inactive, every cache fault will tip
144198 -        * the scan balance towards the file LRU.  And as the file LRU
144199 -        * shrinks, so does the window for rotation from references.
144200 -        * This means we have a runaway feedback loop where a tiny
144201 -        * thrashing file LRU becomes infinitely more attractive than
144202 -        * anon pages.  Try to detect this based on file LRU size.
144203 -        */
144204 -       if (!cgroup_reclaim(sc)) {
144205 -               unsigned long total_high_wmark = 0;
144206 -               unsigned long free, anon;
144207 -               int z;
144209 -               free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
144210 -               file = node_page_state(pgdat, NR_ACTIVE_FILE) +
144211 -                          node_page_state(pgdat, NR_INACTIVE_FILE);
144213 -               for (z = 0; z < MAX_NR_ZONES; z++) {
144214 -                       struct zone *zone = &pgdat->node_zones[z];
144215 -                       if (!managed_zone(zone))
144216 -                               continue;
144218 -                       total_high_wmark += high_wmark_pages(zone);
144219 -               }
144221 -               /*
144222 -                * Consider anon: if that's low too, this isn't a
144223 -                * runaway file reclaim problem, but rather just
144224 -                * extreme pressure. Reclaim as per usual then.
144225 -                */
144226 -               anon = node_page_state(pgdat, NR_INACTIVE_ANON);
144228 -               sc->file_is_tiny =
144229 -                       file + free <= total_high_wmark &&
144230 -                       !(sc->may_deactivate & DEACTIVATE_ANON) &&
144231 -                       anon >> sc->priority;
144232 -       }
144233 +       prepare_scan_count(pgdat, sc);
144235         shrink_node_memcgs(pgdat, sc);
144237 @@ -2985,6 +3097,10 @@ static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat)
144238         struct lruvec *target_lruvec;
144239         unsigned long refaults;
144241 +       /* the multigenerational lru doesn't use these counters */
144242 +       if (lru_gen_enabled())
144243 +               return;
144245         target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
144246         refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_ANON);
144247         target_lruvec->refaults[0] = refaults;
144248 @@ -3359,6 +3475,11 @@ static void age_active_anon(struct pglist_data *pgdat,
144249         struct mem_cgroup *memcg;
144250         struct lruvec *lruvec;
144252 +       if (lru_gen_enabled()) {
144253 +               age_lru_gens(pgdat, sc);
144254 +               return;
144255 +       }
144257         if (!total_swap_pages)
144258                 return;
144260 @@ -4304,3 +4425,2365 @@ void check_move_unevictable_pages(struct pagevec *pvec)
144261         }
144263  EXPORT_SYMBOL_GPL(check_move_unevictable_pages);
144265 +#ifdef CONFIG_LRU_GEN
144268 + * After pages are faulted in, the aging must scan them twice before the
144269 + * eviction can. The first scan clears the accessed bit set during initial
144270 + * faults. And the second scan makes sure they haven't been used since the
144271 + * first.
144272 + */
144273 +#define MIN_NR_GENS    2
144275 +#define MAX_BATCH_SIZE 8192
144277 +/******************************************************************************
144278 + *                          shorthand helpers
144279 + ******************************************************************************/
144281 +#define DEFINE_MAX_SEQ()                                               \
144282 +       unsigned long max_seq = READ_ONCE(lruvec->evictable.max_seq)
144284 +#define DEFINE_MIN_SEQ()                                               \
144285 +       unsigned long min_seq[ANON_AND_FILE] = {                        \
144286 +               READ_ONCE(lruvec->evictable.min_seq[0]),                \
144287 +               READ_ONCE(lruvec->evictable.min_seq[1]),                \
144288 +       }
144290 +#define for_each_type_zone(file, zone)                                 \
144291 +       for ((file) = 0; (file) < ANON_AND_FILE; (file)++)              \
144292 +               for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++)
144294 +#define for_each_gen_type_zone(gen, file, zone)                                \
144295 +       for ((gen) = 0; (gen) < MAX_NR_GENS; (gen)++)                   \
144296 +               for ((file) = 0; (file) < ANON_AND_FILE; (file)++)      \
144297 +                       for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++)
144299 +static int get_nr_gens(struct lruvec *lruvec, int file)
144301 +       return lruvec->evictable.max_seq - lruvec->evictable.min_seq[file] + 1;
144304 +static int min_nr_gens(unsigned long max_seq, unsigned long *min_seq, int swappiness)
144306 +       return max_seq - max(min_seq[!swappiness], min_seq[1]) + 1;
144309 +static int max_nr_gens(unsigned long max_seq, unsigned long *min_seq, int swappiness)
144311 +       return max_seq - min(min_seq[!swappiness], min_seq[1]) + 1;
144314 +static bool __maybe_unused seq_is_valid(struct lruvec *lruvec)
144316 +       lockdep_assert_held(&lruvec->lru_lock);
144318 +       return get_nr_gens(lruvec, 0) >= MIN_NR_GENS &&
144319 +              get_nr_gens(lruvec, 0) <= MAX_NR_GENS &&
144320 +              get_nr_gens(lruvec, 1) >= MIN_NR_GENS &&
144321 +              get_nr_gens(lruvec, 1) <= MAX_NR_GENS;
144324 +/******************************************************************************
144325 + *                          refault feedback loop
144326 + ******************************************************************************/
144329 + * A feedback loop modeled after the PID controller. Currently supports the
144330 + * proportional (P) and the integral (I) terms; the derivative (D) term can be
144331 + * added if necessary. The setpoint (SP) is the desired position; the process
144332 + * variable (PV) is the measured position. The error is the difference between
144333 + * the SP and the PV. A positive error results in a positive control output
144334 + * correction, which, in our case, is to allow eviction.
144336 + * The P term is the current refault rate refaulted/(evicted+activated), which
144337 + * has a weight of 1. The I term is the arithmetic mean of the last N refault
144338 + * rates, weighted by geometric series 1/2, 1/4, ..., 1/(1<<N).
144340 + * Our goal is to make sure upper tiers have similar refault rates as the base
144341 + * tier. That is we try to be fair to all tiers by maintaining similar refault
144342 + * rates across them.
144343 + */
144344 +struct controller_pos {
144345 +       unsigned long refaulted;
144346 +       unsigned long total;
144347 +       int gain;
144350 +static void read_controller_pos(struct controller_pos *pos, struct lruvec *lruvec,
144351 +                               int file, int tier, int gain)
144353 +       struct lrugen *lrugen = &lruvec->evictable;
144354 +       int sid = sid_from_seq_or_gen(lrugen->min_seq[file]);
144356 +       pos->refaulted = lrugen->avg_refaulted[file][tier] +
144357 +                        atomic_long_read(&lrugen->refaulted[sid][file][tier]);
144358 +       pos->total = lrugen->avg_total[file][tier] +
144359 +                    atomic_long_read(&lrugen->evicted[sid][file][tier]);
144360 +       if (tier)
144361 +               pos->total += lrugen->activated[sid][file][tier - 1];
144362 +       pos->gain = gain;
144365 +static void reset_controller_pos(struct lruvec *lruvec, int gen, int file)
144367 +       int tier;
144368 +       int sid = sid_from_seq_or_gen(gen);
144369 +       struct lrugen *lrugen = &lruvec->evictable;
144370 +       bool carryover = gen == lru_gen_from_seq(lrugen->min_seq[file]);
144372 +       if (!carryover && NR_STAT_GENS == 1)
144373 +               return;
144375 +       for (tier = 0; tier < MAX_NR_TIERS; tier++) {
144376 +               if (carryover) {
144377 +                       unsigned long sum;
144379 +                       sum = lrugen->avg_refaulted[file][tier] +
144380 +                             atomic_long_read(&lrugen->refaulted[sid][file][tier]);
144381 +                       WRITE_ONCE(lrugen->avg_refaulted[file][tier], sum >> 1);
144383 +                       sum = lrugen->avg_total[file][tier] +
144384 +                             atomic_long_read(&lrugen->evicted[sid][file][tier]);
144385 +                       if (tier)
144386 +                               sum += lrugen->activated[sid][file][tier - 1];
144387 +                       WRITE_ONCE(lrugen->avg_total[file][tier], sum >> 1);
144389 +                       if (NR_STAT_GENS > 1)
144390 +                               continue;
144391 +               }
144393 +               atomic_long_set(&lrugen->refaulted[sid][file][tier], 0);
144394 +               atomic_long_set(&lrugen->evicted[sid][file][tier], 0);
144395 +               if (tier)
144396 +                       WRITE_ONCE(lrugen->activated[sid][file][tier - 1], 0);
144397 +       }
144400 +static bool positive_ctrl_err(struct controller_pos *sp, struct controller_pos *pv)
144402 +       /*
144403 +        * Allow eviction if the PV has a limited number of refaulted pages or a
144404 +        * lower refault rate than the SP.
144405 +        */
144406 +       return pv->refaulted < SWAP_CLUSTER_MAX ||
144407 +              pv->refaulted * max(sp->total, 1UL) * sp->gain <=
144408 +              sp->refaulted * max(pv->total, 1UL) * pv->gain;
144411 +/******************************************************************************
144412 + *                          mm_struct list
144413 + ******************************************************************************/
144415 +enum {
144416 +       MM_SCHED_ACTIVE,        /* running processes */
144417 +       MM_SCHED_INACTIVE,      /* sleeping processes */
144418 +       MM_LOCK_CONTENTION,     /* lock contentions */
144419 +       MM_VMA_INTERVAL,        /* VMAs within the range of current table */
144420 +       MM_LEAF_OTHER_NODE,     /* entries not from node under reclaim */
144421 +       MM_LEAF_OTHER_MEMCG,    /* entries not from memcg under reclaim */
144422 +       MM_LEAF_OLD,            /* old entries */
144423 +       MM_LEAF_YOUNG,          /* young entries */
144424 +       MM_LEAF_DIRTY,          /* dirty entries */
144425 +       MM_LEAF_HOLE,           /* non-present entries */
144426 +       MM_NONLEAF_OLD,         /* old non-leaf pmd entries */
144427 +       MM_NONLEAF_YOUNG,       /* young non-leaf pmd entries */
144428 +       NR_MM_STATS
144431 +/* mnemonic codes for the stats above */
144432 +#define MM_STAT_CODES          "aicvnmoydhlu"
144434 +struct lru_gen_mm_list {
144435 +       /* the head of a global or per-memcg mm_struct list */
144436 +       struct list_head head;
144437 +       /* protects the list */
144438 +       spinlock_t lock;
144439 +       struct {
144440 +               /* set to max_seq after each round of walk */
144441 +               unsigned long cur_seq;
144442 +               /* the next mm on the list to walk */
144443 +               struct list_head *iter;
144444 +               /* to wait for the last worker to finish */
144445 +               struct wait_queue_head wait;
144446 +               /* the number of concurrent workers */
144447 +               int nr_workers;
144448 +               /* stats for debugging */
144449 +               unsigned long stats[NR_STAT_GENS][NR_MM_STATS];
144450 +       } nodes[0];
144453 +static struct lru_gen_mm_list *global_mm_list;
144455 +static struct lru_gen_mm_list *alloc_mm_list(void)
144457 +       int nid;
144458 +       struct lru_gen_mm_list *mm_list;
144460 +       mm_list = kzalloc(struct_size(mm_list, nodes, nr_node_ids), GFP_KERNEL);
144461 +       if (!mm_list)
144462 +               return NULL;
144464 +       INIT_LIST_HEAD(&mm_list->head);
144465 +       spin_lock_init(&mm_list->lock);
144467 +       for_each_node(nid) {
144468 +               mm_list->nodes[nid].cur_seq = MIN_NR_GENS;
144469 +               mm_list->nodes[nid].iter = &mm_list->head;
144470 +               init_waitqueue_head(&mm_list->nodes[nid].wait);
144471 +       }
144473 +       return mm_list;
144476 +static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg)
144478 +#ifdef CONFIG_MEMCG
144479 +       if (!mem_cgroup_disabled())
144480 +               return memcg ? memcg->mm_list : root_mem_cgroup->mm_list;
144481 +#endif
144482 +       VM_BUG_ON(memcg);
144484 +       return global_mm_list;
144487 +void lru_gen_init_mm(struct mm_struct *mm)
144489 +       int file;
144491 +       INIT_LIST_HEAD(&mm->lrugen.list);
144492 +#ifdef CONFIG_MEMCG
144493 +       mm->lrugen.memcg = NULL;
144494 +#endif
144495 +#ifndef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
144496 +       atomic_set(&mm->lrugen.nr_cpus, 0);
144497 +#endif
144498 +       for (file = 0; file < ANON_AND_FILE; file++)
144499 +               nodes_clear(mm->lrugen.nodes[file]);
144502 +void lru_gen_add_mm(struct mm_struct *mm)
144504 +       struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);
144505 +       struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
144507 +       VM_BUG_ON_MM(!list_empty(&mm->lrugen.list), mm);
144508 +#ifdef CONFIG_MEMCG
144509 +       VM_BUG_ON_MM(mm->lrugen.memcg, mm);
144510 +       WRITE_ONCE(mm->lrugen.memcg, memcg);
144511 +#endif
144512 +       spin_lock(&mm_list->lock);
144513 +       list_add_tail(&mm->lrugen.list, &mm_list->head);
144514 +       spin_unlock(&mm_list->lock);
144517 +void lru_gen_del_mm(struct mm_struct *mm)
144519 +       int nid;
144520 +#ifdef CONFIG_MEMCG
144521 +       struct lru_gen_mm_list *mm_list = get_mm_list(mm->lrugen.memcg);
144522 +#else
144523 +       struct lru_gen_mm_list *mm_list = get_mm_list(NULL);
144524 +#endif
144526 +       spin_lock(&mm_list->lock);
144528 +       for_each_node(nid) {
144529 +               if (mm_list->nodes[nid].iter != &mm->lrugen.list)
144530 +                       continue;
144532 +               mm_list->nodes[nid].iter = mm_list->nodes[nid].iter->next;
144533 +               if (mm_list->nodes[nid].iter == &mm_list->head)
144534 +                       WRITE_ONCE(mm_list->nodes[nid].cur_seq,
144535 +                                  mm_list->nodes[nid].cur_seq + 1);
144536 +       }
144538 +       list_del_init(&mm->lrugen.list);
144540 +       spin_unlock(&mm_list->lock);
144542 +#ifdef CONFIG_MEMCG
144543 +       mem_cgroup_put(mm->lrugen.memcg);
144544 +       WRITE_ONCE(mm->lrugen.memcg, NULL);
144545 +#endif
144548 +#ifdef CONFIG_MEMCG
144549 +int lru_gen_alloc_mm_list(struct mem_cgroup *memcg)
144551 +       if (mem_cgroup_disabled())
144552 +               return 0;
144554 +       memcg->mm_list = alloc_mm_list();
144556 +       return memcg->mm_list ? 0 : -ENOMEM;
144559 +void lru_gen_free_mm_list(struct mem_cgroup *memcg)
144561 +       kfree(memcg->mm_list);
144562 +       memcg->mm_list = NULL;
144565 +void lru_gen_migrate_mm(struct mm_struct *mm)
144567 +       struct mem_cgroup *memcg;
144569 +       lockdep_assert_held(&mm->owner->alloc_lock);
144571 +       if (mem_cgroup_disabled())
144572 +               return;
144574 +       rcu_read_lock();
144575 +       memcg = mem_cgroup_from_task(mm->owner);
144576 +       rcu_read_unlock();
144577 +       if (memcg == mm->lrugen.memcg)
144578 +               return;
144580 +       VM_BUG_ON_MM(!mm->lrugen.memcg, mm);
144581 +       VM_BUG_ON_MM(list_empty(&mm->lrugen.list), mm);
144583 +       lru_gen_del_mm(mm);
144584 +       lru_gen_add_mm(mm);
144587 +static bool mm_has_migrated(struct mm_struct *mm, struct mem_cgroup *memcg)
144589 +       return READ_ONCE(mm->lrugen.memcg) != memcg;
144591 +#else
144592 +static bool mm_has_migrated(struct mm_struct *mm, struct mem_cgroup *memcg)
144594 +       return false;
144596 +#endif
144598 +struct mm_walk_args {
144599 +       struct mem_cgroup *memcg;
144600 +       unsigned long max_seq;
144601 +       unsigned long next_addr;
144602 +       unsigned long start_pfn;
144603 +       unsigned long end_pfn;
144604 +       int node_id;
144605 +       int batch_size;
144606 +       int mm_stats[NR_MM_STATS];
144607 +       int nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
144608 +       bool should_walk[ANON_AND_FILE];
144609 +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG)
144610 +       unsigned long bitmap[BITS_TO_LONGS(PTRS_PER_PMD)];
144611 +#endif
144614 +static void reset_mm_stats(struct lru_gen_mm_list *mm_list, bool last,
144615 +                          struct mm_walk_args *args)
144617 +       int i;
144618 +       int nid = args->node_id;
144619 +       int sid = sid_from_seq_or_gen(args->max_seq);
144621 +       lockdep_assert_held(&mm_list->lock);
144623 +       for (i = 0; i < NR_MM_STATS; i++) {
144624 +               WRITE_ONCE(mm_list->nodes[nid].stats[sid][i],
144625 +                          mm_list->nodes[nid].stats[sid][i] + args->mm_stats[i]);
144626 +               args->mm_stats[i] = 0;
144627 +       }
144629 +       if (!last || NR_STAT_GENS == 1)
144630 +               return;
144632 +       sid = sid_from_seq_or_gen(args->max_seq + 1);
144633 +       for (i = 0; i < NR_MM_STATS; i++)
144634 +               WRITE_ONCE(mm_list->nodes[nid].stats[sid][i], 0);
144637 +static bool should_skip_mm(struct mm_struct *mm, int nid, int swappiness)
144639 +       int file;
144640 +       unsigned long size = 0;
144642 +       if (mm_is_oom_victim(mm))
144643 +               return true;
144645 +       for (file = !swappiness; file < ANON_AND_FILE; file++) {
144646 +               if (lru_gen_mm_is_active(mm) || node_isset(nid, mm->lrugen.nodes[file]))
144647 +                       size += file ? get_mm_counter(mm, MM_FILEPAGES) :
144648 +                                      get_mm_counter(mm, MM_ANONPAGES) +
144649 +                                      get_mm_counter(mm, MM_SHMEMPAGES);
144650 +       }
144652 +       /* leave the legwork to the rmap if mapped pages are too sparse */
144653 +       if (size < max(SWAP_CLUSTER_MAX, mm_pgtables_bytes(mm) / PAGE_SIZE))
144654 +               return true;
144656 +       return !mmget_not_zero(mm);
144659 +/* To support multiple workers that concurrently walk mm_struct list. */
144660 +static bool get_next_mm(struct mm_walk_args *args, int swappiness, struct mm_struct **iter)
144662 +       bool last = true;
144663 +       struct mm_struct *mm = NULL;
144664 +       int nid = args->node_id;
144665 +       struct lru_gen_mm_list *mm_list = get_mm_list(args->memcg);
144667 +       if (*iter)
144668 +               mmput_async(*iter);
144669 +       else if (args->max_seq <= READ_ONCE(mm_list->nodes[nid].cur_seq))
144670 +               return false;
144672 +       spin_lock(&mm_list->lock);
144674 +       VM_BUG_ON(args->max_seq > mm_list->nodes[nid].cur_seq + 1);
144675 +       VM_BUG_ON(*iter && args->max_seq < mm_list->nodes[nid].cur_seq);
144676 +       VM_BUG_ON(*iter && !mm_list->nodes[nid].nr_workers);
144678 +       if (args->max_seq <= mm_list->nodes[nid].cur_seq) {
144679 +               last = *iter;
144680 +               goto done;
144681 +       }
144683 +       if (mm_list->nodes[nid].iter == &mm_list->head) {
144684 +               VM_BUG_ON(*iter || mm_list->nodes[nid].nr_workers);
144685 +               mm_list->nodes[nid].iter = mm_list->nodes[nid].iter->next;
144686 +       }
144688 +       while (!mm && mm_list->nodes[nid].iter != &mm_list->head) {
144689 +               mm = list_entry(mm_list->nodes[nid].iter, struct mm_struct, lrugen.list);
144690 +               mm_list->nodes[nid].iter = mm_list->nodes[nid].iter->next;
144691 +               if (should_skip_mm(mm, nid, swappiness))
144692 +                       mm = NULL;
144694 +               args->mm_stats[mm ? MM_SCHED_ACTIVE : MM_SCHED_INACTIVE]++;
144695 +       }
144697 +       if (mm_list->nodes[nid].iter == &mm_list->head)
144698 +               WRITE_ONCE(mm_list->nodes[nid].cur_seq,
144699 +                          mm_list->nodes[nid].cur_seq + 1);
144700 +done:
144701 +       if (*iter && !mm)
144702 +               mm_list->nodes[nid].nr_workers--;
144703 +       if (!*iter && mm)
144704 +               mm_list->nodes[nid].nr_workers++;
144706 +       last = last && !mm_list->nodes[nid].nr_workers &&
144707 +              mm_list->nodes[nid].iter == &mm_list->head;
144709 +       reset_mm_stats(mm_list, last, args);
144711 +       spin_unlock(&mm_list->lock);
144713 +       *iter = mm;
144715 +       return last;
144718 +/******************************************************************************
144719 + *                          the aging
144720 + ******************************************************************************/
144722 +static void update_batch_size(struct page *page, int old_gen, int new_gen,
144723 +                             struct mm_walk_args *args)
144725 +       int file = page_is_file_lru(page);
144726 +       int zone = page_zonenum(page);
144727 +       int delta = thp_nr_pages(page);
144729 +       VM_BUG_ON(old_gen >= MAX_NR_GENS);
144730 +       VM_BUG_ON(new_gen >= MAX_NR_GENS);
144732 +       args->batch_size++;
144734 +       args->nr_pages[old_gen][file][zone] -= delta;
144735 +       args->nr_pages[new_gen][file][zone] += delta;
144738 +static void reset_batch_size(struct lruvec *lruvec, struct mm_walk_args *args)
144740 +       int gen, file, zone;
144741 +       struct lrugen *lrugen = &lruvec->evictable;
144743 +       args->batch_size = 0;
144745 +       spin_lock_irq(&lruvec->lru_lock);
144747 +       for_each_gen_type_zone(gen, file, zone) {
144748 +               enum lru_list lru = LRU_FILE * file;
144749 +               int total = args->nr_pages[gen][file][zone];
144751 +               if (!total)
144752 +                       continue;
144754 +               args->nr_pages[gen][file][zone] = 0;
144755 +               WRITE_ONCE(lrugen->sizes[gen][file][zone],
144756 +                          lrugen->sizes[gen][file][zone] + total);
144758 +               if (lru_gen_is_active(lruvec, gen))
144759 +                       lru += LRU_ACTIVE;
144760 +               update_lru_size(lruvec, lru, zone, total);
144761 +       }
144763 +       spin_unlock_irq(&lruvec->lru_lock);
144766 +static int page_update_gen(struct page *page, int new_gen)
144768 +       int old_gen;
144769 +       unsigned long old_flags, new_flags;
144771 +       VM_BUG_ON(new_gen >= MAX_NR_GENS);
144773 +       do {
144774 +               old_flags = READ_ONCE(page->flags);
144776 +               old_gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
144777 +               if (old_gen < 0)
144778 +                       new_flags = old_flags | BIT(PG_referenced);
144779 +               else
144780 +                       new_flags = (old_flags & ~(LRU_GEN_MASK | LRU_USAGE_MASK |
144781 +                                    LRU_TIER_FLAGS)) | ((new_gen + 1UL) << LRU_GEN_PGOFF);
144783 +               if (old_flags == new_flags)
144784 +                       break;
144785 +       } while (cmpxchg(&page->flags, old_flags, new_flags) != old_flags);
144787 +       return old_gen;
144790 +static int should_skip_vma(unsigned long start, unsigned long end, struct mm_walk *walk)
144792 +       struct address_space *mapping;
144793 +       struct vm_area_struct *vma = walk->vma;
144794 +       struct mm_walk_args *args = walk->private;
144796 +       if (!vma_is_accessible(vma) || is_vm_hugetlb_page(vma) ||
144797 +           (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)))
144798 +               return true;
144800 +       if (vma_is_anonymous(vma))
144801 +               return !args->should_walk[0];
144803 +       if (WARN_ON_ONCE(!vma->vm_file || !vma->vm_file->f_mapping))
144804 +               return true;
144806 +       mapping = vma->vm_file->f_mapping;
144807 +       if (!mapping->a_ops->writepage)
144808 +               return true;
144810 +       if (shmem_mapping(mapping))
144811 +               return !args->should_walk[0] ||
144812 +                      mapping_unevictable(vma->vm_file->f_mapping);
144814 +       return !args->should_walk[1] || mapping_unevictable(mapping);
144818 + * Some userspace memory allocators create many single-page VMAs. So instead of
144819 + * returning back to the PGD table for each of such VMAs, we finish at least an
144820 + * entire PMD table and therefore avoid many zigzags. This optimizes page table
144821 + * walks for workloads that have large numbers of tiny VMAs.
144823 + * We scan PMD tables in two pass. The first pass reaches to PTE tables and
144824 + * doesn't take the PMD lock. The second pass clears the accessed bit on PMD
144825 + * entries and needs to take the PMD lock. The second pass is only done on the
144826 + * PMD entries that first pass has found the accessed bit is set, and they must
144827 + * be:
144828 + *   1) leaf entries mapping huge pages from the node under reclaim
144829 + *   2) non-leaf entries whose leaf entries only map pages from the node under
144830 + *   reclaim, when CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG=y.
144831 + */
144832 +static bool get_next_interval(struct mm_walk *walk, unsigned long mask, unsigned long size,
144833 +                             unsigned long *start, unsigned long *end)
144835 +       unsigned long next = round_up(*end, size);
144836 +       struct mm_walk_args *args = walk->private;
144838 +       VM_BUG_ON(mask & size);
144839 +       VM_BUG_ON(*start != *end);
144840 +       VM_BUG_ON(!(*end & ~mask));
144841 +       VM_BUG_ON((*end & mask) != (next & mask));
144843 +       while (walk->vma) {
144844 +               if (next >= walk->vma->vm_end) {
144845 +                       walk->vma = walk->vma->vm_next;
144846 +                       continue;
144847 +               }
144849 +               if ((next & mask) != (walk->vma->vm_start & mask))
144850 +                       return false;
144852 +               if (should_skip_vma(walk->vma->vm_start, walk->vma->vm_end, walk)) {
144853 +                       walk->vma = walk->vma->vm_next;
144854 +                       continue;
144855 +               }
144857 +               args->mm_stats[MM_VMA_INTERVAL]++;
144859 +               *start = max(next, walk->vma->vm_start);
144860 +               next = (next | ~mask) + 1;
144861 +               /* rounded-up boundaries can wrap to 0 */
144862 +               *end = next && next < walk->vma->vm_end ? next : walk->vma->vm_end;
144864 +               return true;
144865 +       }
144867 +       return false;
144870 +static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end,
144871 +                          struct mm_walk *walk)
144873 +       int i;
144874 +       pte_t *pte;
144875 +       spinlock_t *ptl;
144876 +       int remote = 0;
144877 +       struct mm_walk_args *args = walk->private;
144878 +       int old_gen, new_gen = lru_gen_from_seq(args->max_seq);
144880 +       VM_BUG_ON(pmd_leaf(*pmd));
144882 +       pte = pte_offset_map_lock(walk->mm, pmd, start & PMD_MASK, &ptl);
144883 +       arch_enter_lazy_mmu_mode();
144884 +restart:
144885 +       for (i = pte_index(start); start != end; i++, start += PAGE_SIZE) {
144886 +               struct page *page;
144887 +               unsigned long pfn = pte_pfn(pte[i]);
144889 +               if (!pte_present(pte[i]) || is_zero_pfn(pfn)) {
144890 +                       args->mm_stats[MM_LEAF_HOLE]++;
144891 +                       continue;
144892 +               }
144894 +               if (WARN_ON_ONCE(pte_devmap(pte[i]) || pte_special(pte[i])))
144895 +                       continue;
144897 +               if (!pte_young(pte[i])) {
144898 +                       args->mm_stats[MM_LEAF_OLD]++;
144899 +                       continue;
144900 +               }
144902 +               if (pfn < args->start_pfn || pfn >= args->end_pfn) {
144903 +                       remote++;
144904 +                       args->mm_stats[MM_LEAF_OTHER_NODE]++;
144905 +                       continue;
144906 +               }
144908 +               page = compound_head(pfn_to_page(pfn));
144909 +               if (page_to_nid(page) != args->node_id) {
144910 +                       remote++;
144911 +                       args->mm_stats[MM_LEAF_OTHER_NODE]++;
144912 +                       continue;
144913 +               }
144915 +               if (!ptep_test_and_clear_young(walk->vma, start, pte + i))
144916 +                       continue;
144918 +               if (pte_dirty(pte[i]) && !PageDirty(page) &&
144919 +                   !(PageAnon(page) && PageSwapBacked(page) && !PageSwapCache(page))) {
144920 +                       set_page_dirty(page);
144921 +                       args->mm_stats[MM_LEAF_DIRTY]++;
144922 +               }
144924 +               if (page_memcg_rcu(page) != args->memcg) {
144925 +                       args->mm_stats[MM_LEAF_OTHER_MEMCG]++;
144926 +                       continue;
144927 +               }
144929 +               old_gen = page_update_gen(page, new_gen);
144930 +               if (old_gen >= 0 && old_gen != new_gen)
144931 +                       update_batch_size(page, old_gen, new_gen, args);
144932 +               args->mm_stats[MM_LEAF_YOUNG]++;
144933 +       }
144935 +       if (i < PTRS_PER_PTE && get_next_interval(walk, PMD_MASK, PAGE_SIZE, &start, &end))
144936 +               goto restart;
144938 +       arch_leave_lazy_mmu_mode();
144939 +       pte_unmap_unlock(pte, ptl);
144941 +       return !remote;
144944 +static bool walk_pmd_range_unlocked(pud_t *pud, unsigned long start, unsigned long end,
144945 +                                   struct mm_walk *walk)
144947 +       int i;
144948 +       pmd_t *pmd;
144949 +       unsigned long next;
144950 +       int young = 0;
144951 +       struct mm_walk_args *args = walk->private;
144953 +       VM_BUG_ON(pud_leaf(*pud));
144955 +       pmd = pmd_offset(pud, start & PUD_MASK);
144956 +restart:
144957 +       for (i = pmd_index(start); start != end; i++, start = next) {
144958 +               pmd_t val = pmd_read_atomic(pmd + i);
144960 +               next = pmd_addr_end(start, end);
144962 +               barrier();
144963 +               if (!pmd_present(val) || is_huge_zero_pmd(val)) {
144964 +                       args->mm_stats[MM_LEAF_HOLE]++;
144965 +                       continue;
144966 +               }
144968 +               if (pmd_trans_huge(val)) {
144969 +                       unsigned long pfn = pmd_pfn(val);
144971 +                       if (!pmd_young(val)) {
144972 +                               args->mm_stats[MM_LEAF_OLD]++;
144973 +                               continue;
144974 +                       }
144976 +                       if (pfn < args->start_pfn || pfn >= args->end_pfn) {
144977 +                               args->mm_stats[MM_LEAF_OTHER_NODE]++;
144978 +                               continue;
144979 +                       }
144981 +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
144982 +                       young++;
144983 +                       __set_bit(i, args->bitmap);
144984 +#endif
144985 +                       continue;
144986 +               }
144988 +#ifdef CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG
144989 +               if (!pmd_young(val)) {
144990 +                       args->mm_stats[MM_NONLEAF_OLD]++;
144991 +                       continue;
144992 +               }
144993 +#endif
144995 +               if (walk_pte_range(&val, start, next, walk)) {
144996 +#ifdef CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG
144997 +                       young++;
144998 +                       __set_bit(i, args->bitmap);
144999 +#endif
145000 +               }
145001 +       }
145003 +       if (i < PTRS_PER_PMD && get_next_interval(walk, PUD_MASK, PMD_SIZE, &start, &end))
145004 +               goto restart;
145006 +       return young;
145009 +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG)
145010 +static void walk_pmd_range_locked(pud_t *pud, unsigned long start, unsigned long end,
145011 +                                 struct mm_walk *walk)
145013 +       int i;
145014 +       pmd_t *pmd;
145015 +       spinlock_t *ptl;
145016 +       struct mm_walk_args *args = walk->private;
145017 +       int old_gen, new_gen = lru_gen_from_seq(args->max_seq);
145019 +       VM_BUG_ON(pud_leaf(*pud));
145021 +       start &= PUD_MASK;
145022 +       pmd = pmd_offset(pud, start);
145023 +       ptl = pmd_lock(walk->mm, pmd);
145024 +       arch_enter_lazy_mmu_mode();
145026 +       for_each_set_bit(i, args->bitmap, PTRS_PER_PMD) {
145027 +               struct page *page;
145028 +               unsigned long pfn = pmd_pfn(pmd[i]);
145029 +               unsigned long addr = start + PMD_SIZE * i;
145031 +               if (!pmd_present(pmd[i]) || is_huge_zero_pmd(pmd[i])) {
145032 +                       args->mm_stats[MM_LEAF_HOLE]++;
145033 +                       continue;
145034 +               }
145036 +               if (WARN_ON_ONCE(pmd_devmap(pmd[i])))
145037 +                       continue;
145039 +               if (!pmd_young(pmd[i])) {
145040 +                       args->mm_stats[MM_LEAF_OLD]++;
145041 +                       continue;
145042 +               }
145044 +               if (!pmd_trans_huge(pmd[i])) {
145045 +#ifdef CONFIG_HAVE_ARCH_PARENT_PMD_YOUNG
145046 +                       args->mm_stats[MM_NONLEAF_YOUNG]++;
145047 +                       pmdp_test_and_clear_young(walk->vma, addr, pmd + i);
145048 +#endif
145049 +                       continue;
145050 +               }
145052 +               if (pfn < args->start_pfn || pfn >= args->end_pfn) {
145053 +                       args->mm_stats[MM_LEAF_OTHER_NODE]++;
145054 +                       continue;
145055 +               }
145057 +               page = pfn_to_page(pfn);
145058 +               VM_BUG_ON_PAGE(PageTail(page), page);
145059 +               if (page_to_nid(page) != args->node_id) {
145060 +                       args->mm_stats[MM_LEAF_OTHER_NODE]++;
145061 +                       continue;
145062 +               }
145064 +               if (!pmdp_test_and_clear_young(walk->vma, addr, pmd + i))
145065 +                       continue;
145067 +               if (pmd_dirty(pmd[i]) && !PageDirty(page) &&
145068 +                   !(PageAnon(page) && PageSwapBacked(page) && !PageSwapCache(page))) {
145069 +                       set_page_dirty(page);
145070 +                       args->mm_stats[MM_LEAF_DIRTY]++;
145071 +               }
145073 +               if (page_memcg_rcu(page) != args->memcg) {
145074 +                       args->mm_stats[MM_LEAF_OTHER_MEMCG]++;
145075 +                       continue;
145076 +               }
145078 +               old_gen = page_update_gen(page, new_gen);
145079 +               if (old_gen >= 0 && old_gen != new_gen)
145080 +                       update_batch_size(page, old_gen, new_gen, args);
145081 +               args->mm_stats[MM_LEAF_YOUNG]++;
145082 +       }
145084 +       arch_leave_lazy_mmu_mode();
145085 +       spin_unlock(ptl);
145087 +       memset(args->bitmap, 0, sizeof(args->bitmap));
145089 +#else
145090 +static void walk_pmd_range_locked(pud_t *pud, unsigned long start, unsigned long end,
145091 +                                 struct mm_walk *walk)
145094 +#endif
145096 +static int walk_pud_range(p4d_t *p4d, unsigned long start, unsigned long end,
145097 +                         struct mm_walk *walk)
145099 +       int i;
145100 +       pud_t *pud;
145101 +       unsigned long next;
145102 +       struct mm_walk_args *args = walk->private;
145104 +       VM_BUG_ON(p4d_leaf(*p4d));
145106 +       pud = pud_offset(p4d, start & P4D_MASK);
145107 +restart:
145108 +       for (i = pud_index(start); start != end; i++, start = next) {
145109 +               pud_t val = READ_ONCE(pud[i]);
145111 +               next = pud_addr_end(start, end);
145113 +               if (!pud_present(val) || WARN_ON_ONCE(pud_leaf(val)))
145114 +                       continue;
145116 +               if (walk_pmd_range_unlocked(&val, start, next, walk))
145117 +                       walk_pmd_range_locked(&val, start, next, walk);
145119 +               if (args->batch_size >= MAX_BATCH_SIZE) {
145120 +                       end = (start | ~PUD_MASK) + 1;
145121 +                       goto done;
145122 +               }
145123 +       }
145125 +       if (i < PTRS_PER_PUD && get_next_interval(walk, P4D_MASK, PUD_SIZE, &start, &end))
145126 +               goto restart;
145128 +       end = round_up(end, P4D_SIZE);
145129 +done:
145130 +       /* rounded-up boundaries can wrap to 0 */
145131 +       args->next_addr = end && walk->vma ? max(end, walk->vma->vm_start) : 0;
145133 +       return -EAGAIN;
145136 +static void walk_mm(struct mm_walk_args *args, int swappiness, struct mm_struct *mm)
145138 +       static const struct mm_walk_ops mm_walk_ops = {
145139 +               .test_walk = should_skip_vma,
145140 +               .p4d_entry = walk_pud_range,
145141 +       };
145143 +       int err;
145144 +       int file;
145145 +       int nid = args->node_id;
145146 +       struct mem_cgroup *memcg = args->memcg;
145147 +       struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
145149 +       args->next_addr = FIRST_USER_ADDRESS;
145150 +       for (file = !swappiness; file < ANON_AND_FILE; file++)
145151 +               args->should_walk[file] = lru_gen_mm_is_active(mm) ||
145152 +                                         node_isset(nid, mm->lrugen.nodes[file]);
145154 +       do {
145155 +               unsigned long start = args->next_addr;
145156 +               unsigned long end = mm->highest_vm_end;
145158 +               err = -EBUSY;
145160 +               preempt_disable();
145161 +               rcu_read_lock();
145163 +#ifdef CONFIG_MEMCG
145164 +               if (memcg && atomic_read(&memcg->moving_account)) {
145165 +                       args->mm_stats[MM_LOCK_CONTENTION]++;
145166 +                       goto contended;
145167 +               }
145168 +#endif
145169 +               if (!mmap_read_trylock(mm)) {
145170 +                       args->mm_stats[MM_LOCK_CONTENTION]++;
145171 +                       goto contended;
145172 +               }
145174 +               err = walk_page_range(mm, start, end, &mm_walk_ops, args);
145176 +               mmap_read_unlock(mm);
145178 +               if (args->batch_size)
145179 +                       reset_batch_size(lruvec, args);
145180 +contended:
145181 +               rcu_read_unlock();
145182 +               preempt_enable();
145184 +               cond_resched();
145185 +       } while (err == -EAGAIN && args->next_addr &&
145186 +                !mm_is_oom_victim(mm) && !mm_has_migrated(mm, memcg));
145188 +       if (err == -EBUSY)
145189 +               return;
145191 +       for (file = !swappiness; file < ANON_AND_FILE; file++) {
145192 +               if (args->should_walk[file])
145193 +                       node_clear(nid, mm->lrugen.nodes[file]);
145194 +       }
145197 +static void page_inc_gen(struct page *page, struct lruvec *lruvec, bool front)
145199 +       int old_gen, new_gen;
145200 +       unsigned long old_flags, new_flags;
145201 +       int file = page_is_file_lru(page);
145202 +       int zone = page_zonenum(page);
145203 +       struct lrugen *lrugen = &lruvec->evictable;
145205 +       old_gen = lru_gen_from_seq(lrugen->min_seq[file]);
145207 +       do {
145208 +               old_flags = READ_ONCE(page->flags);
145209 +               new_gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
145210 +               VM_BUG_ON_PAGE(new_gen < 0, page);
145211 +               if (new_gen >= 0 && new_gen != old_gen)
145212 +                       goto sort;
145214 +               new_gen = (old_gen + 1) % MAX_NR_GENS;
145215 +               new_flags = (old_flags & ~(LRU_GEN_MASK | LRU_USAGE_MASK | LRU_TIER_FLAGS)) |
145216 +                           ((new_gen + 1UL) << LRU_GEN_PGOFF);
145217 +               /* mark the page for reclaim if it's pending writeback */
145218 +               if (front)
145219 +                       new_flags |= BIT(PG_reclaim);
145220 +       } while (cmpxchg(&page->flags, old_flags, new_flags) != old_flags);
145222 +       lru_gen_update_size(page, lruvec, old_gen, new_gen);
145223 +sort:
145224 +       if (front)
145225 +               list_move(&page->lru, &lrugen->lists[new_gen][file][zone]);
145226 +       else
145227 +               list_move_tail(&page->lru, &lrugen->lists[new_gen][file][zone]);
145230 +static bool try_inc_min_seq(struct lruvec *lruvec, int file)
145232 +       int gen, zone;
145233 +       bool success = false;
145234 +       struct lrugen *lrugen = &lruvec->evictable;
145236 +       VM_BUG_ON(!seq_is_valid(lruvec));
145238 +       while (get_nr_gens(lruvec, file) > MIN_NR_GENS) {
145239 +               gen = lru_gen_from_seq(lrugen->min_seq[file]);
145241 +               for (zone = 0; zone < MAX_NR_ZONES; zone++) {
145242 +                       if (!list_empty(&lrugen->lists[gen][file][zone]))
145243 +                               return success;
145244 +               }
145246 +               reset_controller_pos(lruvec, gen, file);
145247 +               WRITE_ONCE(lrugen->min_seq[file], lrugen->min_seq[file] + 1);
145249 +               success = true;
145250 +       }
145252 +       return success;
145255 +static bool inc_min_seq(struct lruvec *lruvec, int file)
145257 +       int gen, zone;
145258 +       int batch_size = 0;
145259 +       struct lrugen *lrugen = &lruvec->evictable;
145261 +       VM_BUG_ON(!seq_is_valid(lruvec));
145263 +       if (get_nr_gens(lruvec, file) != MAX_NR_GENS)
145264 +               return true;
145266 +       gen = lru_gen_from_seq(lrugen->min_seq[file]);
145268 +       for (zone = 0; zone < MAX_NR_ZONES; zone++) {
145269 +               struct list_head *head = &lrugen->lists[gen][file][zone];
145271 +               while (!list_empty(head)) {
145272 +                       struct page *page = lru_to_page(head);
145274 +                       VM_BUG_ON_PAGE(PageTail(page), page);
145275 +                       VM_BUG_ON_PAGE(PageUnevictable(page), page);
145276 +                       VM_BUG_ON_PAGE(PageActive(page), page);
145277 +                       VM_BUG_ON_PAGE(page_is_file_lru(page) != file, page);
145278 +                       VM_BUG_ON_PAGE(page_zonenum(page) != zone, page);
145280 +                       prefetchw_prev_lru_page(page, head, flags);
145282 +                       page_inc_gen(page, lruvec, false);
145284 +                       if (++batch_size == MAX_BATCH_SIZE)
145285 +                               return false;
145286 +               }
145288 +               VM_BUG_ON(lrugen->sizes[gen][file][zone]);
145289 +       }
145291 +       reset_controller_pos(lruvec, gen, file);
145292 +       WRITE_ONCE(lrugen->min_seq[file], lrugen->min_seq[file] + 1);
145294 +       return true;
145297 +static void inc_max_seq(struct lruvec *lruvec)
145299 +       int gen, file, zone;
145300 +       struct lrugen *lrugen = &lruvec->evictable;
145302 +       spin_lock_irq(&lruvec->lru_lock);
145304 +       VM_BUG_ON(!seq_is_valid(lruvec));
145306 +       for (file = 0; file < ANON_AND_FILE; file++) {
145307 +               if (try_inc_min_seq(lruvec, file))
145308 +                       continue;
145310 +               while (!inc_min_seq(lruvec, file)) {
145311 +                       spin_unlock_irq(&lruvec->lru_lock);
145312 +                       cond_resched();
145313 +                       spin_lock_irq(&lruvec->lru_lock);
145314 +               }
145315 +       }
145317 +       gen = lru_gen_from_seq(lrugen->max_seq - 1);
145318 +       for_each_type_zone(file, zone) {
145319 +               enum lru_list lru = LRU_FILE * file;
145320 +               long total = lrugen->sizes[gen][file][zone];
145322 +               if (!total)
145323 +                       continue;
145325 +               WARN_ON_ONCE(total != (int)total);
145327 +               update_lru_size(lruvec, lru, zone, total);
145328 +               update_lru_size(lruvec, lru + LRU_ACTIVE, zone, -total);
145329 +       }
145331 +       gen = lru_gen_from_seq(lrugen->max_seq + 1);
145332 +       for_each_type_zone(file, zone) {
145333 +               VM_BUG_ON(lrugen->sizes[gen][file][zone]);
145334 +               VM_BUG_ON(!list_empty(&lrugen->lists[gen][file][zone]));
145335 +       }
145337 +       for (file = 0; file < ANON_AND_FILE; file++)
145338 +               reset_controller_pos(lruvec, gen, file);
145340 +       WRITE_ONCE(lrugen->timestamps[gen], jiffies);
145341 +       /* make sure all preceding modifications appear first */
145342 +       smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1);
145344 +       spin_unlock_irq(&lruvec->lru_lock);
145347 +/* Main function used by foreground, background and user-triggered aging. */
145348 +static bool walk_mm_list(struct lruvec *lruvec, unsigned long max_seq,
145349 +                        struct scan_control *sc, int swappiness, struct mm_walk_args *args)
145351 +       bool last;
145352 +       bool alloc = !args;
145353 +       struct mm_struct *mm = NULL;
145354 +       struct lrugen *lrugen = &lruvec->evictable;
145355 +       struct pglist_data *pgdat = lruvec_pgdat(lruvec);
145356 +       int nid = pgdat->node_id;
145357 +       struct mem_cgroup *memcg = lruvec_memcg(lruvec);
145358 +       struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
145360 +       VM_BUG_ON(max_seq > READ_ONCE(lrugen->max_seq));
145362 +       /*
145363 +        * For each walk of the mm_struct list of a memcg, we decrement the
145364 +        * priority of its lrugen. For each walk of all memcgs in kswapd, we
145365 +        * increment the priority of every lrugen.
145366 +        *
145367 +        * So if this lrugen has a higher priority (smaller value), it means
145368 +        * other concurrent reclaimers have walked its mm list, and we skip it
145369 +        * for this priority in order to balance the pressure on all memcgs.
145370 +        */
145371 +       if (!mem_cgroup_disabled() && !cgroup_reclaim(sc) &&
145372 +           sc->priority > atomic_read(&lrugen->priority))
145373 +               return false;
145375 +       if (alloc) {
145376 +               args = kvzalloc_node(sizeof(*args), GFP_KERNEL, nid);
145377 +               if (!args)
145378 +                       return false;
145379 +       }
145381 +       args->memcg = memcg;
145382 +       args->max_seq = max_seq;
145383 +       args->start_pfn = pgdat->node_start_pfn;
145384 +       args->end_pfn = pgdat_end_pfn(pgdat);
145385 +       args->node_id = nid;
145387 +       do {
145388 +               last = get_next_mm(args, swappiness, &mm);
145389 +               if (mm)
145390 +                       walk_mm(args, swappiness, mm);
145392 +               cond_resched();
145393 +       } while (mm);
145395 +       if (alloc)
145396 +               kvfree(args);
145398 +       if (!last) {
145399 +               /* foreground aging prefers not to wait unless "necessary" */
145400 +               if (!current_is_kswapd() && sc->priority < DEF_PRIORITY - 2)
145401 +                       wait_event_killable(mm_list->nodes[nid].wait,
145402 +                                           max_seq < READ_ONCE(lrugen->max_seq));
145404 +               return max_seq < READ_ONCE(lrugen->max_seq);
145405 +       }
145407 +       VM_BUG_ON(max_seq != READ_ONCE(lrugen->max_seq));
145409 +       inc_max_seq(lruvec);
145411 +       if (!mem_cgroup_disabled())
145412 +               atomic_add_unless(&lrugen->priority, -1, 0);
145414 +       /* order against inc_max_seq() */
145415 +       smp_mb();
145416 +       /* either we see any waiters or they will see the updated max_seq */
145417 +       if (waitqueue_active(&mm_list->nodes[nid].wait))
145418 +               wake_up_all(&mm_list->nodes[nid].wait);
145420 +       wakeup_flusher_threads(WB_REASON_VMSCAN);
145422 +       return true;
145425 +void lru_gen_scan_around(struct page_vma_mapped_walk *pvmw)
145427 +       pte_t *pte;
145428 +       unsigned long start, end;
145429 +       int old_gen, new_gen;
145430 +       unsigned long flags;
145431 +       struct lruvec *lruvec;
145432 +       struct mem_cgroup *memcg;
145433 +       struct pglist_data *pgdat = page_pgdat(pvmw->page);
145435 +       lockdep_assert_held(pvmw->ptl);
145437 +       start = max(pvmw->address & PMD_MASK, pvmw->vma->vm_start);
145438 +       end = pmd_addr_end(pvmw->address, pvmw->vma->vm_end);
145439 +       pte = pvmw->pte - ((pvmw->address - start) >> PAGE_SHIFT);
145441 +       memcg = lock_page_memcg(pvmw->page);
145442 +       lruvec = lock_page_lruvec_irqsave(pvmw->page, &flags);
145444 +       new_gen = lru_gen_from_seq(lruvec->evictable.max_seq);
145446 +       for (; start != end; pte++, start += PAGE_SIZE) {
145447 +               struct page *page;
145448 +               unsigned long pfn = pte_pfn(*pte);
145450 +               if (!pte_present(*pte) || !pte_young(*pte) || is_zero_pfn(pfn))
145451 +                       continue;
145453 +               if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat))
145454 +                       continue;
145456 +               page = compound_head(pfn_to_page(pfn));
145457 +               if (page_to_nid(page) != pgdat->node_id)
145458 +                       continue;
145460 +               if (page_memcg_rcu(page) != memcg)
145461 +                       continue;
145462 +               /*
145463 +                * We may be holding many locks. So try to finish as fast as
145464 +                * possible and leave the accessed and the dirty bits to page
145465 +                * table walks.
145466 +                */
145467 +               old_gen = page_update_gen(page, new_gen);
145468 +               if (old_gen >= 0 && old_gen != new_gen)
145469 +                       lru_gen_update_size(page, lruvec, old_gen, new_gen);
145470 +       }
145472 +       unlock_page_lruvec_irqrestore(lruvec, flags);
145473 +       unlock_page_memcg(pvmw->page);
145476 +/******************************************************************************
145477 + *                          the eviction
145478 + ******************************************************************************/
145480 +static bool sort_page(struct page *page, struct lruvec *lruvec, int tier_to_isolate)
145482 +       bool success;
145483 +       int gen = page_lru_gen(page);
145484 +       int file = page_is_file_lru(page);
145485 +       int zone = page_zonenum(page);
145486 +       int tier = lru_tier_from_usage(page_tier_usage(page));
145487 +       struct lrugen *lrugen = &lruvec->evictable;
145489 +       VM_BUG_ON_PAGE(gen == -1, page);
145490 +       VM_BUG_ON_PAGE(tier_to_isolate < 0, page);
145492 +       /* a lazy-free page that has been written into? */
145493 +       if (file && PageDirty(page) && PageAnon(page)) {
145494 +               success = lru_gen_deletion(page, lruvec);
145495 +               VM_BUG_ON_PAGE(!success, page);
145496 +               SetPageSwapBacked(page);
145497 +               add_page_to_lru_list_tail(page, lruvec);
145498 +               return true;
145499 +       }
145501 +       /* page_update_gen() has updated the page? */
145502 +       if (gen != lru_gen_from_seq(lrugen->min_seq[file])) {
145503 +               list_move(&page->lru, &lrugen->lists[gen][file][zone]);
145504 +               return true;
145505 +       }
145507 +       /* activate the page if its tier has a higher refault rate */
145508 +       if (tier_to_isolate < tier) {
145509 +               int sid = sid_from_seq_or_gen(gen);
145511 +               page_inc_gen(page, lruvec, false);
145512 +               WRITE_ONCE(lrugen->activated[sid][file][tier - 1],
145513 +                          lrugen->activated[sid][file][tier - 1] + thp_nr_pages(page));
145514 +               inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file);
145515 +               return true;
145516 +       }
145518 +       /*
145519 +        * A page can't be immediately evicted, and page_inc_gen() will mark it
145520 +        * for reclaim and hopefully writeback will write it soon if it's dirty.
145521 +        */
145522 +       if (PageLocked(page) || PageWriteback(page) || (file && PageDirty(page))) {
145523 +               page_inc_gen(page, lruvec, true);
145524 +               return true;
145525 +       }
145527 +       return false;
145530 +static bool should_skip_page(struct page *page, struct scan_control *sc)
145532 +       if (!sc->may_unmap && page_mapped(page))
145533 +               return true;
145535 +       if (!(sc->may_writepage && (sc->gfp_mask & __GFP_IO)) &&
145536 +           (PageDirty(page) || (PageAnon(page) && !PageSwapCache(page))))
145537 +               return true;
145539 +       if (!get_page_unless_zero(page))
145540 +               return true;
145542 +       if (!TestClearPageLRU(page)) {
145543 +               put_page(page);
145544 +               return true;
145545 +       }
145547 +       return false;
145550 +static void isolate_page(struct page *page, struct lruvec *lruvec)
145552 +       bool success;
145554 +       success = lru_gen_deletion(page, lruvec);
145555 +       VM_BUG_ON_PAGE(!success, page);
145557 +       if (PageActive(page)) {
145558 +               ClearPageActive(page);
145559 +               /* make sure shrink_page_list() rejects this page */
145560 +               SetPageReferenced(page);
145561 +               return;
145562 +       }
145564 +       /* make sure shrink_page_list() doesn't try to write this page */
145565 +       ClearPageReclaim(page);
145566 +       /* make sure shrink_page_list() doesn't reject this page */
145567 +       ClearPageReferenced(page);
145570 +static int scan_lru_gen_pages(struct lruvec *lruvec, struct scan_control *sc,
145571 +                             long *nr_to_scan, int file, int tier,
145572 +                             struct list_head *list)
145574 +       bool success;
145575 +       int gen, zone;
145576 +       enum vm_event_item item;
145577 +       int sorted = 0;
145578 +       int scanned = 0;
145579 +       int isolated = 0;
145580 +       int batch_size = 0;
145581 +       struct lrugen *lrugen = &lruvec->evictable;
145583 +       VM_BUG_ON(!list_empty(list));
145585 +       if (get_nr_gens(lruvec, file) == MIN_NR_GENS)
145586 +               return -ENOENT;
145588 +       gen = lru_gen_from_seq(lrugen->min_seq[file]);
145590 +       for (zone = sc->reclaim_idx; zone >= 0; zone--) {
145591 +               LIST_HEAD(moved);
145592 +               int skipped = 0;
145593 +               struct list_head *head = &lrugen->lists[gen][file][zone];
145595 +               while (!list_empty(head)) {
145596 +                       struct page *page = lru_to_page(head);
145597 +                       int delta = thp_nr_pages(page);
145599 +                       VM_BUG_ON_PAGE(PageTail(page), page);
145600 +                       VM_BUG_ON_PAGE(PageUnevictable(page), page);
145601 +                       VM_BUG_ON_PAGE(PageActive(page), page);
145602 +                       VM_BUG_ON_PAGE(page_is_file_lru(page) != file, page);
145603 +                       VM_BUG_ON_PAGE(page_zonenum(page) != zone, page);
145605 +                       prefetchw_prev_lru_page(page, head, flags);
145607 +                       scanned += delta;
145609 +                       if (sort_page(page, lruvec, tier))
145610 +                               sorted += delta;
145611 +                       else if (should_skip_page(page, sc)) {
145612 +                               list_move(&page->lru, &moved);
145613 +                               skipped += delta;
145614 +                       } else {
145615 +                               isolate_page(page, lruvec);
145616 +                               list_add(&page->lru, list);
145617 +                               isolated += delta;
145618 +                       }
145620 +                       if (scanned >= *nr_to_scan || isolated >= SWAP_CLUSTER_MAX ||
145621 +                           ++batch_size == MAX_BATCH_SIZE)
145622 +                               break;
145623 +               }
145625 +               list_splice(&moved, head);
145626 +               __count_zid_vm_events(PGSCAN_SKIP, zone, skipped);
145628 +               if (scanned >= *nr_to_scan || isolated >= SWAP_CLUSTER_MAX ||
145629 +                   batch_size == MAX_BATCH_SIZE)
145630 +                       break;
145631 +       }
145633 +       success = try_inc_min_seq(lruvec, file);
145635 +       item = current_is_kswapd() ? PGSCAN_KSWAPD : PGSCAN_DIRECT;
145636 +       if (!cgroup_reclaim(sc))
145637 +               __count_vm_events(item, scanned);
145638 +       __count_memcg_events(lruvec_memcg(lruvec), item, scanned);
145639 +       __count_vm_events(PGSCAN_ANON + file, scanned);
145641 +       *nr_to_scan -= scanned;
145643 +       if (*nr_to_scan <= 0 || success || isolated)
145644 +               return isolated;
145645 +       /*
145646 +        * We may have trouble finding eligible pages due to reclaim_idx,
145647 +        * may_unmap and may_writepage. The following check makes sure we won't
145648 +        * be stuck if we aren't making enough progress.
145649 +        */
145650 +       return batch_size == MAX_BATCH_SIZE && sorted >= SWAP_CLUSTER_MAX ? 0 : -ENOENT;
145653 +static int get_tier_to_isolate(struct lruvec *lruvec, int file)
145655 +       int tier;
145656 +       struct controller_pos sp, pv;
145658 +       /*
145659 +        * Ideally we don't want to evict upper tiers that have higher refault
145660 +        * rates. However, we need to leave some margin for the fluctuation in
145661 +        * refault rates. So we use a larger gain factor to make sure upper
145662 +        * tiers are indeed more active. We choose 2 because the lowest upper
145663 +        * tier would have twice of the refault rate of the base tier, according
145664 +        * to their numbers of accesses.
145665 +        */
145666 +       read_controller_pos(&sp, lruvec, file, 0, 1);
145667 +       for (tier = 1; tier < MAX_NR_TIERS; tier++) {
145668 +               read_controller_pos(&pv, lruvec, file, tier, 2);
145669 +               if (!positive_ctrl_err(&sp, &pv))
145670 +                       break;
145671 +       }
145673 +       return tier - 1;
145676 +static int get_type_to_scan(struct lruvec *lruvec, int swappiness, int *tier_to_isolate)
145678 +       int file, tier;
145679 +       struct controller_pos sp, pv;
145680 +       int gain[ANON_AND_FILE] = { swappiness, 200 - swappiness };
145682 +       /*
145683 +        * Compare the refault rates between the base tiers of anon and file to
145684 +        * determine which type to evict. Also need to compare the refault rates
145685 +        * of the upper tiers of the selected type with that of the base tier to
145686 +        * determine which tier of the selected type to evict.
145687 +        */
145688 +       read_controller_pos(&sp, lruvec, 0, 0, gain[0]);
145689 +       read_controller_pos(&pv, lruvec, 1, 0, gain[1]);
145690 +       file = positive_ctrl_err(&sp, &pv);
145692 +       read_controller_pos(&sp, lruvec, !file, 0, gain[!file]);
145693 +       for (tier = 1; tier < MAX_NR_TIERS; tier++) {
145694 +               read_controller_pos(&pv, lruvec, file, tier, gain[file]);
145695 +               if (!positive_ctrl_err(&sp, &pv))
145696 +                       break;
145697 +       }
145699 +       *tier_to_isolate = tier - 1;
145701 +       return file;
145704 +static int isolate_lru_gen_pages(struct lruvec *lruvec, struct scan_control *sc,
145705 +                                int swappiness, long *nr_to_scan, int *type_to_scan,
145706 +                                struct list_head *list)
145708 +       int i;
145709 +       int file;
145710 +       int isolated;
145711 +       int tier = -1;
145712 +       DEFINE_MAX_SEQ();
145713 +       DEFINE_MIN_SEQ();
145715 +       VM_BUG_ON(!seq_is_valid(lruvec));
145717 +       if (max_nr_gens(max_seq, min_seq, swappiness) == MIN_NR_GENS)
145718 +               return 0;
145719 +       /*
145720 +        * Try to select a type based on generations and swappiness, and if that
145721 +        * fails, fall back to get_type_to_scan(). When anon and file are both
145722 +        * available from the same generation, swappiness 200 is interpreted as
145723 +        * anon first and swappiness 1 is interpreted as file first.
145724 +        */
145725 +       file = !swappiness || min_seq[0] > min_seq[1] ||
145726 +              (min_seq[0] == min_seq[1] && swappiness != 200 &&
145727 +               (swappiness == 1 || get_type_to_scan(lruvec, swappiness, &tier)));
145729 +       if (tier == -1)
145730 +               tier = get_tier_to_isolate(lruvec, file);
145732 +       for (i = !swappiness; i < ANON_AND_FILE; i++) {
145733 +               isolated = scan_lru_gen_pages(lruvec, sc, nr_to_scan, file, tier, list);
145734 +               if (isolated >= 0)
145735 +                       break;
145737 +               file = !file;
145738 +               tier = get_tier_to_isolate(lruvec, file);
145739 +       }
145741 +       if (isolated < 0)
145742 +               isolated = *nr_to_scan = 0;
145744 +       *type_to_scan = file;
145746 +       return isolated;
145749 +/* Main function used by foreground, background and user-triggered eviction. */
145750 +static bool evict_lru_gen_pages(struct lruvec *lruvec, struct scan_control *sc,
145751 +                               int swappiness, long *nr_to_scan)
145753 +       int file;
145754 +       int isolated;
145755 +       int reclaimed;
145756 +       LIST_HEAD(list);
145757 +       struct page *page;
145758 +       enum vm_event_item item;
145759 +       struct reclaim_stat stat;
145760 +       struct pglist_data *pgdat = lruvec_pgdat(lruvec);
145762 +       spin_lock_irq(&lruvec->lru_lock);
145764 +       isolated = isolate_lru_gen_pages(lruvec, sc, swappiness, nr_to_scan, &file, &list);
145765 +       VM_BUG_ON(list_empty(&list) == !!isolated);
145767 +       if (isolated)
145768 +               __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, isolated);
145770 +       spin_unlock_irq(&lruvec->lru_lock);
145772 +       if (!isolated)
145773 +               goto done;
145775 +       reclaimed = shrink_page_list(&list, pgdat, sc, &stat, false);
145776 +       /*
145777 +        * We need to prevent rejected pages from being added back to the same
145778 +        * lists they were isolated from. Otherwise we may risk looping on them
145779 +        * forever. We use PageActive() or !PageReferenced() && PageWorkingset()
145780 +        * to tell lru_gen_addition() not to add them to the oldest generation.
145781 +        */
145782 +       list_for_each_entry(page, &list, lru) {
145783 +               if (PageMlocked(page))
145784 +                       continue;
145786 +               if (PageReferenced(page)) {
145787 +                       SetPageActive(page);
145788 +                       ClearPageReferenced(page);
145789 +               } else {
145790 +                       ClearPageActive(page);
145791 +                       SetPageWorkingset(page);
145792 +               }
145793 +       }
145795 +       spin_lock_irq(&lruvec->lru_lock);
145797 +       move_pages_to_lru(lruvec, &list);
145799 +       __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -isolated);
145801 +       item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
145802 +       if (!cgroup_reclaim(sc))
145803 +               __count_vm_events(item, reclaimed);
145804 +       __count_memcg_events(lruvec_memcg(lruvec), item, reclaimed);
145805 +       __count_vm_events(PGSTEAL_ANON + file, reclaimed);
145807 +       spin_unlock_irq(&lruvec->lru_lock);
145809 +       mem_cgroup_uncharge_list(&list);
145810 +       free_unref_page_list(&list);
145812 +       sc->nr_reclaimed += reclaimed;
145813 +done:
145814 +       return *nr_to_scan > 0 && sc->nr_reclaimed < sc->nr_to_reclaim;
145817 +/******************************************************************************
145818 + *                          page reclaim
145819 + ******************************************************************************/
145821 +static int get_swappiness(struct lruvec *lruvec)
145823 +       struct mem_cgroup *memcg = lruvec_memcg(lruvec);
145824 +       int swappiness = mem_cgroup_get_nr_swap_pages(memcg) >= (long)SWAP_CLUSTER_MAX ?
145825 +                        mem_cgroup_swappiness(memcg) : 0;
145827 +       VM_BUG_ON(swappiness > 200U);
145829 +       return swappiness;
145832 +static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc,
145833 +                                   int swappiness)
145835 +       int gen, file, zone;
145836 +       long nr_to_scan = 0;
145837 +       struct lrugen *lrugen = &lruvec->evictable;
145838 +       DEFINE_MAX_SEQ();
145839 +       DEFINE_MIN_SEQ();
145841 +       lru_add_drain();
145843 +       for (file = !swappiness; file < ANON_AND_FILE; file++) {
145844 +               unsigned long seq;
145846 +               for (seq = min_seq[file]; seq <= max_seq; seq++) {
145847 +                       gen = lru_gen_from_seq(seq);
145849 +                       for (zone = 0; zone <= sc->reclaim_idx; zone++)
145850 +                               nr_to_scan += READ_ONCE(lrugen->sizes[gen][file][zone]);
145851 +               }
145852 +       }
145854 +       nr_to_scan = max(nr_to_scan, 0L);
145855 +       nr_to_scan = round_up(nr_to_scan >> sc->priority, SWAP_CLUSTER_MAX);
145857 +       if (max_nr_gens(max_seq, min_seq, swappiness) > MIN_NR_GENS)
145858 +               return nr_to_scan;
145860 +       /* kswapd uses age_lru_gens() */
145861 +       if (current_is_kswapd())
145862 +               return 0;
145864 +       return walk_mm_list(lruvec, max_seq, sc, swappiness, NULL) ? nr_to_scan : 0;
145867 +static void shrink_lru_gens(struct lruvec *lruvec, struct scan_control *sc)
145869 +       struct blk_plug plug;
145870 +       unsigned long scanned = 0;
145871 +       struct mem_cgroup *memcg = lruvec_memcg(lruvec);
145873 +       blk_start_plug(&plug);
145875 +       while (true) {
145876 +               long nr_to_scan;
145877 +               int swappiness = sc->may_swap ? get_swappiness(lruvec) : 0;
145879 +               nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness) - scanned;
145880 +               if (nr_to_scan < (long)SWAP_CLUSTER_MAX)
145881 +                       break;
145883 +               scanned += nr_to_scan;
145885 +               if (!evict_lru_gen_pages(lruvec, sc, swappiness, &nr_to_scan))
145886 +                       break;
145888 +               scanned -= nr_to_scan;
145890 +               if (mem_cgroup_below_min(memcg) ||
145891 +                   (mem_cgroup_below_low(memcg) && !sc->memcg_low_reclaim))
145892 +                       break;
145894 +               cond_resched();
145895 +       }
145897 +       blk_finish_plug(&plug);
145900 +/******************************************************************************
145901 + *                          the background aging
145902 + ******************************************************************************/
145904 +static int lru_gen_spread = MIN_NR_GENS;
145906 +static void try_walk_mm_list(struct lruvec *lruvec, struct scan_control *sc)
145908 +       int gen, file, zone;
145909 +       long old_and_young[2] = {};
145910 +       struct mm_walk_args args = {};
145911 +       int spread = READ_ONCE(lru_gen_spread);
145912 +       int swappiness = get_swappiness(lruvec);
145913 +       struct lrugen *lrugen = &lruvec->evictable;
145914 +       DEFINE_MAX_SEQ();
145915 +       DEFINE_MIN_SEQ();
145917 +       lru_add_drain();
145919 +       for (file = !swappiness; file < ANON_AND_FILE; file++) {
145920 +               unsigned long seq;
145922 +               for (seq = min_seq[file]; seq <= max_seq; seq++) {
145923 +                       gen = lru_gen_from_seq(seq);
145925 +                       for (zone = 0; zone < MAX_NR_ZONES; zone++)
145926 +                               old_and_young[seq == max_seq] +=
145927 +                                       READ_ONCE(lrugen->sizes[gen][file][zone]);
145928 +               }
145929 +       }
145931 +       old_and_young[0] = max(old_and_young[0], 0L);
145932 +       old_and_young[1] = max(old_and_young[1], 0L);
145934 +       if (old_and_young[0] + old_and_young[1] < SWAP_CLUSTER_MAX)
145935 +               return;
145937 +       /* try to spread pages out across spread+1 generations */
145938 +       if (old_and_young[0] >= old_and_young[1] * spread &&
145939 +           min_nr_gens(max_seq, min_seq, swappiness) > max(spread, MIN_NR_GENS))
145940 +               return;
145942 +       walk_mm_list(lruvec, max_seq, sc, swappiness, &args);
145945 +static void age_lru_gens(struct pglist_data *pgdat, struct scan_control *sc)
145947 +       struct mem_cgroup *memcg;
145949 +       VM_BUG_ON(!current_is_kswapd());
145951 +       memcg = mem_cgroup_iter(NULL, NULL, NULL);
145952 +       do {
145953 +               struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
145954 +               struct lrugen *lrugen = &lruvec->evictable;
145956 +               if (!mem_cgroup_below_min(memcg) &&
145957 +                   (!mem_cgroup_below_low(memcg) || sc->memcg_low_reclaim))
145958 +                       try_walk_mm_list(lruvec, sc);
145960 +               if (!mem_cgroup_disabled())
145961 +                       atomic_add_unless(&lrugen->priority, 1, DEF_PRIORITY);
145963 +               cond_resched();
145964 +       } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
145967 +/******************************************************************************
145968 + *                          state change
145969 + ******************************************************************************/
145971 +#ifdef CONFIG_LRU_GEN_ENABLED
145972 +DEFINE_STATIC_KEY_TRUE(lru_gen_static_key);
145973 +#else
145974 +DEFINE_STATIC_KEY_FALSE(lru_gen_static_key);
145975 +#endif
145977 +static DEFINE_MUTEX(lru_gen_state_mutex);
145978 +static int lru_gen_nr_swapfiles __read_mostly;
145980 +static bool __maybe_unused state_is_valid(struct lruvec *lruvec)
145982 +       int gen, file, zone;
145983 +       enum lru_list lru;
145984 +       struct lrugen *lrugen = &lruvec->evictable;
145986 +       for_each_evictable_lru(lru) {
145987 +               file = is_file_lru(lru);
145989 +               if (lrugen->enabled[file] && !list_empty(&lruvec->lists[lru]))
145990 +                       return false;
145991 +       }
145993 +       for_each_gen_type_zone(gen, file, zone) {
145994 +               if (!lrugen->enabled[file] && !list_empty(&lrugen->lists[gen][file][zone]))
145995 +                       return false;
145997 +               VM_WARN_ONCE(!lrugen->enabled[file] && lrugen->sizes[gen][file][zone],
145998 +                            "lru_gen: possible unbalanced number of pages");
145999 +       }
146001 +       return true;
146004 +static bool fill_lru_gen_lists(struct lruvec *lruvec)
146006 +       enum lru_list lru;
146007 +       int batch_size = 0;
146009 +       for_each_evictable_lru(lru) {
146010 +               int file = is_file_lru(lru);
146011 +               bool active = is_active_lru(lru);
146012 +               struct list_head *head = &lruvec->lists[lru];
146014 +               if (!lruvec->evictable.enabled[file])
146015 +                       continue;
146017 +               while (!list_empty(head)) {
146018 +                       bool success;
146019 +                       struct page *page = lru_to_page(head);
146021 +                       VM_BUG_ON_PAGE(PageTail(page), page);
146022 +                       VM_BUG_ON_PAGE(PageUnevictable(page), page);
146023 +                       VM_BUG_ON_PAGE(PageActive(page) != active, page);
146024 +                       VM_BUG_ON_PAGE(page_lru_gen(page) != -1, page);
146025 +                       VM_BUG_ON_PAGE(page_is_file_lru(page) != file, page);
146027 +                       prefetchw_prev_lru_page(page, head, flags);
146029 +                       del_page_from_lru_list(page, lruvec);
146030 +                       success = lru_gen_addition(page, lruvec, true);
146031 +                       VM_BUG_ON(!success);
146033 +                       if (++batch_size == MAX_BATCH_SIZE)
146034 +                               return false;
146035 +               }
146036 +       }
146038 +       return true;
146041 +static bool drain_lru_gen_lists(struct lruvec *lruvec)
146043 +       int gen, file, zone;
146044 +       int batch_size = 0;
146046 +       for_each_gen_type_zone(gen, file, zone) {
146047 +               struct list_head *head = &lruvec->evictable.lists[gen][file][zone];
146049 +               if (lruvec->evictable.enabled[file])
146050 +                       continue;
146052 +               while (!list_empty(head)) {
146053 +                       bool success;
146054 +                       struct page *page = lru_to_page(head);
146056 +                       VM_BUG_ON_PAGE(PageTail(page), page);
146057 +                       VM_BUG_ON_PAGE(PageUnevictable(page), page);
146058 +                       VM_BUG_ON_PAGE(PageActive(page), page);
146059 +                       VM_BUG_ON_PAGE(page_is_file_lru(page) != file, page);
146060 +                       VM_BUG_ON_PAGE(page_zonenum(page) != zone, page);
146062 +                       prefetchw_prev_lru_page(page, head, flags);
146064 +                       success = lru_gen_deletion(page, lruvec);
146065 +                       VM_BUG_ON(!success);
146066 +                       add_page_to_lru_list(page, lruvec);
146068 +                       if (++batch_size == MAX_BATCH_SIZE)
146069 +                               return false;
146070 +               }
146071 +       }
146073 +       return true;
146077 + * For file page tracking, we enable/disable it according to the main switch.
146078 + * For anon page tracking, we only enabled it when the main switch is on and
146079 + * there is at least one swapfile; we disable it when there are no swapfiles
146080 + * regardless of the value of the main switch. Otherwise, we will eventually
146081 + * reach the max size of the sliding window and have to call inc_min_seq(),
146082 + * which brings an unnecessary overhead.
146083 + */
146084 +void lru_gen_set_state(bool enable, bool main, bool swap)
146086 +       struct mem_cgroup *memcg;
146088 +       mem_hotplug_begin();
146089 +       mutex_lock(&lru_gen_state_mutex);
146090 +       cgroup_lock();
146092 +       main = main && enable != lru_gen_enabled();
146093 +       swap = swap && !(enable ? lru_gen_nr_swapfiles++ : --lru_gen_nr_swapfiles);
146094 +       swap = swap && lru_gen_enabled();
146095 +       if (!main && !swap)
146096 +               goto unlock;
146098 +       if (main) {
146099 +               if (enable)
146100 +                       static_branch_enable(&lru_gen_static_key);
146101 +               else
146102 +                       static_branch_disable(&lru_gen_static_key);
146103 +       }
146105 +       memcg = mem_cgroup_iter(NULL, NULL, NULL);
146106 +       do {
146107 +               int nid;
146109 +               for_each_node_state(nid, N_MEMORY) {
146110 +                       struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
146111 +                       struct lrugen *lrugen = &lruvec->evictable;
146113 +                       spin_lock_irq(&lruvec->lru_lock);
146115 +                       VM_BUG_ON(!seq_is_valid(lruvec));
146116 +                       VM_BUG_ON(!state_is_valid(lruvec));
146118 +                       WRITE_ONCE(lrugen->enabled[0], lru_gen_enabled() && lru_gen_nr_swapfiles);
146119 +                       WRITE_ONCE(lrugen->enabled[1], lru_gen_enabled());
146121 +                       while (!(enable ? fill_lru_gen_lists(lruvec) :
146122 +                                         drain_lru_gen_lists(lruvec))) {
146123 +                               spin_unlock_irq(&lruvec->lru_lock);
146124 +                               cond_resched();
146125 +                               spin_lock_irq(&lruvec->lru_lock);
146126 +                       }
146128 +                       spin_unlock_irq(&lruvec->lru_lock);
146129 +               }
146131 +               cond_resched();
146132 +       } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
146133 +unlock:
146134 +       cgroup_unlock();
146135 +       mutex_unlock(&lru_gen_state_mutex);
146136 +       mem_hotplug_done();
146139 +static int __meminit __maybe_unused lru_gen_online_mem(struct notifier_block *self,
146140 +                                                      unsigned long action, void *arg)
146142 +       struct mem_cgroup *memcg;
146143 +       struct memory_notify *mnb = arg;
146144 +       int nid = mnb->status_change_nid;
146146 +       if (action != MEM_GOING_ONLINE || nid == NUMA_NO_NODE)
146147 +               return NOTIFY_DONE;
146149 +       mutex_lock(&lru_gen_state_mutex);
146150 +       cgroup_lock();
146152 +       memcg = mem_cgroup_iter(NULL, NULL, NULL);
146153 +       do {
146154 +               struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
146155 +               struct lrugen *lrugen = &lruvec->evictable;
146157 +               VM_BUG_ON(!seq_is_valid(lruvec));
146158 +               VM_BUG_ON(!state_is_valid(lruvec));
146160 +               WRITE_ONCE(lrugen->enabled[0], lru_gen_enabled() && lru_gen_nr_swapfiles);
146161 +               WRITE_ONCE(lrugen->enabled[1], lru_gen_enabled());
146162 +       } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
146164 +       cgroup_unlock();
146165 +       mutex_unlock(&lru_gen_state_mutex);
146167 +       return NOTIFY_DONE;
146170 +/******************************************************************************
146171 + *                          sysfs interface
146172 + ******************************************************************************/
146174 +static ssize_t show_lru_gen_spread(struct kobject *kobj, struct kobj_attribute *attr,
146175 +                                  char *buf)
146177 +       return sprintf(buf, "%d\n", READ_ONCE(lru_gen_spread));
146180 +static ssize_t store_lru_gen_spread(struct kobject *kobj, struct kobj_attribute *attr,
146181 +                                   const char *buf, size_t len)
146183 +       int spread;
146185 +       if (kstrtoint(buf, 10, &spread) || spread >= MAX_NR_GENS)
146186 +               return -EINVAL;
146188 +       WRITE_ONCE(lru_gen_spread, spread);
146190 +       return len;
146193 +static struct kobj_attribute lru_gen_spread_attr = __ATTR(
146194 +       spread, 0644, show_lru_gen_spread, store_lru_gen_spread
146197 +static ssize_t show_lru_gen_enabled(struct kobject *kobj, struct kobj_attribute *attr,
146198 +                                   char *buf)
146200 +       return snprintf(buf, PAGE_SIZE, "%ld\n", lru_gen_enabled());
146203 +static ssize_t store_lru_gen_enabled(struct kobject *kobj, struct kobj_attribute *attr,
146204 +                                    const char *buf, size_t len)
146206 +       int enable;
146208 +       if (kstrtoint(buf, 10, &enable))
146209 +               return -EINVAL;
146211 +       lru_gen_set_state(enable, true, false);
146213 +       return len;
146216 +static struct kobj_attribute lru_gen_enabled_attr = __ATTR(
146217 +       enabled, 0644, show_lru_gen_enabled, store_lru_gen_enabled
146220 +static struct attribute *lru_gen_attrs[] = {
146221 +       &lru_gen_spread_attr.attr,
146222 +       &lru_gen_enabled_attr.attr,
146223 +       NULL
146226 +static struct attribute_group lru_gen_attr_group = {
146227 +       .name = "lru_gen",
146228 +       .attrs = lru_gen_attrs,
146231 +/******************************************************************************
146232 + *                          debugfs interface
146233 + ******************************************************************************/
146235 +static void *lru_gen_seq_start(struct seq_file *m, loff_t *pos)
146237 +       struct mem_cgroup *memcg;
146238 +       loff_t nr_to_skip = *pos;
146240 +       m->private = kzalloc(PATH_MAX, GFP_KERNEL);
146241 +       if (!m->private)
146242 +               return ERR_PTR(-ENOMEM);
146244 +       memcg = mem_cgroup_iter(NULL, NULL, NULL);
146245 +       do {
146246 +               int nid;
146248 +               for_each_node_state(nid, N_MEMORY) {
146249 +                       if (!nr_to_skip--)
146250 +                               return mem_cgroup_lruvec(memcg, NODE_DATA(nid));
146251 +               }
146252 +       } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
146254 +       return NULL;
146257 +static void lru_gen_seq_stop(struct seq_file *m, void *v)
146259 +       if (!IS_ERR_OR_NULL(v))
146260 +               mem_cgroup_iter_break(NULL, lruvec_memcg(v));
146262 +       kfree(m->private);
146263 +       m->private = NULL;
146266 +static void *lru_gen_seq_next(struct seq_file *m, void *v, loff_t *pos)
146268 +       int nid = lruvec_pgdat(v)->node_id;
146269 +       struct mem_cgroup *memcg = lruvec_memcg(v);
146271 +       ++*pos;
146273 +       nid = next_memory_node(nid);
146274 +       if (nid == MAX_NUMNODES) {
146275 +               memcg = mem_cgroup_iter(NULL, memcg, NULL);
146276 +               if (!memcg)
146277 +                       return NULL;
146279 +               nid = first_memory_node;
146280 +       }
146282 +       return mem_cgroup_lruvec(memcg, NODE_DATA(nid));
146285 +static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec,
146286 +                                 unsigned long max_seq, unsigned long *min_seq,
146287 +                                 unsigned long seq)
146289 +       int i;
146290 +       int file, tier;
146291 +       int sid = sid_from_seq_or_gen(seq);
146292 +       struct lrugen *lrugen = &lruvec->evictable;
146293 +       int nid = lruvec_pgdat(lruvec)->node_id;
146294 +       struct mem_cgroup *memcg = lruvec_memcg(lruvec);
146295 +       struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
146297 +       for (tier = 0; tier < MAX_NR_TIERS; tier++) {
146298 +               seq_printf(m, "            %10d", tier);
146299 +               for (file = 0; file < ANON_AND_FILE; file++) {
146300 +                       unsigned long n[3] = {};
146302 +                       if (seq == max_seq) {
146303 +                               n[0] = READ_ONCE(lrugen->avg_refaulted[file][tier]);
146304 +                               n[1] = READ_ONCE(lrugen->avg_total[file][tier]);
146306 +                               seq_printf(m, " %10luR %10luT %10lu ", n[0], n[1], n[2]);
146307 +                       } else if (seq == min_seq[file] || NR_STAT_GENS > 1) {
146308 +                               n[0] = atomic_long_read(&lrugen->refaulted[sid][file][tier]);
146309 +                               n[1] = atomic_long_read(&lrugen->evicted[sid][file][tier]);
146310 +                               if (tier)
146311 +                                       n[2] = READ_ONCE(lrugen->activated[sid][file][tier - 1]);
146313 +                               seq_printf(m, " %10lur %10lue %10lua", n[0], n[1], n[2]);
146314 +                       } else
146315 +                               seq_puts(m, "          0           0           0 ");
146316 +               }
146317 +               seq_putc(m, '\n');
146318 +       }
146320 +       seq_puts(m, "                      ");
146321 +       for (i = 0; i < NR_MM_STATS; i++) {
146322 +               if (seq == max_seq && NR_STAT_GENS == 1)
146323 +                       seq_printf(m, " %10lu%c", READ_ONCE(mm_list->nodes[nid].stats[sid][i]),
146324 +                                  toupper(MM_STAT_CODES[i]));
146325 +               else if (seq != max_seq && NR_STAT_GENS > 1)
146326 +                       seq_printf(m, " %10lu%c", READ_ONCE(mm_list->nodes[nid].stats[sid][i]),
146327 +                                  MM_STAT_CODES[i]);
146328 +               else
146329 +                       seq_puts(m, "          0 ");
146330 +       }
146331 +       seq_putc(m, '\n');
146334 +static int lru_gen_seq_show(struct seq_file *m, void *v)
146336 +       unsigned long seq;
146337 +       bool full = !debugfs_real_fops(m->file)->write;
146338 +       struct lruvec *lruvec = v;
146339 +       struct lrugen *lrugen = &lruvec->evictable;
146340 +       int nid = lruvec_pgdat(lruvec)->node_id;
146341 +       struct mem_cgroup *memcg = lruvec_memcg(lruvec);
146342 +       DEFINE_MAX_SEQ();
146343 +       DEFINE_MIN_SEQ();
146345 +       if (nid == first_memory_node) {
146346 +#ifdef CONFIG_MEMCG
146347 +               if (memcg)
146348 +                       cgroup_path(memcg->css.cgroup, m->private, PATH_MAX);
146349 +#endif
146350 +               seq_printf(m, "memcg %5hu %s\n",
146351 +                          mem_cgroup_id(memcg), (char *)m->private);
146352 +       }
146354 +       seq_printf(m, " node %5d %10d\n", nid, atomic_read(&lrugen->priority));
146356 +       seq = full ? (max_seq < MAX_NR_GENS ? 0 : max_seq - MAX_NR_GENS + 1) :
146357 +                    min(min_seq[0], min_seq[1]);
146359 +       for (; seq <= max_seq; seq++) {
146360 +               int gen, file, zone;
146361 +               unsigned int msecs;
146363 +               gen = lru_gen_from_seq(seq);
146364 +               msecs = jiffies_to_msecs(jiffies - READ_ONCE(lrugen->timestamps[gen]));
146366 +               seq_printf(m, " %10lu %10u", seq, msecs);
146368 +               for (file = 0; file < ANON_AND_FILE; file++) {
146369 +                       long size = 0;
146371 +                       if (seq < min_seq[file]) {
146372 +                               seq_puts(m, "         -0 ");
146373 +                               continue;
146374 +                       }
146376 +                       for (zone = 0; zone < MAX_NR_ZONES; zone++)
146377 +                               size += READ_ONCE(lrugen->sizes[gen][file][zone]);
146379 +                       seq_printf(m, " %10lu ", max(size, 0L));
146380 +               }
146382 +               seq_putc(m, '\n');
146384 +               if (full)
146385 +                       lru_gen_seq_show_full(m, lruvec, max_seq, min_seq, seq);
146386 +       }
146388 +       return 0;
146391 +static const struct seq_operations lru_gen_seq_ops = {
146392 +       .start = lru_gen_seq_start,
146393 +       .stop = lru_gen_seq_stop,
146394 +       .next = lru_gen_seq_next,
146395 +       .show = lru_gen_seq_show,
146398 +static int advance_max_seq(struct lruvec *lruvec, unsigned long seq, int swappiness)
146400 +       struct mm_walk_args args = {};
146401 +       struct scan_control sc = {
146402 +               .target_mem_cgroup = lruvec_memcg(lruvec),
146403 +       };
146404 +       DEFINE_MAX_SEQ();
146406 +       if (seq == max_seq)
146407 +               walk_mm_list(lruvec, max_seq, &sc, swappiness, &args);
146409 +       return seq > max_seq ? -EINVAL : 0;
146412 +static int advance_min_seq(struct lruvec *lruvec, unsigned long seq, int swappiness,
146413 +                          unsigned long nr_to_reclaim)
146415 +       struct blk_plug plug;
146416 +       int err = -EINTR;
146417 +       long nr_to_scan = LONG_MAX;
146418 +       struct scan_control sc = {
146419 +               .nr_to_reclaim = nr_to_reclaim,
146420 +               .target_mem_cgroup = lruvec_memcg(lruvec),
146421 +               .may_writepage = 1,
146422 +               .may_unmap = 1,
146423 +               .may_swap = 1,
146424 +               .reclaim_idx = MAX_NR_ZONES - 1,
146425 +               .gfp_mask = GFP_KERNEL,
146426 +       };
146427 +       DEFINE_MAX_SEQ();
146429 +       if (seq >= max_seq - 1)
146430 +               return -EINVAL;
146432 +       blk_start_plug(&plug);
146434 +       while (!signal_pending(current)) {
146435 +               DEFINE_MIN_SEQ();
146437 +               if (seq < min(min_seq[!swappiness], min_seq[swappiness < 200]) ||
146438 +                   !evict_lru_gen_pages(lruvec, &sc, swappiness, &nr_to_scan)) {
146439 +                       err = 0;
146440 +                       break;
146441 +               }
146443 +               cond_resched();
146444 +       }
146446 +       blk_finish_plug(&plug);
146448 +       return err;
146451 +static int advance_seq(char cmd, int memcg_id, int nid, unsigned long seq,
146452 +                      int swappiness, unsigned long nr_to_reclaim)
146454 +       struct lruvec *lruvec;
146455 +       int err = -EINVAL;
146456 +       struct mem_cgroup *memcg = NULL;
146458 +       if (!mem_cgroup_disabled()) {
146459 +               rcu_read_lock();
146460 +               memcg = mem_cgroup_from_id(memcg_id);
146461 +#ifdef CONFIG_MEMCG
146462 +               if (memcg && !css_tryget(&memcg->css))
146463 +                       memcg = NULL;
146464 +#endif
146465 +               rcu_read_unlock();
146467 +               if (!memcg)
146468 +                       goto done;
146469 +       }
146470 +       if (memcg_id != mem_cgroup_id(memcg))
146471 +               goto done;
146473 +       if (nid < 0 || nid >= MAX_NUMNODES || !node_state(nid, N_MEMORY))
146474 +               goto done;
146476 +       lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
146478 +       if (swappiness == -1)
146479 +               swappiness = get_swappiness(lruvec);
146480 +       else if (swappiness > 200U)
146481 +               goto done;
146483 +       switch (cmd) {
146484 +       case '+':
146485 +               err = advance_max_seq(lruvec, seq, swappiness);
146486 +               break;
146487 +       case '-':
146488 +               err = advance_min_seq(lruvec, seq, swappiness, nr_to_reclaim);
146489 +               break;
146490 +       }
146491 +done:
146492 +       mem_cgroup_put(memcg);
146494 +       return err;
146497 +static ssize_t lru_gen_seq_write(struct file *file, const char __user *src,
146498 +                                size_t len, loff_t *pos)
146500 +       void *buf;
146501 +       char *cur, *next;
146502 +       int err = 0;
146504 +       buf = kvmalloc(len + 1, GFP_USER);
146505 +       if (!buf)
146506 +               return -ENOMEM;
146508 +       if (copy_from_user(buf, src, len)) {
146509 +               kvfree(buf);
146510 +               return -EFAULT;
146511 +       }
146513 +       next = buf;
146514 +       next[len] = '\0';
146516 +       while ((cur = strsep(&next, ",;\n"))) {
146517 +               int n;
146518 +               int end;
146519 +               char cmd;
146520 +               int memcg_id;
146521 +               int nid;
146522 +               unsigned long seq;
146523 +               int swappiness = -1;
146524 +               unsigned long nr_to_reclaim = -1;
146526 +               cur = skip_spaces(cur);
146527 +               if (!*cur)
146528 +                       continue;
146530 +               n = sscanf(cur, "%c %u %u %lu %n %u %n %lu %n", &cmd, &memcg_id, &nid,
146531 +                          &seq, &end, &swappiness, &end, &nr_to_reclaim, &end);
146532 +               if (n < 4 || cur[end]) {
146533 +                       err = -EINVAL;
146534 +                       break;
146535 +               }
146537 +               err = advance_seq(cmd, memcg_id, nid, seq, swappiness, nr_to_reclaim);
146538 +               if (err)
146539 +                       break;
146540 +       }
146542 +       kvfree(buf);
146544 +       return err ? : len;
146547 +static int lru_gen_seq_open(struct inode *inode, struct file *file)
146549 +       return seq_open(file, &lru_gen_seq_ops);
146552 +static const struct file_operations lru_gen_rw_fops = {
146553 +       .open = lru_gen_seq_open,
146554 +       .read = seq_read,
146555 +       .write = lru_gen_seq_write,
146556 +       .llseek = seq_lseek,
146557 +       .release = seq_release,
146560 +static const struct file_operations lru_gen_ro_fops = {
146561 +       .open = lru_gen_seq_open,
146562 +       .read = seq_read,
146563 +       .llseek = seq_lseek,
146564 +       .release = seq_release,
146567 +/******************************************************************************
146568 + *                          initialization
146569 + ******************************************************************************/
146571 +void lru_gen_init_lruvec(struct lruvec *lruvec)
146573 +       int i;
146574 +       int gen, file, zone;
146575 +       struct lrugen *lrugen = &lruvec->evictable;
146577 +       atomic_set(&lrugen->priority, DEF_PRIORITY);
146579 +       lrugen->max_seq = MIN_NR_GENS + 1;
146580 +       lrugen->enabled[0] = lru_gen_enabled() && lru_gen_nr_swapfiles;
146581 +       lrugen->enabled[1] = lru_gen_enabled();
146583 +       for (i = 0; i <= MIN_NR_GENS + 1; i++)
146584 +               lrugen->timestamps[i] = jiffies;
146586 +       for_each_gen_type_zone(gen, file, zone)
146587 +               INIT_LIST_HEAD(&lrugen->lists[gen][file][zone]);
146590 +static int __init init_lru_gen(void)
146592 +       BUILD_BUG_ON(MIN_NR_GENS + 1 >= MAX_NR_GENS);
146593 +       BUILD_BUG_ON(BIT(LRU_GEN_WIDTH) <= MAX_NR_GENS);
146594 +       BUILD_BUG_ON(sizeof(MM_STAT_CODES) != NR_MM_STATS + 1);
146595 +       BUILD_BUG_ON(PMD_SIZE / PAGE_SIZE != PTRS_PER_PTE);
146596 +       BUILD_BUG_ON(PUD_SIZE / PMD_SIZE != PTRS_PER_PMD);
146597 +       BUILD_BUG_ON(P4D_SIZE / PUD_SIZE != PTRS_PER_PUD);
146599 +       if (mem_cgroup_disabled()) {
146600 +               global_mm_list = alloc_mm_list();
146601 +               if (!global_mm_list) {
146602 +                       pr_err("lru_gen: failed to allocate global mm_struct list\n");
146603 +                       return -ENOMEM;
146604 +               }
146605 +       }
146607 +       if (hotplug_memory_notifier(lru_gen_online_mem, 0))
146608 +               pr_err("lru_gen: failed to subscribe hotplug notifications\n");
146610 +       if (sysfs_create_group(mm_kobj, &lru_gen_attr_group))
146611 +               pr_err("lru_gen: failed to create sysfs group\n");
146613 +       debugfs_create_file("lru_gen", 0644, NULL, NULL, &lru_gen_rw_fops);
146614 +       debugfs_create_file("lru_gen_full", 0444, NULL, NULL, &lru_gen_ro_fops);
146616 +       return 0;
146619 + * We want to run as early as possible because some debug code, e.g.,
146620 + * dma_resv_lockdep(), calls mm_alloc() and mmput(). We only depend on mm_kobj,
146621 + * which is initialized one stage earlier.
146622 + */
146623 +arch_initcall(init_lru_gen);
146625 +#endif /* CONFIG_LRU_GEN */
146626 diff --git a/mm/workingset.c b/mm/workingset.c
146627 index cd39902c1062..df363f9419fc 100644
146628 --- a/mm/workingset.c
146629 +++ b/mm/workingset.c
146630 @@ -168,9 +168,9 @@
146631   * refault distance will immediately activate the refaulting page.
146632   */
146634 -#define EVICTION_SHIFT ((BITS_PER_LONG - BITS_PER_XA_VALUE) +  \
146635 -                        1 + NODES_SHIFT + MEM_CGROUP_ID_SHIFT)
146636 -#define EVICTION_MASK  (~0UL >> EVICTION_SHIFT)
146637 +#define EVICTION_SHIFT         (BITS_PER_XA_VALUE - MEM_CGROUP_ID_SHIFT - NODES_SHIFT)
146638 +#define EVICTION_MASK          (BIT(EVICTION_SHIFT) - 1)
146639 +#define WORKINGSET_WIDTH       1
146642   * Eviction timestamps need to be able to cover the full range of
146643 @@ -182,38 +182,139 @@
146644   */
146645  static unsigned int bucket_order __read_mostly;
146647 -static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
146648 -                        bool workingset)
146649 +static void *pack_shadow(int memcg_id, struct pglist_data *pgdat, unsigned long val)
146651 -       eviction >>= bucket_order;
146652 -       eviction &= EVICTION_MASK;
146653 -       eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
146654 -       eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
146655 -       eviction = (eviction << 1) | workingset;
146656 +       val = (val << MEM_CGROUP_ID_SHIFT) | memcg_id;
146657 +       val = (val << NODES_SHIFT) | pgdat->node_id;
146659 -       return xa_mk_value(eviction);
146660 +       return xa_mk_value(val);
146663 -static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
146664 -                         unsigned long *evictionp, bool *workingsetp)
146665 +static unsigned long unpack_shadow(void *shadow, int *memcg_id, struct pglist_data **pgdat)
146667 -       unsigned long entry = xa_to_value(shadow);
146668 -       int memcgid, nid;
146669 -       bool workingset;
146670 +       unsigned long val = xa_to_value(shadow);
146672 +       *pgdat = NODE_DATA(val & (BIT(NODES_SHIFT) - 1));
146673 +       val >>= NODES_SHIFT;
146674 +       *memcg_id = val & (BIT(MEM_CGROUP_ID_SHIFT) - 1);
146676 +       return val >> MEM_CGROUP_ID_SHIFT;
146679 +#ifdef CONFIG_LRU_GEN
146681 +#if LRU_GEN_SHIFT + LRU_USAGE_SHIFT >= EVICTION_SHIFT
146682 +#error "Please try smaller NODES_SHIFT, NR_LRU_GENS and TIERS_PER_GEN configurations"
146683 +#endif
146685 +static void page_set_usage(struct page *page, int usage)
146687 +       unsigned long old_flags, new_flags;
146689 +       VM_BUG_ON(usage > BIT(LRU_USAGE_WIDTH));
146691 +       if (!usage)
146692 +               return;
146694 +       do {
146695 +               old_flags = READ_ONCE(page->flags);
146696 +               new_flags = (old_flags & ~LRU_USAGE_MASK) | LRU_TIER_FLAGS |
146697 +                           ((usage - 1UL) << LRU_USAGE_PGOFF);
146698 +               if (old_flags == new_flags)
146699 +                       break;
146700 +       } while (cmpxchg(&page->flags, old_flags, new_flags) != old_flags);
146703 +/* Return a token to be stored in the shadow entry of a page being evicted. */
146704 +static void *lru_gen_eviction(struct page *page)
146706 +       int sid, tier;
146707 +       unsigned long token;
146708 +       unsigned long min_seq;
146709 +       struct lruvec *lruvec;
146710 +       struct lrugen *lrugen;
146711 +       int file = page_is_file_lru(page);
146712 +       int usage = page_tier_usage(page);
146713 +       struct mem_cgroup *memcg = page_memcg(page);
146714 +       struct pglist_data *pgdat = page_pgdat(page);
146716 +       if (!lru_gen_enabled())
146717 +               return NULL;
146719 +       lruvec = mem_cgroup_lruvec(memcg, pgdat);
146720 +       lrugen = &lruvec->evictable;
146721 +       min_seq = READ_ONCE(lrugen->min_seq[file]);
146722 +       token = (min_seq << LRU_USAGE_SHIFT) | usage;
146724 +       sid = sid_from_seq_or_gen(min_seq);
146725 +       tier = lru_tier_from_usage(usage);
146726 +       atomic_long_add(thp_nr_pages(page), &lrugen->evicted[sid][file][tier]);
146728 +       return pack_shadow(mem_cgroup_id(memcg), pgdat, token);
146731 +/* Account a refaulted page based on the token stored in its shadow entry. */
146732 +static bool lru_gen_refault(struct page *page, void *shadow)
146734 +       int sid, tier, usage;
146735 +       int memcg_id;
146736 +       unsigned long token;
146737 +       unsigned long min_seq;
146738 +       struct lruvec *lruvec;
146739 +       struct lrugen *lrugen;
146740 +       struct pglist_data *pgdat;
146741 +       struct mem_cgroup *memcg;
146742 +       int file = page_is_file_lru(page);
146744 +       if (!lru_gen_enabled())
146745 +               return false;
146747 +       token = unpack_shadow(shadow, &memcg_id, &pgdat);
146748 +       if (page_pgdat(page) != pgdat)
146749 +               return true;
146751 +       rcu_read_lock();
146752 +       memcg = page_memcg_rcu(page);
146753 +       if (mem_cgroup_id(memcg) != memcg_id)
146754 +               goto unlock;
146756 +       usage = token & (BIT(LRU_USAGE_SHIFT) - 1);
146757 +       token >>= LRU_USAGE_SHIFT;
146759 +       lruvec = mem_cgroup_lruvec(memcg, pgdat);
146760 +       lrugen = &lruvec->evictable;
146761 +       min_seq = READ_ONCE(lrugen->min_seq[file]);
146762 +       if (token != (min_seq & (EVICTION_MASK >> LRU_USAGE_SHIFT)))
146763 +               goto unlock;
146765 -       workingset = entry & 1;
146766 -       entry >>= 1;
146767 -       nid = entry & ((1UL << NODES_SHIFT) - 1);
146768 -       entry >>= NODES_SHIFT;
146769 -       memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
146770 -       entry >>= MEM_CGROUP_ID_SHIFT;
146772 -       *memcgidp = memcgid;
146773 -       *pgdat = NODE_DATA(nid);
146774 -       *evictionp = entry << bucket_order;
146775 -       *workingsetp = workingset;
146776 +       page_set_usage(page, usage);
146778 +       sid = sid_from_seq_or_gen(min_seq);
146779 +       tier = lru_tier_from_usage(usage);
146780 +       atomic_long_add(thp_nr_pages(page), &lrugen->refaulted[sid][file][tier]);
146781 +       inc_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file);
146782 +       if (tier)
146783 +               inc_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file);
146784 +unlock:
146785 +       rcu_read_unlock();
146787 +       return true;
146790 +#else /* CONFIG_LRU_GEN */
146792 +static void *lru_gen_eviction(struct page *page)
146794 +       return NULL;
146797 +static bool lru_gen_refault(struct page *page, void *shadow)
146799 +       return false;
146802 +#endif /* CONFIG_LRU_GEN */
146804  /**
146805   * workingset_age_nonresident - age non-resident entries as LRU ages
146806   * @lruvec: the lruvec that was aged
146807 @@ -256,18 +357,25 @@ void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg)
146808         unsigned long eviction;
146809         struct lruvec *lruvec;
146810         int memcgid;
146811 +       void *shadow;
146813         /* Page is fully exclusive and pins page's memory cgroup pointer */
146814         VM_BUG_ON_PAGE(PageLRU(page), page);
146815         VM_BUG_ON_PAGE(page_count(page), page);
146816         VM_BUG_ON_PAGE(!PageLocked(page), page);
146818 +       shadow = lru_gen_eviction(page);
146819 +       if (shadow)
146820 +               return shadow;
146822         lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
146823         /* XXX: target_memcg can be NULL, go through lruvec */
146824         memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
146825         eviction = atomic_long_read(&lruvec->nonresident_age);
146826 +       eviction >>= bucket_order;
146827 +       eviction = (eviction << WORKINGSET_WIDTH) | PageWorkingset(page);
146828         workingset_age_nonresident(lruvec, thp_nr_pages(page));
146829 -       return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page));
146830 +       return pack_shadow(memcgid, pgdat, eviction);
146833  /**
146834 @@ -294,7 +402,10 @@ void workingset_refault(struct page *page, void *shadow)
146835         bool workingset;
146836         int memcgid;
146838 -       unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);
146839 +       if (lru_gen_refault(page, shadow))
146840 +               return;
146842 +       eviction = unpack_shadow(shadow, &memcgid, &pgdat);
146844         rcu_read_lock();
146845         /*
146846 @@ -318,6 +429,8 @@ void workingset_refault(struct page *page, void *shadow)
146847                 goto out;
146848         eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
146849         refault = atomic_long_read(&eviction_lruvec->nonresident_age);
146850 +       workingset = eviction & (BIT(WORKINGSET_WIDTH) - 1);
146851 +       eviction = (eviction >> WORKINGSET_WIDTH) << bucket_order;
146853         /*
146854          * Calculate the refault distance
146855 @@ -335,7 +448,7 @@ void workingset_refault(struct page *page, void *shadow)
146856          * longest time, so the occasional inappropriate activation
146857          * leading to pressure on the active list is not a problem.
146858          */
146859 -       refault_distance = (refault - eviction) & EVICTION_MASK;
146860 +       refault_distance = (refault - eviction) & (EVICTION_MASK >> WORKINGSET_WIDTH);
146862         /*
146863          * The activation decision for this page is made at the level
146864 @@ -594,7 +707,7 @@ static int __init workingset_init(void)
146865         unsigned int max_order;
146866         int ret;
146868 -       BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
146869 +       BUILD_BUG_ON(EVICTION_SHIFT < WORKINGSET_WIDTH);
146870         /*
146871          * Calculate the eviction bucket size to cover the longest
146872          * actionable refault distance, which is currently half of
146873 @@ -602,7 +715,7 @@ static int __init workingset_init(void)
146874          * some more pages at runtime, so keep working with up to
146875          * double the initial memory by using totalram_pages as-is.
146876          */
146877 -       timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
146878 +       timestamp_bits = EVICTION_SHIFT - WORKINGSET_WIDTH;
146879         max_order = fls_long(totalram_pages() - 1);
146880         if (max_order > timestamp_bits)
146881                 bucket_order = max_order - timestamp_bits;
146882 diff --git a/net/bluetooth/ecdh_helper.h b/net/bluetooth/ecdh_helper.h
146883 index a6f8d03d4aaf..830723971cf8 100644
146884 --- a/net/bluetooth/ecdh_helper.h
146885 +++ b/net/bluetooth/ecdh_helper.h
146886 @@ -25,6 +25,6 @@
146888  int compute_ecdh_secret(struct crypto_kpp *tfm, const u8 pair_public_key[64],
146889                         u8 secret[32]);
146890 -int set_ecdh_privkey(struct crypto_kpp *tfm, const u8 *private_key);
146891 +int set_ecdh_privkey(struct crypto_kpp *tfm, const u8 private_key[32]);
146892  int generate_ecdh_public_key(struct crypto_kpp *tfm, u8 public_key[64]);
146893  int generate_ecdh_keys(struct crypto_kpp *tfm, u8 public_key[64]);
146894 diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
146895 index 6ffa89e3ba0a..f72646690539 100644
146896 --- a/net/bluetooth/hci_conn.c
146897 +++ b/net/bluetooth/hci_conn.c
146898 @@ -1830,8 +1830,6 @@ u32 hci_conn_get_phy(struct hci_conn *conn)
146900         u32 phys = 0;
146902 -       hci_dev_lock(conn->hdev);
146904         /* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
146905          * Table 6.2: Packets defined for synchronous, asynchronous, and
146906          * CSB logical transport types.
146907 @@ -1928,7 +1926,5 @@ u32 hci_conn_get_phy(struct hci_conn *conn)
146908                 break;
146909         }
146911 -       hci_dev_unlock(conn->hdev);
146913         return phys;
146915 diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
146916 index 67668be3461e..82f4973a011d 100644
146917 --- a/net/bluetooth/hci_event.c
146918 +++ b/net/bluetooth/hci_event.c
146919 @@ -5005,6 +5005,7 @@ static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
146920                 return;
146922         hchan->handle = le16_to_cpu(ev->handle);
146923 +       hchan->amp = true;
146925         BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
146927 @@ -5037,7 +5038,7 @@ static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
146928         hci_dev_lock(hdev);
146930         hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
146931 -       if (!hchan)
146932 +       if (!hchan || !hchan->amp)
146933                 goto unlock;
146935         amp_destroy_logical_link(hchan, ev->reason);
146936 @@ -5911,7 +5912,7 @@ static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
146938         BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
146940 -       if (!ev->status)
146941 +       if (ev->status)
146942                 return;
146944         hci_dev_lock(hdev);
146945 diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
146946 index e55976db4403..805ce546b813 100644
146947 --- a/net/bluetooth/hci_request.c
146948 +++ b/net/bluetooth/hci_request.c
146949 @@ -272,12 +272,16 @@ int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
146951         int ret;
146953 -       if (!test_bit(HCI_UP, &hdev->flags))
146954 -               return -ENETDOWN;
146956         /* Serialize all requests */
146957         hci_req_sync_lock(hdev);
146958 -       ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
146959 +       /* check the state after obtaing the lock to protect the HCI_UP
146960 +        * against any races from hci_dev_do_close when the controller
146961 +        * gets removed.
146962 +        */
146963 +       if (test_bit(HCI_UP, &hdev->flags))
146964 +               ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
146965 +       else
146966 +               ret = -ENETDOWN;
146967         hci_req_sync_unlock(hdev);
146969         return ret;
146970 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
146971 index 72c2f5226d67..53ddbee459b9 100644
146972 --- a/net/bluetooth/l2cap_core.c
146973 +++ b/net/bluetooth/l2cap_core.c
146974 @@ -451,6 +451,8 @@ struct l2cap_chan *l2cap_chan_create(void)
146975         if (!chan)
146976                 return NULL;
146978 +       skb_queue_head_init(&chan->tx_q);
146979 +       skb_queue_head_init(&chan->srej_q);
146980         mutex_init(&chan->lock);
146982         /* Set default lock nesting level */
146983 @@ -516,7 +518,9 @@ void l2cap_chan_set_defaults(struct l2cap_chan *chan)
146984         chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
146985         chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
146986         chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
146988         chan->conf_state = 0;
146989 +       set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
146991         set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
146993 diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
146994 index f1b1edd0b697..c99d65ef13b1 100644
146995 --- a/net/bluetooth/l2cap_sock.c
146996 +++ b/net/bluetooth/l2cap_sock.c
146997 @@ -179,9 +179,17 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
146998         struct l2cap_chan *chan = l2cap_pi(sk)->chan;
146999         struct sockaddr_l2 la;
147000         int len, err = 0;
147001 +       bool zapped;
147003         BT_DBG("sk %p", sk);
147005 +       lock_sock(sk);
147006 +       zapped = sock_flag(sk, SOCK_ZAPPED);
147007 +       release_sock(sk);
147009 +       if (zapped)
147010 +               return -EINVAL;
147012         if (!addr || alen < offsetofend(struct sockaddr, sa_family) ||
147013             addr->sa_family != AF_BLUETOOTH)
147014                 return -EINVAL;
147015 diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
147016 index 74971b4bd457..939c6f77fecc 100644
147017 --- a/net/bluetooth/mgmt.c
147018 +++ b/net/bluetooth/mgmt.c
147019 @@ -7976,7 +7976,6 @@ static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
147020                 goto unlock;
147021         }
147023 -       hdev->cur_adv_instance = cp->instance;
147024         /* Submit request for advertising params if ext adv available */
147025         if (ext_adv_capable(hdev)) {
147026                 hci_req_init(&req, hdev);
147027 diff --git a/net/bridge/br_arp_nd_proxy.c b/net/bridge/br_arp_nd_proxy.c
147028 index dfec65eca8a6..3db1def4437b 100644
147029 --- a/net/bridge/br_arp_nd_proxy.c
147030 +++ b/net/bridge/br_arp_nd_proxy.c
147031 @@ -160,7 +160,9 @@ void br_do_proxy_suppress_arp(struct sk_buff *skb, struct net_bridge *br,
147032         if (br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) {
147033                 if (p && (p->flags & BR_NEIGH_SUPPRESS))
147034                         return;
147035 -               if (ipv4_is_zeronet(sip) || sip == tip) {
147036 +               if (parp->ar_op != htons(ARPOP_RREQUEST) &&
147037 +                   parp->ar_op != htons(ARPOP_RREPLY) &&
147038 +                   (ipv4_is_zeronet(sip) || sip == tip)) {
147039                         /* prevent flooding to neigh suppress ports */
147040                         BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1;
147041                         return;
147042 diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
147043 index 9d265447d654..226bb05c3b42 100644
147044 --- a/net/bridge/br_multicast.c
147045 +++ b/net/bridge/br_multicast.c
147046 @@ -1593,7 +1593,8 @@ static void br_multicast_port_group_rexmit(struct timer_list *t)
147047         spin_unlock(&br->multicast_lock);
147050 -static void br_mc_disabled_update(struct net_device *dev, bool value)
147051 +static int br_mc_disabled_update(struct net_device *dev, bool value,
147052 +                                struct netlink_ext_ack *extack)
147054         struct switchdev_attr attr = {
147055                 .orig_dev = dev,
147056 @@ -1602,11 +1603,13 @@ static void br_mc_disabled_update(struct net_device *dev, bool value)
147057                 .u.mc_disabled = !value,
147058         };
147060 -       switchdev_port_attr_set(dev, &attr, NULL);
147061 +       return switchdev_port_attr_set(dev, &attr, extack);
147064  int br_multicast_add_port(struct net_bridge_port *port)
147066 +       int err;
147068         port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
147069         port->multicast_eht_hosts_limit = BR_MCAST_DEFAULT_EHT_HOSTS_LIMIT;
147071 @@ -1618,8 +1621,12 @@ int br_multicast_add_port(struct net_bridge_port *port)
147072         timer_setup(&port->ip6_own_query.timer,
147073                     br_ip6_multicast_port_query_expired, 0);
147074  #endif
147075 -       br_mc_disabled_update(port->dev,
147076 -                             br_opt_get(port->br, BROPT_MULTICAST_ENABLED));
147077 +       err = br_mc_disabled_update(port->dev,
147078 +                                   br_opt_get(port->br,
147079 +                                              BROPT_MULTICAST_ENABLED),
147080 +                                   NULL);
147081 +       if (err && err != -EOPNOTSUPP)
147082 +               return err;
147084         port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
147085         if (!port->mcast_stats)
147086 @@ -3152,25 +3159,14 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
147089  #if IS_ENABLED(CONFIG_IPV6)
147090 -static int br_ip6_multicast_mrd_rcv(struct net_bridge *br,
147091 -                                   struct net_bridge_port *port,
147092 -                                   struct sk_buff *skb)
147093 +static void br_ip6_multicast_mrd_rcv(struct net_bridge *br,
147094 +                                    struct net_bridge_port *port,
147095 +                                    struct sk_buff *skb)
147097 -       int ret;
147099 -       if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
147100 -               return -ENOMSG;
147102 -       ret = ipv6_mc_check_icmpv6(skb);
147103 -       if (ret < 0)
147104 -               return ret;
147106         if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
147107 -               return -ENOMSG;
147108 +               return;
147110         br_multicast_mark_router(br, port);
147112 -       return 0;
147115  static int br_multicast_ipv6_rcv(struct net_bridge *br,
147116 @@ -3184,18 +3180,12 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
147118         err = ipv6_mc_check_mld(skb);
147120 -       if (err == -ENOMSG) {
147121 +       if (err == -ENOMSG || err == -ENODATA) {
147122                 if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
147123                         BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
147125 -               if (ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr)) {
147126 -                       err = br_ip6_multicast_mrd_rcv(br, port, skb);
147128 -                       if (err < 0 && err != -ENOMSG) {
147129 -                               br_multicast_err_count(br, port, skb->protocol);
147130 -                               return err;
147131 -                       }
147132 -               }
147133 +               if (err == -ENODATA &&
147134 +                   ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr))
147135 +                       br_ip6_multicast_mrd_rcv(br, port, skb);
147137                 return 0;
147138         } else if (err < 0) {
147139 @@ -3560,16 +3550,23 @@ static void br_multicast_start_querier(struct net_bridge *br,
147140         rcu_read_unlock();
147143 -int br_multicast_toggle(struct net_bridge *br, unsigned long val)
147144 +int br_multicast_toggle(struct net_bridge *br, unsigned long val,
147145 +                       struct netlink_ext_ack *extack)
147147         struct net_bridge_port *port;
147148         bool change_snoopers = false;
147149 +       int err = 0;
147151         spin_lock_bh(&br->multicast_lock);
147152         if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
147153                 goto unlock;
147155 -       br_mc_disabled_update(br->dev, val);
147156 +       err = br_mc_disabled_update(br->dev, val, extack);
147157 +       if (err == -EOPNOTSUPP)
147158 +               err = 0;
147159 +       if (err)
147160 +               goto unlock;
147162         br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
147163         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
147164                 change_snoopers = true;
147165 @@ -3607,7 +3604,7 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
147166                         br_multicast_leave_snoopers(br);
147167         }
147169 -       return 0;
147170 +       return err;
147173  bool br_multicast_enabled(const struct net_device *dev)
147174 diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
147175 index f2b1343f8332..e4e6e991313e 100644
147176 --- a/net/bridge/br_netlink.c
147177 +++ b/net/bridge/br_netlink.c
147178 @@ -103,8 +103,9 @@ static size_t br_get_link_af_size_filtered(const struct net_device *dev,
147180         rcu_read_lock();
147181         if (netif_is_bridge_port(dev)) {
147182 -               p = br_port_get_rcu(dev);
147183 -               vg = nbp_vlan_group_rcu(p);
147184 +               p = br_port_get_check_rcu(dev);
147185 +               if (p)
147186 +                       vg = nbp_vlan_group_rcu(p);
147187         } else if (dev->priv_flags & IFF_EBRIDGE) {
147188                 br = netdev_priv(dev);
147189                 vg = br_vlan_group_rcu(br);
147190 @@ -1293,7 +1294,9 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
147191         if (data[IFLA_BR_MCAST_SNOOPING]) {
147192                 u8 mcast_snooping = nla_get_u8(data[IFLA_BR_MCAST_SNOOPING]);
147194 -               br_multicast_toggle(br, mcast_snooping);
147195 +               err = br_multicast_toggle(br, mcast_snooping, extack);
147196 +               if (err)
147197 +                       return err;
147198         }
147200         if (data[IFLA_BR_MCAST_QUERY_USE_IFADDR]) {
147201 diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
147202 index d7d167e10b70..af3430c2d6ea 100644
147203 --- a/net/bridge/br_private.h
147204 +++ b/net/bridge/br_private.h
147205 @@ -810,7 +810,8 @@ void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
147206                         struct sk_buff *skb, bool local_rcv, bool local_orig);
147207  int br_multicast_set_router(struct net_bridge *br, unsigned long val);
147208  int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val);
147209 -int br_multicast_toggle(struct net_bridge *br, unsigned long val);
147210 +int br_multicast_toggle(struct net_bridge *br, unsigned long val,
147211 +                       struct netlink_ext_ack *extack);
147212  int br_multicast_set_querier(struct net_bridge *br, unsigned long val);
147213  int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val);
147214  int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val);
147215 diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
147216 index 072e29840082..381467b691d5 100644
147217 --- a/net/bridge/br_sysfs_br.c
147218 +++ b/net/bridge/br_sysfs_br.c
147219 @@ -409,17 +409,11 @@ static ssize_t multicast_snooping_show(struct device *d,
147220         return sprintf(buf, "%d\n", br_opt_get(br, BROPT_MULTICAST_ENABLED));
147223 -static int toggle_multicast(struct net_bridge *br, unsigned long val,
147224 -                           struct netlink_ext_ack *extack)
147226 -       return br_multicast_toggle(br, val);
147229  static ssize_t multicast_snooping_store(struct device *d,
147230                                         struct device_attribute *attr,
147231                                         const char *buf, size_t len)
147233 -       return store_bridge_parm(d, buf, len, toggle_multicast);
147234 +       return store_bridge_parm(d, buf, len, br_multicast_toggle);
147236  static DEVICE_ATTR_RW(multicast_snooping);
147238 diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
147239 index ca44c327bace..79641c4afee9 100644
147240 --- a/net/ceph/auth_x.c
147241 +++ b/net/ceph/auth_x.c
147242 @@ -526,7 +526,7 @@ static int ceph_x_build_request(struct ceph_auth_client *ac,
147243                 if (ret < 0)
147244                         return ret;
147246 -               auth->struct_v = 2;  /* nautilus+ */
147247 +               auth->struct_v = 3;  /* nautilus+ */
147248                 auth->key = 0;
147249                 for (u = (u64 *)enc_buf; u + 1 <= (u64 *)(enc_buf + ret); u++)
147250                         auth->key ^= *(__le64 *)u;
147251 diff --git a/net/ceph/decode.c b/net/ceph/decode.c
147252 index b44f7651be04..bc109a1a4616 100644
147253 --- a/net/ceph/decode.c
147254 +++ b/net/ceph/decode.c
147255 @@ -4,6 +4,7 @@
147256  #include <linux/inet.h>
147258  #include <linux/ceph/decode.h>
147259 +#include <linux/ceph/messenger.h>  /* for ceph_pr_addr() */
147261  static int
147262  ceph_decode_entity_addr_versioned(void **p, void *end,
147263 @@ -110,6 +111,7 @@ int ceph_decode_entity_addrvec(void **p, void *end, bool msgr2,
147264         }
147266         ceph_decode_32_safe(p, end, addr_cnt, e_inval);
147267 +       dout("%s addr_cnt %d\n", __func__, addr_cnt);
147269         found = false;
147270         for (i = 0; i < addr_cnt; i++) {
147271 @@ -117,6 +119,7 @@ int ceph_decode_entity_addrvec(void **p, void *end, bool msgr2,
147272                 if (ret)
147273                         return ret;
147275 +               dout("%s i %d addr %s\n", __func__, i, ceph_pr_addr(&tmp_addr));
147276                 if (tmp_addr.type == my_type) {
147277                         if (found) {
147278                                 pr_err("another match of type %d in addrvec\n",
147279 @@ -128,13 +131,18 @@ int ceph_decode_entity_addrvec(void **p, void *end, bool msgr2,
147280                         found = true;
147281                 }
147282         }
147283 -       if (!found && addr_cnt != 0) {
147284 -               pr_err("no match of type %d in addrvec\n",
147285 -                      le32_to_cpu(my_type));
147286 -               return -ENOENT;
147287 -       }
147289 -       return 0;
147290 +       if (found)
147291 +               return 0;
147293 +       if (!addr_cnt)
147294 +               return 0;  /* normal -- e.g. unused OSD id/slot */
147296 +       if (addr_cnt == 1 && !memchr_inv(&tmp_addr, 0, sizeof(tmp_addr)))
147297 +               return 0;  /* weird but effectively the same as !addr_cnt */
147299 +       pr_err("no match of type %d in addrvec\n", le32_to_cpu(my_type));
147300 +       return -ENOENT;
147302  e_inval:
147303         return -EINVAL;
147304 diff --git a/net/core/dev.c b/net/core/dev.c
147305 index 1f79b9aa9a3f..70829c568645 100644
147306 --- a/net/core/dev.c
147307 +++ b/net/core/dev.c
147308 @@ -4672,10 +4672,10 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
147309         void *orig_data, *orig_data_end, *hard_start;
147310         struct netdev_rx_queue *rxqueue;
147311         u32 metalen, act = XDP_DROP;
147312 +       bool orig_bcast, orig_host;
147313         u32 mac_len, frame_sz;
147314         __be16 orig_eth_type;
147315         struct ethhdr *eth;
147316 -       bool orig_bcast;
147317         int off;
147319         /* Reinjected packets coming from act_mirred or similar should
147320 @@ -4722,6 +4722,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
147321         orig_data_end = xdp->data_end;
147322         orig_data = xdp->data;
147323         eth = (struct ethhdr *)xdp->data;
147324 +       orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr);
147325         orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
147326         orig_eth_type = eth->h_proto;
147328 @@ -4749,8 +4750,11 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
147329         /* check if XDP changed eth hdr such SKB needs update */
147330         eth = (struct ethhdr *)xdp->data;
147331         if ((orig_eth_type != eth->h_proto) ||
147332 +           (orig_host != ether_addr_equal_64bits(eth->h_dest,
147333 +                                                 skb->dev->dev_addr)) ||
147334             (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
147335                 __skb_push(skb, ETH_HLEN);
147336 +               skb->pkt_type = PACKET_HOST;
147337                 skb->protocol = eth_type_trans(skb, skb->dev);
147338         }
147340 @@ -5914,7 +5918,7 @@ static struct list_head *gro_list_prepare(struct napi_struct *napi,
147341         return head;
147344 -static void skb_gro_reset_offset(struct sk_buff *skb)
147345 +static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
147347         const struct skb_shared_info *pinfo = skb_shinfo(skb);
147348         const skb_frag_t *frag0 = &pinfo->frags[0];
147349 @@ -5925,7 +5929,7 @@ static void skb_gro_reset_offset(struct sk_buff *skb)
147351         if (!skb_headlen(skb) && pinfo->nr_frags &&
147352             !PageHighMem(skb_frag_page(frag0)) &&
147353 -           (!NET_IP_ALIGN || !(skb_frag_off(frag0) & 3))) {
147354 +           (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
147355                 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
147356                 NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
147357                                                     skb_frag_size(frag0),
147358 @@ -6143,7 +6147,7 @@ gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
147359         skb_mark_napi_id(skb, napi);
147360         trace_napi_gro_receive_entry(skb);
147362 -       skb_gro_reset_offset(skb);
147363 +       skb_gro_reset_offset(skb, 0);
147365         ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
147366         trace_napi_gro_receive_exit(ret);
147367 @@ -6232,7 +6236,7 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
147368         napi->skb = NULL;
147370         skb_reset_mac_header(skb);
147371 -       skb_gro_reset_offset(skb);
147372 +       skb_gro_reset_offset(skb, hlen);
147374         if (unlikely(skb_gro_header_hard(skb, hlen))) {
147375                 eth = skb_gro_header_slow(skb, hlen, 0);
147376 diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
147377 index a96a4f5de0ce..3f36b04d86a0 100644
147378 --- a/net/core/flow_dissector.c
147379 +++ b/net/core/flow_dissector.c
147380 @@ -828,8 +828,10 @@ static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys,
147381                 key_addrs = skb_flow_dissector_target(flow_dissector,
147382                                                       FLOW_DISSECTOR_KEY_IPV6_ADDRS,
147383                                                       target_container);
147384 -               memcpy(&key_addrs->v6addrs, &flow_keys->ipv6_src,
147385 -                      sizeof(key_addrs->v6addrs));
147386 +               memcpy(&key_addrs->v6addrs.src, &flow_keys->ipv6_src,
147387 +                      sizeof(key_addrs->v6addrs.src));
147388 +               memcpy(&key_addrs->v6addrs.dst, &flow_keys->ipv6_dst,
147389 +                      sizeof(key_addrs->v6addrs.dst));
147390                 key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
147391         }
147393 diff --git a/net/core/page_pool.c b/net/core/page_pool.c
147394 index ad8b0707af04..f014fd8c19a6 100644
147395 --- a/net/core/page_pool.c
147396 +++ b/net/core/page_pool.c
147397 @@ -174,8 +174,10 @@ static void page_pool_dma_sync_for_device(struct page_pool *pool,
147398                                           struct page *page,
147399                                           unsigned int dma_sync_size)
147401 +       dma_addr_t dma_addr = page_pool_get_dma_addr(page);
147403         dma_sync_size = min(dma_sync_size, pool->p.max_len);
147404 -       dma_sync_single_range_for_device(pool->p.dev, page->dma_addr,
147405 +       dma_sync_single_range_for_device(pool->p.dev, dma_addr,
147406                                          pool->p.offset, dma_sync_size,
147407                                          pool->p.dma_dir);
147409 @@ -226,7 +228,7 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
147410                 put_page(page);
147411                 return NULL;
147412         }
147413 -       page->dma_addr = dma;
147414 +       page_pool_set_dma_addr(page, dma);
147416         if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
147417                 page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
147418 @@ -294,13 +296,13 @@ void page_pool_release_page(struct page_pool *pool, struct page *page)
147419                  */
147420                 goto skip_dma_unmap;
147422 -       dma = page->dma_addr;
147423 +       dma = page_pool_get_dma_addr(page);
147425 -       /* When page is unmapped, it cannot be returned our pool */
147426 +       /* When page is unmapped, it cannot be returned to our pool */
147427         dma_unmap_page_attrs(pool->p.dev, dma,
147428                              PAGE_SIZE << pool->p.order, pool->p.dma_dir,
147429                              DMA_ATTR_SKIP_CPU_SYNC);
147430 -       page->dma_addr = 0;
147431 +       page_pool_set_dma_addr(page, 0);
147432  skip_dma_unmap:
147433         /* This may be the last page returned, releasing the pool, so
147434          * it is not safe to reference pool afterwards.
147435 diff --git a/net/core/pktgen.c b/net/core/pktgen.c
147436 index 3fba429f1f57..9a3a9a6eb837 100644
147437 --- a/net/core/pktgen.c
147438 +++ b/net/core/pktgen.c
147439 @@ -1894,7 +1894,7 @@ static void pktgen_mark_device(const struct pktgen_net *pn, const char *ifname)
147440                 mutex_unlock(&pktgen_thread_lock);
147441                 pr_debug("%s: waiting for %s to disappear....\n",
147442                          __func__, ifname);
147443 -               schedule_timeout_interruptible(msecs_to_jiffies(msec_per_try));
147444 +               schedule_msec_hrtimeout_interruptible((msec_per_try));
147445                 mutex_lock(&pktgen_thread_lock);
147447                 if (++i >= max_tries) {
147448 diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
147449 index 771688e1b0da..2603966da904 100644
147450 --- a/net/ethtool/ioctl.c
147451 +++ b/net/ethtool/ioctl.c
147452 @@ -489,7 +489,7 @@ store_link_ksettings_for_user(void __user *to,
147454         struct ethtool_link_usettings link_usettings;
147456 -       memcpy(&link_usettings.base, &from->base, sizeof(link_usettings));
147457 +       memcpy(&link_usettings, from, sizeof(link_usettings));
147458         bitmap_to_arr32(link_usettings.link_modes.supported,
147459                         from->link_modes.supported,
147460                         __ETHTOOL_LINK_MODE_MASK_NBITS);
147461 diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
147462 index 50d3c8896f91..25a55086d2b6 100644
147463 --- a/net/ethtool/netlink.c
147464 +++ b/net/ethtool/netlink.c
147465 @@ -384,7 +384,8 @@ static int ethnl_default_dump_one(struct sk_buff *skb, struct net_device *dev,
147466         int ret;
147468         ehdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
147469 -                          &ethtool_genl_family, 0, ctx->ops->reply_cmd);
147470 +                          &ethtool_genl_family, NLM_F_MULTI,
147471 +                          ctx->ops->reply_cmd);
147472         if (!ehdr)
147473                 return -EMSGSIZE;
147475 diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
147476 index b218e4594009..6852e9bccf5b 100644
147477 --- a/net/hsr/hsr_forward.c
147478 +++ b/net/hsr/hsr_forward.c
147479 @@ -520,6 +520,10 @@ static int fill_frame_info(struct hsr_frame_info *frame,
147480         struct ethhdr *ethhdr;
147481         __be16 proto;
147483 +       /* Check if skb contains hsr_ethhdr */
147484 +       if (skb->mac_len < sizeof(struct hsr_ethhdr))
147485 +               return -EINVAL;
147487         memset(frame, 0, sizeof(*frame));
147488         frame->is_supervision = is_supervision_frame(port->hsr, skb);
147489         frame->node_src = hsr_get_node(port, &hsr->node_db, skb,
147490 diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
147491 index 87983e70f03f..a833a7a67ce7 100644
147492 --- a/net/ipv4/Kconfig
147493 +++ b/net/ipv4/Kconfig
147494 @@ -669,6 +669,24 @@ config TCP_CONG_BBR
147495           AQM schemes that do not provide a delay signal. It requires the fq
147496           ("Fair Queue") pacing packet scheduler.
147498 +config TCP_CONG_BBR2
147499 +       tristate "BBR2 TCP"
147500 +       default n
147501 +       help
147503 +       BBR2 TCP congestion control is a model-based congestion control
147504 +       algorithm that aims to maximize network utilization, keep queues and
147505 +       retransmit rates low, and to be able to coexist with Reno/CUBIC in
147506 +       common scenarios. It builds an explicit model of the network path.  It
147507 +       tolerates a targeted degree of random packet loss and delay that are
147508 +       unrelated to congestion. It can operate over LAN, WAN, cellular, wifi,
147509 +       or cable modem links, and can use DCTCP-L4S-style ECN signals.  It can
147510 +       coexist with flows that use loss-based congestion control, and can
147511 +       operate with shallow buffers, deep buffers, bufferbloat, policers, or
147512 +       AQM schemes that do not provide a delay signal. It requires pacing,
147513 +       using either TCP internal pacing or the fq ("Fair Queue") pacing packet
147514 +       scheduler.
147516  choice
147517         prompt "Default TCP congestion control"
147518         default DEFAULT_CUBIC
147519 @@ -706,6 +724,9 @@ choice
147520         config DEFAULT_BBR
147521                 bool "BBR" if TCP_CONG_BBR=y
147523 +       config DEFAULT_BBR2
147524 +               bool "BBR2" if TCP_CONG_BBR2=y
147526         config DEFAULT_RENO
147527                 bool "Reno"
147528  endchoice
147529 @@ -730,6 +751,7 @@ config DEFAULT_TCP_CONG
147530         default "dctcp" if DEFAULT_DCTCP
147531         default "cdg" if DEFAULT_CDG
147532         default "bbr" if DEFAULT_BBR
147533 +       default "bbr2" if DEFAULT_BBR2
147534         default "cubic"
147536  config TCP_MD5SIG
147537 diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
147538 index 5b77a46885b9..8c5779dba462 100644
147539 --- a/net/ipv4/Makefile
147540 +++ b/net/ipv4/Makefile
147541 @@ -46,6 +46,7 @@ obj-$(CONFIG_INET_TCP_DIAG) += tcp_diag.o
147542  obj-$(CONFIG_INET_UDP_DIAG) += udp_diag.o
147543  obj-$(CONFIG_INET_RAW_DIAG) += raw_diag.o
147544  obj-$(CONFIG_TCP_CONG_BBR) += tcp_bbr.o
147545 +obj-$(CONFIG_TCP_CONG_BBR2) += tcp_bbr2.o
147546  obj-$(CONFIG_TCP_CONG_BIC) += tcp_bic.o
147547  obj-$(CONFIG_TCP_CONG_CDG) += tcp_cdg.o
147548  obj-$(CONFIG_TCP_CONG_CUBIC) += tcp_cubic.o
147549 diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c
147550 index d520e61649c8..22129c1c56a2 100644
147551 --- a/net/ipv4/bpf_tcp_ca.c
147552 +++ b/net/ipv4/bpf_tcp_ca.c
147553 @@ -16,7 +16,7 @@ static u32 optional_ops[] = {
147554         offsetof(struct tcp_congestion_ops, cwnd_event),
147555         offsetof(struct tcp_congestion_ops, in_ack_event),
147556         offsetof(struct tcp_congestion_ops, pkts_acked),
147557 -       offsetof(struct tcp_congestion_ops, min_tso_segs),
147558 +       offsetof(struct tcp_congestion_ops, tso_segs),
147559         offsetof(struct tcp_congestion_ops, sndbuf_expand),
147560         offsetof(struct tcp_congestion_ops, cong_control),
147562 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
147563 index bba150fdd265..d635b4f32d34 100644
147564 --- a/net/ipv4/route.c
147565 +++ b/net/ipv4/route.c
147566 @@ -66,6 +66,7 @@
147567  #include <linux/types.h>
147568  #include <linux/kernel.h>
147569  #include <linux/mm.h>
147570 +#include <linux/memblock.h>
147571  #include <linux/string.h>
147572  #include <linux/socket.h>
147573  #include <linux/sockios.h>
147574 @@ -478,8 +479,10 @@ static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
147575         __ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
147578 -#define IP_IDENTS_SZ 2048u
147580 +/* Hash tables of size 2048..262144 depending on RAM size.
147581 + * Each bucket uses 8 bytes.
147582 + */
147583 +static u32 ip_idents_mask __read_mostly;
147584  static atomic_t *ip_idents __read_mostly;
147585  static u32 *ip_tstamps __read_mostly;
147587 @@ -489,12 +492,16 @@ static u32 *ip_tstamps __read_mostly;
147588   */
147589  u32 ip_idents_reserve(u32 hash, int segs)
147591 -       u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
147592 -       atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
147593 -       u32 old = READ_ONCE(*p_tstamp);
147594 -       u32 now = (u32)jiffies;
147595 +       u32 bucket, old, now = (u32)jiffies;
147596 +       atomic_t *p_id;
147597 +       u32 *p_tstamp;
147598         u32 delta = 0;
147600 +       bucket = hash & ip_idents_mask;
147601 +       p_tstamp = ip_tstamps + bucket;
147602 +       p_id = ip_idents + bucket;
147603 +       old = READ_ONCE(*p_tstamp);
147605         if (old != now && cmpxchg(p_tstamp, old, now) == old)
147606                 delta = prandom_u32_max(now - old);
147608 @@ -3553,18 +3560,25 @@ struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
147610  int __init ip_rt_init(void)
147612 +       void *idents_hash;
147613         int cpu;
147615 -       ip_idents = kmalloc_array(IP_IDENTS_SZ, sizeof(*ip_idents),
147616 -                                 GFP_KERNEL);
147617 -       if (!ip_idents)
147618 -               panic("IP: failed to allocate ip_idents\n");
147619 +       /* For modern hosts, this will use 2 MB of memory */
147620 +       idents_hash = alloc_large_system_hash("IP idents",
147621 +                                             sizeof(*ip_idents) + sizeof(*ip_tstamps),
147622 +                                             0,
147623 +                                             16, /* one bucket per 64 KB */
147624 +                                             HASH_ZERO,
147625 +                                             NULL,
147626 +                                             &ip_idents_mask,
147627 +                                             2048,
147628 +                                             256*1024);
147630 +       ip_idents = idents_hash;
147632 -       prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
147633 +       prandom_bytes(ip_idents, (ip_idents_mask + 1) * sizeof(*ip_idents));
147635 -       ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
147636 -       if (!ip_tstamps)
147637 -               panic("IP: failed to allocate ip_tstamps\n");
147638 +       ip_tstamps = idents_hash + (ip_idents_mask + 1) * sizeof(*ip_idents);
147640         for_each_possible_cpu(cpu) {
147641                 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
147642 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
147643 index de7cc8445ac0..521f310f2ac1 100644
147644 --- a/net/ipv4/tcp.c
147645 +++ b/net/ipv4/tcp.c
147646 @@ -3033,6 +3033,7 @@ int tcp_disconnect(struct sock *sk, int flags)
147647         tp->rx_opt.dsack = 0;
147648         tp->rx_opt.num_sacks = 0;
147649         tp->rcv_ooopack = 0;
147650 +       tp->fast_ack_mode = 0;
147653         /* Clean up fastopen related fields */
147654 diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
147655 index 6ea3dc2e4219..8ef512fefe25 100644
147656 --- a/net/ipv4/tcp_bbr.c
147657 +++ b/net/ipv4/tcp_bbr.c
147658 @@ -292,26 +292,40 @@ static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
147659                 sk->sk_pacing_rate = rate;
147662 -/* override sysctl_tcp_min_tso_segs */
147663  static u32 bbr_min_tso_segs(struct sock *sk)
147665         return sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;
147668 +/* Return the number of segments BBR would like in a TSO/GSO skb, given
147669 + * a particular max gso size as a constraint.
147670 + */
147671 +static u32 bbr_tso_segs_generic(struct sock *sk, unsigned int mss_now,
147672 +                               u32 gso_max_size)
147674 +       u32 segs;
147675 +       u64 bytes;
147677 +       /* Budget a TSO/GSO burst size allowance based on bw (pacing_rate). */
147678 +       bytes = sk->sk_pacing_rate >> sk->sk_pacing_shift;
147680 +       bytes = min_t(u32, bytes, gso_max_size - 1 - MAX_TCP_HEADER);
147681 +       segs = max_t(u32, bytes / mss_now, bbr_min_tso_segs(sk));
147682 +       return segs;
147685 +/* Custom tcp_tso_autosize() for BBR, used at transmit time to cap skb size. */
147686 +static u32  bbr_tso_segs(struct sock *sk, unsigned int mss_now)
147688 +       return bbr_tso_segs_generic(sk, mss_now, sk->sk_gso_max_size);
147691 +/* Like bbr_tso_segs(), using mss_cache, ignoring driver's sk_gso_max_size. */
147692  static u32 bbr_tso_segs_goal(struct sock *sk)
147694         struct tcp_sock *tp = tcp_sk(sk);
147695 -       u32 segs, bytes;
147697 -       /* Sort of tcp_tso_autosize() but ignoring
147698 -        * driver provided sk_gso_max_size.
147699 -        */
147700 -       bytes = min_t(unsigned long,
147701 -                     sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift),
147702 -                     GSO_MAX_SIZE - 1 - MAX_TCP_HEADER);
147703 -       segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk));
147705 -       return min(segs, 0x7FU);
147706 +       return  bbr_tso_segs_generic(sk, tp->mss_cache, GSO_MAX_SIZE);
147709  /* Save "last known good" cwnd so we can restore it after losses or PROBE_RTT */
147710 @@ -1147,7 +1161,7 @@ static struct tcp_congestion_ops tcp_bbr_cong_ops __read_mostly = {
147711         .undo_cwnd      = bbr_undo_cwnd,
147712         .cwnd_event     = bbr_cwnd_event,
147713         .ssthresh       = bbr_ssthresh,
147714 -       .min_tso_segs   = bbr_min_tso_segs,
147715 +       .tso_segs       = bbr_tso_segs,
147716         .get_info       = bbr_get_info,
147717         .set_state      = bbr_set_state,
147719 diff --git a/net/ipv4/tcp_bbr2.c b/net/ipv4/tcp_bbr2.c
147720 new file mode 100644
147721 index 000000000000..5510adc92bbb
147722 --- /dev/null
147723 +++ b/net/ipv4/tcp_bbr2.c
147724 @@ -0,0 +1,2671 @@
147725 +/* BBR (Bottleneck Bandwidth and RTT) congestion control, v2
147727 + * BBRv2 is a model-based congestion control algorithm that aims for low
147728 + * queues, low loss, and (bounded) Reno/CUBIC coexistence. To maintain a model
147729 + * of the network path, it uses measurements of bandwidth and RTT, as well as
147730 + * (if they occur) packet loss and/or DCTCP/L4S-style ECN signals.  Note that
147731 + * although it can use ECN or loss signals explicitly, it does not require
147732 + * either; it can bound its in-flight data based on its estimate of the BDP.
147734 + * The model has both higher and lower bounds for the operating range:
147735 + *   lo: bw_lo, inflight_lo: conservative short-term lower bound
147736 + *   hi: bw_hi, inflight_hi: robust long-term upper bound
147737 + * The bandwidth-probing time scale is (a) extended dynamically based on
147738 + * estimated BDP to improve coexistence with Reno/CUBIC; (b) bounded by
147739 + * an interactive wall-clock time-scale to be more scalable and responsive
147740 + * than Reno and CUBIC.
147742 + * Here is a state transition diagram for BBR:
147744 + *             |
147745 + *             V
147746 + *    +---> STARTUP  ----+
147747 + *    |        |         |
147748 + *    |        V         |
147749 + *    |      DRAIN   ----+
147750 + *    |        |         |
147751 + *    |        V         |
147752 + *    +---> PROBE_BW ----+
147753 + *    |      ^    |      |
147754 + *    |      |    |      |
147755 + *    |      +----+      |
147756 + *    |                  |
147757 + *    +---- PROBE_RTT <--+
147759 + * A BBR flow starts in STARTUP, and ramps up its sending rate quickly.
147760 + * When it estimates the pipe is full, it enters DRAIN to drain the queue.
147761 + * In steady state a BBR flow only uses PROBE_BW and PROBE_RTT.
147762 + * A long-lived BBR flow spends the vast majority of its time remaining
147763 + * (repeatedly) in PROBE_BW, fully probing and utilizing the pipe's bandwidth
147764 + * in a fair manner, with a small, bounded queue. *If* a flow has been
147765 + * continuously sending for the entire min_rtt window, and hasn't seen an RTT
147766 + * sample that matches or decreases its min_rtt estimate for 10 seconds, then
147767 + * it briefly enters PROBE_RTT to cut inflight to a minimum value to re-probe
147768 + * the path's two-way propagation delay (min_rtt). When exiting PROBE_RTT, if
147769 + * we estimated that we reached the full bw of the pipe then we enter PROBE_BW;
147770 + * otherwise we enter STARTUP to try to fill the pipe.
147772 + * BBR is described in detail in:
147773 + *   "BBR: Congestion-Based Congestion Control",
147774 + *   Neal Cardwell, Yuchung Cheng, C. Stephen Gunn, Soheil Hassas Yeganeh,
147775 + *   Van Jacobson. ACM Queue, Vol. 14 No. 5, September-October 2016.
147777 + * There is a public e-mail list for discussing BBR development and testing:
147778 + *   https://groups.google.com/forum/#!forum/bbr-dev
147780 + * NOTE: BBR might be used with the fq qdisc ("man tc-fq") with pacing enabled,
147781 + * otherwise TCP stack falls back to an internal pacing using one high
147782 + * resolution timer per TCP socket and may use more resources.
147783 + */
147784 +#include <linux/module.h>
147785 +#include <net/tcp.h>
147786 +#include <linux/inet_diag.h>
147787 +#include <linux/inet.h>
147788 +#include <linux/random.h>
147790 +#include "tcp_dctcp.h"
147792 +/* Scale factor for rate in pkt/uSec unit to avoid truncation in bandwidth
147793 + * estimation. The rate unit ~= (1500 bytes / 1 usec / 2^24) ~= 715 bps.
147794 + * This handles bandwidths from 0.06pps (715bps) to 256Mpps (3Tbps) in a u32.
147795 + * Since the minimum window is >=4 packets, the lower bound isn't
147796 + * an issue. The upper bound isn't an issue with existing technologies.
147797 + */
147798 +#define BW_SCALE 24
147799 +#define BW_UNIT (1 << BW_SCALE)
147801 +#define BBR_SCALE 8    /* scaling factor for fractions in BBR (e.g. gains) */
147802 +#define BBR_UNIT (1 << BBR_SCALE)
147804 +#define FLAG_DEBUG_VERBOSE     0x1     /* Verbose debugging messages */
147805 +#define FLAG_DEBUG_LOOPBACK    0x2     /* Do NOT skip loopback addr */
147807 +#define CYCLE_LEN              8       /* number of phases in a pacing gain cycle */
147809 +/* BBR has the following modes for deciding how fast to send: */
147810 +enum bbr_mode {
147811 +       BBR_STARTUP,    /* ramp up sending rate rapidly to fill pipe */
147812 +       BBR_DRAIN,      /* drain any queue created during startup */
147813 +       BBR_PROBE_BW,   /* discover, share bw: pace around estimated bw */
147814 +       BBR_PROBE_RTT,  /* cut inflight to min to probe min_rtt */
147817 +/* How does the incoming ACK stream relate to our bandwidth probing? */
147818 +enum bbr_ack_phase {
147819 +       BBR_ACKS_INIT,            /* not probing; not getting probe feedback */
147820 +       BBR_ACKS_REFILLING,       /* sending at est. bw to fill pipe */
147821 +       BBR_ACKS_PROBE_STARTING,  /* inflight rising to probe bw */
147822 +       BBR_ACKS_PROBE_FEEDBACK,  /* getting feedback from bw probing */
147823 +       BBR_ACKS_PROBE_STOPPING,  /* stopped probing; still getting feedback */
147826 +/* BBR congestion control block */
147827 +struct bbr {
147828 +       u32     min_rtt_us;             /* min RTT in min_rtt_win_sec window */
147829 +       u32     min_rtt_stamp;          /* timestamp of min_rtt_us */
147830 +       u32     probe_rtt_done_stamp;   /* end time for BBR_PROBE_RTT mode */
147831 +       u32     probe_rtt_min_us;       /* min RTT in bbr_probe_rtt_win_ms window */
147832 +       u32     probe_rtt_min_stamp;    /* timestamp of probe_rtt_min_us*/
147833 +       u32     next_rtt_delivered; /* scb->tx.delivered at end of round */
147834 +       u32     prior_rcv_nxt;  /* tp->rcv_nxt when CE state last changed */
147835 +       u64     cycle_mstamp;        /* time of this cycle phase start */
147836 +       u32     mode:3,              /* current bbr_mode in state machine */
147837 +               prev_ca_state:3,     /* CA state on previous ACK */
147838 +               packet_conservation:1,  /* use packet conservation? */
147839 +               round_start:1,       /* start of packet-timed tx->ack round? */
147840 +               ce_state:1,          /* If most recent data has CE bit set */
147841 +               bw_probe_up_rounds:5,   /* cwnd-limited rounds in PROBE_UP */
147842 +               try_fast_path:1,        /* can we take fast path? */
147843 +               unused2:11,
147844 +               idle_restart:1,      /* restarting after idle? */
147845 +               probe_rtt_round_done:1,  /* a BBR_PROBE_RTT round at 4 pkts? */
147846 +               cycle_idx:3,    /* current index in pacing_gain cycle array */
147847 +               has_seen_rtt:1;      /* have we seen an RTT sample yet? */
147848 +       u32     pacing_gain:11, /* current gain for setting pacing rate */
147849 +               cwnd_gain:11,   /* current gain for setting cwnd */
147850 +               full_bw_reached:1,   /* reached full bw in Startup? */
147851 +               full_bw_cnt:2,  /* number of rounds without large bw gains */
147852 +               init_cwnd:7;    /* initial cwnd */
147853 +       u32     prior_cwnd;     /* prior cwnd upon entering loss recovery */
147854 +       u32     full_bw;        /* recent bw, to estimate if pipe is full */
147856 +       /* For tracking ACK aggregation: */
147857 +       u64     ack_epoch_mstamp;       /* start of ACK sampling epoch */
147858 +       u16     extra_acked[2];         /* max excess data ACKed in epoch */
147859 +       u32     ack_epoch_acked:20,     /* packets (S)ACKed in sampling epoch */
147860 +               extra_acked_win_rtts:5, /* age of extra_acked, in round trips */
147861 +               extra_acked_win_idx:1,  /* current index in extra_acked array */
147862 +       /* BBR v2 state: */
147863 +               unused1:2,
147864 +               startup_ecn_rounds:2,   /* consecutive hi ECN STARTUP rounds */
147865 +               loss_in_cycle:1,        /* packet loss in this cycle? */
147866 +               ecn_in_cycle:1;         /* ECN in this cycle? */
147867 +       u32     loss_round_delivered; /* scb->tx.delivered ending loss round */
147868 +       u32     undo_bw_lo;          /* bw_lo before latest losses */
147869 +       u32     undo_inflight_lo;    /* inflight_lo before latest losses */
147870 +       u32     undo_inflight_hi;    /* inflight_hi before latest losses */
147871 +       u32     bw_latest;       /* max delivered bw in last round trip */
147872 +       u32     bw_lo;           /* lower bound on sending bandwidth */
147873 +       u32     bw_hi[2];        /* upper bound of sending bandwidth range*/
147874 +       u32     inflight_latest; /* max delivered data in last round trip */
147875 +       u32     inflight_lo;     /* lower bound of inflight data range */
147876 +       u32     inflight_hi;     /* upper bound of inflight data range */
147877 +       u32     bw_probe_up_cnt; /* packets delivered per inflight_hi incr */
147878 +       u32     bw_probe_up_acks;  /* packets (S)ACKed since inflight_hi incr */
147879 +       u32     probe_wait_us;   /* PROBE_DOWN until next clock-driven probe */
147880 +       u32     ecn_eligible:1, /* sender can use ECN (RTT, handshake)? */
147881 +               ecn_alpha:9,    /* EWMA delivered_ce/delivered; 0..256 */
147882 +               bw_probe_samples:1,    /* rate samples reflect bw probing? */
147883 +               prev_probe_too_high:1, /* did last PROBE_UP go too high? */
147884 +               stopped_risky_probe:1, /* last PROBE_UP stopped due to risk? */
147885 +               rounds_since_probe:8,  /* packet-timed rounds since probed bw */
147886 +               loss_round_start:1,    /* loss_round_delivered round trip? */
147887 +               loss_in_round:1,       /* loss marked in this round trip? */
147888 +               ecn_in_round:1,        /* ECN marked in this round trip? */
147889 +               ack_phase:3,           /* bbr_ack_phase: meaning of ACKs */
147890 +               loss_events_in_round:4,/* losses in STARTUP round */
147891 +               initialized:1;         /* has bbr_init() been called? */
147892 +       u32     alpha_last_delivered;    /* tp->delivered    at alpha update */
147893 +       u32     alpha_last_delivered_ce; /* tp->delivered_ce at alpha update */
147895 +       /* Params configurable using setsockopt. Refer to correspoding
147896 +        * module param for detailed description of params.
147897 +        */
147898 +       struct bbr_params {
147899 +               u32     high_gain:11,           /* max allowed value: 2047 */
147900 +                       drain_gain:10,          /* max allowed value: 1023 */
147901 +                       cwnd_gain:11;           /* max allowed value: 2047 */
147902 +               u32     cwnd_min_target:4,      /* max allowed value: 15 */
147903 +                       min_rtt_win_sec:5,      /* max allowed value: 31 */
147904 +                       probe_rtt_mode_ms:9,    /* max allowed value: 511 */
147905 +                       full_bw_cnt:3,          /* max allowed value: 7 */
147906 +                       cwnd_tso_budget:1,      /* allowed values: {0, 1} */
147907 +                       unused3:6,
147908 +                       drain_to_target:1,      /* boolean */
147909 +                       precise_ece_ack:1,      /* boolean */
147910 +                       extra_acked_in_startup:1, /* allowed values: {0, 1} */
147911 +                       fast_path:1;            /* boolean */
147912 +               u32     full_bw_thresh:10,      /* max allowed value: 1023 */
147913 +                       startup_cwnd_gain:11,   /* max allowed value: 2047 */
147914 +                       bw_probe_pif_gain:9,    /* max allowed value: 511 */
147915 +                       usage_based_cwnd:1,     /* boolean */
147916 +                       unused2:1;
147917 +               u16     probe_rtt_win_ms:14,    /* max allowed value: 16383 */
147918 +                       refill_add_inc:2;       /* max allowed value: 3 */
147919 +               u16     extra_acked_gain:11,    /* max allowed value: 2047 */
147920 +                       extra_acked_win_rtts:5; /* max allowed value: 31*/
147921 +               u16     pacing_gain[CYCLE_LEN]; /* max allowed value: 1023 */
147922 +               /* Mostly BBR v2 parameters below here: */
147923 +               u32     ecn_alpha_gain:8,       /* max allowed value: 255 */
147924 +                       ecn_factor:8,           /* max allowed value: 255 */
147925 +                       ecn_thresh:8,           /* max allowed value: 255 */
147926 +                       beta:8;                 /* max allowed value: 255 */
147927 +               u32     ecn_max_rtt_us:19,      /* max allowed value: 524287 */
147928 +                       bw_probe_reno_gain:9,   /* max allowed value: 511 */
147929 +                       full_loss_cnt:4;        /* max allowed value: 15 */
147930 +               u32     probe_rtt_cwnd_gain:8,  /* max allowed value: 255 */
147931 +                       inflight_headroom:8,    /* max allowed value: 255 */
147932 +                       loss_thresh:8,          /* max allowed value: 255 */
147933 +                       bw_probe_max_rounds:8;  /* max allowed value: 255 */
147934 +               u32     bw_probe_rand_rounds:4, /* max allowed value: 15 */
147935 +                       bw_probe_base_us:26,    /* usecs: 0..2^26-1 (67 secs) */
147936 +                       full_ecn_cnt:2;         /* max allowed value: 3 */
147937 +               u32     bw_probe_rand_us:26,    /* usecs: 0..2^26-1 (67 secs) */
147938 +                       undo:1,                 /* boolean */
147939 +                       tso_rtt_shift:4,        /* max allowed value: 15 */
147940 +                       unused5:1;
147941 +               u32     ecn_reprobe_gain:9,     /* max allowed value: 511 */
147942 +                       unused1:14,
147943 +                       ecn_alpha_init:9;       /* max allowed value: 256 */
147944 +       } params;
147946 +       struct {
147947 +               u32     snd_isn; /* Initial sequence number */
147948 +               u32     rs_bw;   /* last valid rate sample bw */
147949 +               u32     target_cwnd; /* target cwnd, based on BDP */
147950 +               u8      undo:1,  /* Undo even happened but not yet logged */
147951 +                       unused:7;
147952 +               char    event;   /* single-letter event debug codes */
147953 +               u16     unused2;
147954 +       } debug;
147957 +struct bbr_context {
147958 +       u32 sample_bw;
147959 +       u32 target_cwnd;
147960 +       u32 log:1;
147963 +/* Window length of min_rtt filter (in sec). Max allowed value is 31 (0x1F) */
147964 +static u32 bbr_min_rtt_win_sec = 10;
147965 +/* Minimum time (in ms) spent at bbr_cwnd_min_target in BBR_PROBE_RTT mode.
147966 + * Max allowed value is 511 (0x1FF).
147967 + */
147968 +static u32 bbr_probe_rtt_mode_ms = 200;
147969 +/* Window length of probe_rtt_min_us filter (in ms), and consequently the
147970 + * typical interval between PROBE_RTT mode entries.
147971 + * Note that bbr_probe_rtt_win_ms must be <= bbr_min_rtt_win_sec * MSEC_PER_SEC
147972 + */
147973 +static u32 bbr_probe_rtt_win_ms = 5000;
147974 +/* Skip TSO below the following bandwidth (bits/sec): */
147975 +static int bbr_min_tso_rate = 1200000;
147977 +/* Use min_rtt to help adapt TSO burst size, with smaller min_rtt resulting
147978 + * in bigger TSO bursts. By default we cut the RTT-based allowance in half
147979 + * for every 2^9 usec (aka 512 us) of RTT, so that the RTT-based allowance
147980 + * is below 1500 bytes after 6 * ~500 usec = 3ms.
147981 + */
147982 +static u32 bbr_tso_rtt_shift = 9;  /* halve allowance per 2^9 usecs, 512us */
147984 +/* Select cwnd TSO budget approach:
147985 + *  0: padding
147986 + *  1: flooring
147987 + */
147988 +static uint bbr_cwnd_tso_budget = 1;
147990 +/* Pace at ~1% below estimated bw, on average, to reduce queue at bottleneck.
147991 + * In order to help drive the network toward lower queues and low latency while
147992 + * maintaining high utilization, the average pacing rate aims to be slightly
147993 + * lower than the estimated bandwidth. This is an important aspect of the
147994 + * design.
147995 + */
147996 +static const int bbr_pacing_margin_percent = 1;
147998 +/* We use a high_gain value of 2/ln(2) because it's the smallest pacing gain
147999 + * that will allow a smoothly increasing pacing rate that will double each RTT
148000 + * and send the same number of packets per RTT that an un-paced, slow-starting
148001 + * Reno or CUBIC flow would. Max allowed value is 2047 (0x7FF).
148002 + */
148003 +static int bbr_high_gain  = BBR_UNIT * 2885 / 1000 + 1;
148004 +/* The gain for deriving startup cwnd. Max allowed value is 2047 (0x7FF). */
148005 +static int bbr_startup_cwnd_gain  = BBR_UNIT * 2885 / 1000 + 1;
148006 +/* The pacing gain of 1/high_gain in BBR_DRAIN is calculated to typically drain
148007 + * the queue created in BBR_STARTUP in a single round. Max allowed value
148008 + * is 1023 (0x3FF).
148009 + */
148010 +static int bbr_drain_gain = BBR_UNIT * 1000 / 2885;
148011 +/* The gain for deriving steady-state cwnd tolerates delayed/stretched ACKs.
148012 + * Max allowed value is 2047 (0x7FF).
148013 + */
148014 +static int bbr_cwnd_gain  = BBR_UNIT * 2;
148015 +/* The pacing_gain values for the PROBE_BW gain cycle, to discover/share bw.
148016 + * Max allowed value for each element is 1023 (0x3FF).
148017 + */
148018 +enum bbr_pacing_gain_phase {
148019 +       BBR_BW_PROBE_UP         = 0,  /* push up inflight to probe for bw/vol */
148020 +       BBR_BW_PROBE_DOWN       = 1,  /* drain excess inflight from the queue */
148021 +       BBR_BW_PROBE_CRUISE     = 2,  /* use pipe, w/ headroom in queue/pipe */
148022 +       BBR_BW_PROBE_REFILL     = 3,  /* v2: refill the pipe again to 100% */
148024 +static int bbr_pacing_gain[] = {
148025 +       BBR_UNIT * 5 / 4,       /* probe for more available bw */
148026 +       BBR_UNIT * 3 / 4,       /* drain queue and/or yield bw to other flows */
148027 +       BBR_UNIT, BBR_UNIT, BBR_UNIT,   /* cruise at 1.0*bw to utilize pipe, */
148028 +       BBR_UNIT, BBR_UNIT, BBR_UNIT    /* without creating excess queue... */
148031 +/* Try to keep at least this many packets in flight, if things go smoothly. For
148032 + * smooth functioning, a sliding window protocol ACKing every other packet
148033 + * needs at least 4 packets in flight. Max allowed value is 15 (0xF).
148034 + */
148035 +static u32 bbr_cwnd_min_target = 4;
148037 +/* Cwnd to BDP proportion in PROBE_RTT mode scaled by BBR_UNIT. Default: 50%.
148038 + * Use 0 to disable. Max allowed value is 255.
148039 + */
148040 +static u32 bbr_probe_rtt_cwnd_gain = BBR_UNIT * 1 / 2;
148042 +/* To estimate if BBR_STARTUP mode (i.e. high_gain) has filled pipe... */
148043 +/* If bw has increased significantly (1.25x), there may be more bw available.
148044 + * Max allowed value is 1023 (0x3FF).
148045 + */
148046 +static u32 bbr_full_bw_thresh = BBR_UNIT * 5 / 4;
148047 +/* But after 3 rounds w/o significant bw growth, estimate pipe is full.
148048 + * Max allowed value is 7 (0x7).
148049 + */
148050 +static u32 bbr_full_bw_cnt = 3;
148052 +static u32 bbr_flags;          /* Debugging related stuff */
148054 +/* Whether to debug using printk.
148055 + */
148056 +static bool bbr_debug_with_printk;
148058 +/* Whether to debug using ftrace event tcp:tcp_bbr_event.
148059 + * Ignored when bbr_debug_with_printk is set.
148060 + */
148061 +static bool bbr_debug_ftrace;
148063 +/* Experiment: each cycle, try to hold sub-unity gain until inflight <= BDP. */
148064 +static bool bbr_drain_to_target = true;                /* default: enabled */
148066 +/* Experiment: Flags to control BBR with ECN behavior.
148067 + */
148068 +static bool bbr_precise_ece_ack = true;                /* default: enabled */
148070 +/* The max rwin scaling shift factor is 14 (RFC 1323), so the max sane rwin is
148071 + * (2^(16+14) B)/(1024 B/packet) = 1M packets.
148072 + */
148073 +static u32 bbr_cwnd_warn_val   = 1U << 20;
148075 +static u16 bbr_debug_port_mask;
148077 +/* BBR module parameters. These are module parameters only in Google prod.
148078 + * Upstream these are intentionally not module parameters.
148079 + */
148080 +static int bbr_pacing_gain_size = CYCLE_LEN;
148082 +/* Gain factor for adding extra_acked to target cwnd: */
148083 +static int bbr_extra_acked_gain = 256;
148085 +/* Window length of extra_acked window. Max allowed val is 31. */
148086 +static u32 bbr_extra_acked_win_rtts = 5;
148088 +/* Max allowed val for ack_epoch_acked, after which sampling epoch is reset */
148089 +static u32 bbr_ack_epoch_acked_reset_thresh = 1U << 20;
148091 +/* Time period for clamping cwnd increment due to ack aggregation */
148092 +static u32 bbr_extra_acked_max_us = 100 * 1000;
148094 +/* Use extra acked in startup ?
148095 + * 0: disabled
148096 + * 1: use latest extra_acked value from 1-2 rtt in startup
148097 + */
148098 +static int bbr_extra_acked_in_startup = 1;             /* default: enabled */
148100 +/* Experiment: don't grow cwnd beyond twice of what we just probed. */
148101 +static bool bbr_usage_based_cwnd;              /* default: disabled */
148103 +/* For lab testing, researchers can enable BBRv2 ECN support with this flag,
148104 + * when they know that any ECN marks that the connections experience will be
148105 + * DCTCP/L4S-style ECN marks, rather than RFC3168 ECN marks.
148106 + * TODO(ncardwell): Production use of the BBRv2 ECN functionality depends on
148107 + * negotiation or configuration that is outside the scope of the BBRv2
148108 + * alpha release.
148109 + */
148110 +static bool bbr_ecn_enable = false;
148112 +module_param_named(min_tso_rate,      bbr_min_tso_rate,      int,    0644);
148113 +module_param_named(tso_rtt_shift,     bbr_tso_rtt_shift,     int,    0644);
148114 +module_param_named(high_gain,         bbr_high_gain,         int,    0644);
148115 +module_param_named(drain_gain,        bbr_drain_gain,        int,    0644);
148116 +module_param_named(startup_cwnd_gain, bbr_startup_cwnd_gain, int,    0644);
148117 +module_param_named(cwnd_gain,         bbr_cwnd_gain,         int,    0644);
148118 +module_param_array_named(pacing_gain, bbr_pacing_gain,       int,
148119 +                        &bbr_pacing_gain_size, 0644);
148120 +module_param_named(cwnd_min_target,   bbr_cwnd_min_target,   uint,   0644);
148121 +module_param_named(probe_rtt_cwnd_gain,
148122 +                  bbr_probe_rtt_cwnd_gain,                  uint,   0664);
148123 +module_param_named(cwnd_warn_val,     bbr_cwnd_warn_val,     uint,   0664);
148124 +module_param_named(debug_port_mask,   bbr_debug_port_mask,   ushort, 0644);
148125 +module_param_named(flags,             bbr_flags,             uint,   0644);
148126 +module_param_named(debug_ftrace,      bbr_debug_ftrace, bool,   0644);
148127 +module_param_named(debug_with_printk, bbr_debug_with_printk, bool,   0644);
148128 +module_param_named(min_rtt_win_sec,   bbr_min_rtt_win_sec,   uint,   0644);
148129 +module_param_named(probe_rtt_mode_ms, bbr_probe_rtt_mode_ms, uint,   0644);
148130 +module_param_named(probe_rtt_win_ms,  bbr_probe_rtt_win_ms,  uint,   0644);
148131 +module_param_named(full_bw_thresh,    bbr_full_bw_thresh,    uint,   0644);
148132 +module_param_named(full_bw_cnt,       bbr_full_bw_cnt,       uint,   0644);
148133 +module_param_named(cwnd_tso_bduget,   bbr_cwnd_tso_budget,   uint,   0664);
148134 +module_param_named(extra_acked_gain,  bbr_extra_acked_gain,  int,    0664);
148135 +module_param_named(extra_acked_win_rtts,
148136 +                  bbr_extra_acked_win_rtts, uint,   0664);
148137 +module_param_named(extra_acked_max_us,
148138 +                  bbr_extra_acked_max_us, uint,   0664);
148139 +module_param_named(ack_epoch_acked_reset_thresh,
148140 +                  bbr_ack_epoch_acked_reset_thresh, uint,   0664);
148141 +module_param_named(drain_to_target,   bbr_drain_to_target,   bool,   0664);
148142 +module_param_named(precise_ece_ack,   bbr_precise_ece_ack,   bool,   0664);
148143 +module_param_named(extra_acked_in_startup,
148144 +                  bbr_extra_acked_in_startup, int, 0664);
148145 +module_param_named(usage_based_cwnd, bbr_usage_based_cwnd, bool,   0664);
148146 +module_param_named(ecn_enable,       bbr_ecn_enable,         bool,   0664);
148148 +static void bbr2_exit_probe_rtt(struct sock *sk);
148149 +static void bbr2_reset_congestion_signals(struct sock *sk);
148151 +static void bbr_check_probe_rtt_done(struct sock *sk);
148153 +/* Do we estimate that STARTUP filled the pipe? */
148154 +static bool bbr_full_bw_reached(const struct sock *sk)
148156 +       const struct bbr *bbr = inet_csk_ca(sk);
148158 +       return bbr->full_bw_reached;
148161 +/* Return the windowed max recent bandwidth sample, in pkts/uS << BW_SCALE. */
148162 +static u32 bbr_max_bw(const struct sock *sk)
148164 +       struct bbr *bbr = inet_csk_ca(sk);
148166 +       return max(bbr->bw_hi[0], bbr->bw_hi[1]);
148169 +/* Return the estimated bandwidth of the path, in pkts/uS << BW_SCALE. */
148170 +static u32 bbr_bw(const struct sock *sk)
148172 +       struct bbr *bbr = inet_csk_ca(sk);
148174 +       return min(bbr_max_bw(sk), bbr->bw_lo);
148177 +/* Return maximum extra acked in past k-2k round trips,
148178 + * where k = bbr_extra_acked_win_rtts.
148179 + */
148180 +static u16 bbr_extra_acked(const struct sock *sk)
148182 +       struct bbr *bbr = inet_csk_ca(sk);
148184 +       return max(bbr->extra_acked[0], bbr->extra_acked[1]);
148187 +/* Return rate in bytes per second, optionally with a gain.
148188 + * The order here is chosen carefully to avoid overflow of u64. This should
148189 + * work for input rates of up to 2.9Tbit/sec and gain of 2.89x.
148190 + */
148191 +static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain,
148192 +                                 int margin)
148194 +       unsigned int mss = tcp_sk(sk)->mss_cache;
148196 +       rate *= mss;
148197 +       rate *= gain;
148198 +       rate >>= BBR_SCALE;
148199 +       rate *= USEC_PER_SEC / 100 * (100 - margin);
148200 +       rate >>= BW_SCALE;
148201 +       rate = max(rate, 1ULL);
148202 +       return rate;
148205 +static u64 bbr_bw_bytes_per_sec(struct sock *sk, u64 rate)
148207 +       return bbr_rate_bytes_per_sec(sk, rate, BBR_UNIT, 0);
148210 +static u64 bbr_rate_kbps(struct sock *sk, u64 rate)
148212 +       rate = bbr_bw_bytes_per_sec(sk, rate);
148213 +       rate *= 8;
148214 +       do_div(rate, 1000);
148215 +       return rate;
148218 +static u32 bbr_tso_segs_goal(struct sock *sk);
148219 +static void bbr_debug(struct sock *sk, u32 acked,
148220 +                     const struct rate_sample *rs, struct bbr_context *ctx)
148222 +       static const char ca_states[] = {
148223 +               [TCP_CA_Open]           = 'O',
148224 +               [TCP_CA_Disorder]       = 'D',
148225 +               [TCP_CA_CWR]            = 'C',
148226 +               [TCP_CA_Recovery]       = 'R',
148227 +               [TCP_CA_Loss]           = 'L',
148228 +       };
148229 +       static const char mode[] = {
148230 +               'G',  /* Growing   - BBR_STARTUP */
148231 +               'D',  /* Drain     - BBR_DRAIN */
148232 +               'W',  /* Window    - BBR_PROBE_BW */
148233 +               'M',  /* Min RTT   - BBR_PROBE_RTT */
148234 +       };
148235 +       static const char ack_phase[] = { /* bbr_ack_phase strings */
148236 +               'I',    /* BBR_ACKS_INIT           - 'Init' */
148237 +               'R',    /* BBR_ACKS_REFILLING      - 'Refilling' */
148238 +               'B',    /* BBR_ACKS_PROBE_STARTING - 'Before' */
148239 +               'F',    /* BBR_ACKS_PROBE_FEEDBACK - 'Feedback' */
148240 +               'A',    /* BBR_ACKS_PROBE_STOPPING - 'After' */
148241 +       };
148242 +       struct tcp_sock *tp = tcp_sk(sk);
148243 +       struct bbr *bbr = inet_csk_ca(sk);
148244 +       const u32 una = tp->snd_una - bbr->debug.snd_isn;
148245 +       const u32 fack = tcp_highest_sack_seq(tp);
148246 +       const u16 dport = ntohs(inet_sk(sk)->inet_dport);
148247 +       bool is_port_match = (bbr_debug_port_mask &&
148248 +                             ((dport & bbr_debug_port_mask) == 0));
148249 +       char debugmsg[320];
148251 +       if (sk->sk_state == TCP_SYN_SENT)
148252 +               return;  /* no bbr_init() yet if SYN retransmit -> CA_Loss */
148254 +       if (!tp->snd_cwnd || tp->snd_cwnd > bbr_cwnd_warn_val) {
148255 +               char addr[INET6_ADDRSTRLEN + 10] = { 0 };
148257 +               if (sk->sk_family == AF_INET)
148258 +                       snprintf(addr, sizeof(addr), "%pI4:%u",
148259 +                                &inet_sk(sk)->inet_daddr, dport);
148260 +               else if (sk->sk_family == AF_INET6)
148261 +                       snprintf(addr, sizeof(addr), "%pI6:%u",
148262 +                                &sk->sk_v6_daddr, dport);
148264 +               WARN_ONCE(1,
148265 +                       "BBR %s cwnd alert: %u "
148266 +                       "snd_una: %u ca: %d pacing_gain: %u cwnd_gain: %u "
148267 +                       "bw: %u rtt: %u min_rtt: %u "
148268 +                       "acked: %u tso_segs: %u "
148269 +                       "bw: %d %ld %d pif: %u\n",
148270 +                       addr, tp->snd_cwnd,
148271 +                       una, inet_csk(sk)->icsk_ca_state,
148272 +                       bbr->pacing_gain, bbr->cwnd_gain,
148273 +                       bbr_max_bw(sk), (tp->srtt_us >> 3), bbr->min_rtt_us,
148274 +                       acked, bbr_tso_segs_goal(sk),
148275 +                       rs->delivered, rs->interval_us, rs->is_retrans,
148276 +                       tcp_packets_in_flight(tp));
148277 +       }
148279 +       if (likely(!bbr_debug_with_printk && !bbr_debug_ftrace))
148280 +               return;
148282 +       if (!sock_flag(sk, SOCK_DBG) && !is_port_match)
148283 +               return;
148285 +       if (!ctx->log && !tp->app_limited && !(bbr_flags & FLAG_DEBUG_VERBOSE))
148286 +               return;
148288 +       if (ipv4_is_loopback(inet_sk(sk)->inet_daddr) &&
148289 +           !(bbr_flags & FLAG_DEBUG_LOOPBACK))
148290 +               return;
148292 +       snprintf(debugmsg, sizeof(debugmsg) - 1,
148293 +                "BBR %pI4:%-5u %5u,%03u:%-7u %c "
148294 +                "%c %2u br %2u cr %2d rtt %5ld d %2d i %5ld mrtt %d %cbw %llu "
148295 +                "bw %llu lb %llu ib %llu qb %llu "
148296 +                "a %u if %2u %c %c dl %u l %u al %u # %u t %u %c %c "
148297 +                "lr %d er %d ea %d bwl %lld il %d ih %d c %d "
148298 +                "v %d %c %u %c %s\n",
148299 +                &inet_sk(sk)->inet_daddr, dport,
148300 +                una / 1000, una % 1000, fack - tp->snd_una,
148301 +                ca_states[inet_csk(sk)->icsk_ca_state],
148302 +                bbr->debug.undo ? '@' : mode[bbr->mode],
148303 +                tp->snd_cwnd,
148304 +                bbr_extra_acked(sk),   /* br (legacy): extra_acked */
148305 +                rs->tx_in_flight,      /* cr (legacy): tx_inflight */
148306 +                rs->rtt_us,
148307 +                rs->delivered,
148308 +                rs->interval_us,
148309 +                bbr->min_rtt_us,
148310 +                rs->is_app_limited ? '_' : 'l',
148311 +                bbr_rate_kbps(sk, ctx->sample_bw), /* lbw: latest sample bw */
148312 +                bbr_rate_kbps(sk, bbr_max_bw(sk)), /* bw: max bw */
148313 +                0ULL,                              /* lb: [obsolete] */
148314 +                0ULL,                              /* ib: [obsolete] */
148315 +                (u64)sk->sk_pacing_rate * 8 / 1000,
148316 +                acked,
148317 +                tcp_packets_in_flight(tp),
148318 +                rs->is_ack_delayed ? 'd' : '.',
148319 +                bbr->round_start ? '*' : '.',
148320 +                tp->delivered, tp->lost,
148321 +                tp->app_limited,
148322 +                0,                                 /* #: [obsolete] */
148323 +                ctx->target_cwnd,
148324 +                tp->reord_seen ? 'r' : '.',  /* r: reordering seen? */
148325 +                ca_states[bbr->prev_ca_state],
148326 +                (rs->lost + rs->delivered) > 0 ?
148327 +                (1000 * rs->lost /
148328 +                 (rs->lost + rs->delivered)) : 0,    /* lr: loss rate x1000 */
148329 +                (rs->delivered) > 0 ?
148330 +                (1000 * rs->delivered_ce /
148331 +                 (rs->delivered)) : 0,               /* er: ECN rate x1000 */
148332 +                1000 * bbr->ecn_alpha >> BBR_SCALE,  /* ea: ECN alpha x1000 */
148333 +                bbr->bw_lo == ~0U ?
148334 +                  -1 : (s64)bbr_rate_kbps(sk, bbr->bw_lo), /* bwl */
148335 +                bbr->inflight_lo,      /* il */
148336 +                bbr->inflight_hi,      /* ih */
148337 +                bbr->bw_probe_up_cnt,  /* c */
148338 +                2,                     /* v: version */
148339 +                bbr->debug.event,
148340 +                bbr->cycle_idx,
148341 +                ack_phase[bbr->ack_phase],
148342 +                bbr->bw_probe_samples ? "Y" : "N");
148343 +       debugmsg[sizeof(debugmsg) - 1] = 0;
148345 +       /* printk takes a higher precedence. */
148346 +       if (bbr_debug_with_printk)
148347 +               printk(KERN_DEBUG "%s", debugmsg);
148349 +       if (unlikely(bbr->debug.undo))
148350 +               bbr->debug.undo = 0;
148353 +/* Convert a BBR bw and gain factor to a pacing rate in bytes per second. */
148354 +static unsigned long bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)
148356 +       u64 rate = bw;
148358 +       rate = bbr_rate_bytes_per_sec(sk, rate, gain,
148359 +                                     bbr_pacing_margin_percent);
148360 +       rate = min_t(u64, rate, sk->sk_max_pacing_rate);
148361 +       return rate;
148364 +/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
148365 +static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
148367 +       struct tcp_sock *tp = tcp_sk(sk);
148368 +       struct bbr *bbr = inet_csk_ca(sk);
148369 +       u64 bw;
148370 +       u32 rtt_us;
148372 +       if (tp->srtt_us) {              /* any RTT sample yet? */
148373 +               rtt_us = max(tp->srtt_us >> 3, 1U);
148374 +               bbr->has_seen_rtt = 1;
148375 +       } else {                         /* no RTT sample yet */
148376 +               rtt_us = USEC_PER_MSEC;  /* use nominal default RTT */
148377 +       }
148378 +       bw = (u64)tp->snd_cwnd * BW_UNIT;
148379 +       do_div(bw, rtt_us);
148380 +       sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr->params.high_gain);
148383 +/* Pace using current bw estimate and a gain factor. */
148384 +static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
148386 +       struct tcp_sock *tp = tcp_sk(sk);
148387 +       struct bbr *bbr = inet_csk_ca(sk);
148388 +       unsigned long rate = bbr_bw_to_pacing_rate(sk, bw, gain);
148390 +       if (unlikely(!bbr->has_seen_rtt && tp->srtt_us))
148391 +               bbr_init_pacing_rate_from_rtt(sk);
148392 +       if (bbr_full_bw_reached(sk) || rate > sk->sk_pacing_rate)
148393 +               sk->sk_pacing_rate = rate;
148396 +static u32 bbr_min_tso_segs(struct sock *sk)
148398 +       return sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;
148401 +/* Return the number of segments BBR would like in a TSO/GSO skb, given
148402 + * a particular max gso size as a constraint.
148403 + */
148404 +static u32 bbr_tso_segs_generic(struct sock *sk, unsigned int mss_now,
148405 +                               u32 gso_max_size)
148407 +       struct bbr *bbr = inet_csk_ca(sk);
148408 +       u32 segs, r;
148409 +       u64 bytes;
148411 +       /* Budget a TSO/GSO burst size allowance based on bw (pacing_rate). */
148412 +       bytes = sk->sk_pacing_rate >> sk->sk_pacing_shift;
148414 +       /* Budget a TSO/GSO burst size allowance based on min_rtt. For every
148415 +        * K = 2^tso_rtt_shift microseconds of min_rtt, halve the burst.
148416 +        * The min_rtt-based burst allowance is: 64 KBytes / 2^(min_rtt/K)
148417 +        */
148418 +       if (bbr->params.tso_rtt_shift) {
148419 +               r = bbr->min_rtt_us >> bbr->params.tso_rtt_shift;
148420 +               if (r < BITS_PER_TYPE(u32))   /* prevent undefined behavior */
148421 +                       bytes += GSO_MAX_SIZE >> r;
148422 +       }
148424 +       bytes = min_t(u32, bytes, gso_max_size - 1 - MAX_TCP_HEADER);
148425 +       segs = max_t(u32, bytes / mss_now, bbr_min_tso_segs(sk));
148426 +       return segs;
148429 +/* Custom tcp_tso_autosize() for BBR, used at transmit time to cap skb size. */
148430 +static u32  bbr_tso_segs(struct sock *sk, unsigned int mss_now)
148432 +       return bbr_tso_segs_generic(sk, mss_now, sk->sk_gso_max_size);
148435 +/* Like bbr_tso_segs(), using mss_cache, ignoring driver's sk_gso_max_size. */
148436 +static u32 bbr_tso_segs_goal(struct sock *sk)
148438 +       struct tcp_sock *tp = tcp_sk(sk);
148440 +       return  bbr_tso_segs_generic(sk, tp->mss_cache, GSO_MAX_SIZE);
148443 +/* Save "last known good" cwnd so we can restore it after losses or PROBE_RTT */
148444 +static void bbr_save_cwnd(struct sock *sk)
148446 +       struct tcp_sock *tp = tcp_sk(sk);
148447 +       struct bbr *bbr = inet_csk_ca(sk);
148449 +       if (bbr->prev_ca_state < TCP_CA_Recovery && bbr->mode != BBR_PROBE_RTT)
148450 +               bbr->prior_cwnd = tp->snd_cwnd;  /* this cwnd is good enough */
148451 +       else  /* loss recovery or BBR_PROBE_RTT have temporarily cut cwnd */
148452 +               bbr->prior_cwnd = max(bbr->prior_cwnd, tp->snd_cwnd);
148455 +static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
148457 +       struct tcp_sock *tp = tcp_sk(sk);
148458 +       struct bbr *bbr = inet_csk_ca(sk);
148460 +       if (event == CA_EVENT_TX_START && tp->app_limited) {
148461 +               bbr->idle_restart = 1;
148462 +               bbr->ack_epoch_mstamp = tp->tcp_mstamp;
148463 +               bbr->ack_epoch_acked = 0;
148464 +               /* Avoid pointless buffer overflows: pace at est. bw if we don't
148465 +                * need more speed (we're restarting from idle and app-limited).
148466 +                */
148467 +               if (bbr->mode == BBR_PROBE_BW)
148468 +                       bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT);
148469 +               else if (bbr->mode == BBR_PROBE_RTT)
148470 +                       bbr_check_probe_rtt_done(sk);
148471 +       } else if ((event == CA_EVENT_ECN_IS_CE ||
148472 +                   event == CA_EVENT_ECN_NO_CE) &&
148473 +                   bbr_ecn_enable &&
148474 +                   bbr->params.precise_ece_ack) {
148475 +               u32 state = bbr->ce_state;
148476 +               dctcp_ece_ack_update(sk, event, &bbr->prior_rcv_nxt, &state);
148477 +               bbr->ce_state = state;
148478 +               if (tp->fast_ack_mode == 2 && event == CA_EVENT_ECN_IS_CE)
148479 +                       tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
148480 +       }
148483 +/* Calculate bdp based on min RTT and the estimated bottleneck bandwidth:
148485 + * bdp = ceil(bw * min_rtt * gain)
148487 + * The key factor, gain, controls the amount of queue. While a small gain
148488 + * builds a smaller queue, it becomes more vulnerable to noise in RTT
148489 + * measurements (e.g., delayed ACKs or other ACK compression effects). This
148490 + * noise may cause BBR to under-estimate the rate.
148491 + */
148492 +static u32 bbr_bdp(struct sock *sk, u32 bw, int gain)
148494 +       struct bbr *bbr = inet_csk_ca(sk);
148495 +       u32 bdp;
148496 +       u64 w;
148498 +       /* If we've never had a valid RTT sample, cap cwnd at the initial
148499 +        * default. This should only happen when the connection is not using TCP
148500 +        * timestamps and has retransmitted all of the SYN/SYNACK/data packets
148501 +        * ACKed so far. In this case, an RTO can cut cwnd to 1, in which
148502 +        * case we need to slow-start up toward something safe: initial cwnd.
148503 +        */
148504 +       if (unlikely(bbr->min_rtt_us == ~0U))    /* no valid RTT samples yet? */
148505 +               return bbr->init_cwnd;  /* be safe: cap at initial cwnd */
148507 +       w = (u64)bw * bbr->min_rtt_us;
148509 +       /* Apply a gain to the given value, remove the BW_SCALE shift, and
148510 +        * round the value up to avoid a negative feedback loop.
148511 +        */
148512 +       bdp = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT;
148514 +       return bdp;
148517 +/* To achieve full performance in high-speed paths, we budget enough cwnd to
148518 + * fit full-sized skbs in-flight on both end hosts to fully utilize the path:
148519 + *   - one skb in sending host Qdisc,
148520 + *   - one skb in sending host TSO/GSO engine
148521 + *   - one skb being received by receiver host LRO/GRO/delayed-ACK engine
148522 + * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because
148523 + * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets,
148524 + * which allows 2 outstanding 2-packet sequences, to try to keep pipe
148525 + * full even with ACK-every-other-packet delayed ACKs.
148526 + */
148527 +static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd)
148529 +       struct bbr *bbr = inet_csk_ca(sk);
148530 +       u32 tso_segs_goal;
148532 +       tso_segs_goal = 3 * bbr_tso_segs_goal(sk);
148534 +       /* Allow enough full-sized skbs in flight to utilize end systems. */
148535 +       if (bbr->params.cwnd_tso_budget == 1) {
148536 +               cwnd = max_t(u32, cwnd, tso_segs_goal);
148537 +               cwnd = max_t(u32, cwnd, bbr->params.cwnd_min_target);
148538 +       } else {
148539 +               cwnd += tso_segs_goal;
148540 +               cwnd = (cwnd + 1) & ~1U;
148541 +       }
148542 +       /* Ensure gain cycling gets inflight above BDP even for small BDPs. */
148543 +       if (bbr->mode == BBR_PROBE_BW && bbr->cycle_idx == BBR_BW_PROBE_UP)
148544 +               cwnd += 2;
148546 +       return cwnd;
148549 +/* Find inflight based on min RTT and the estimated bottleneck bandwidth. */
148550 +static u32 bbr_inflight(struct sock *sk, u32 bw, int gain)
148552 +       u32 inflight;
148554 +       inflight = bbr_bdp(sk, bw, gain);
148555 +       inflight = bbr_quantization_budget(sk, inflight);
148557 +       return inflight;
148560 +/* With pacing at lower layers, there's often less data "in the network" than
148561 + * "in flight". With TSQ and departure time pacing at lower layers (e.g. fq),
148562 + * we often have several skbs queued in the pacing layer with a pre-scheduled
148563 + * earliest departure time (EDT). BBR adapts its pacing rate based on the
148564 + * inflight level that it estimates has already been "baked in" by previous
148565 + * departure time decisions. We calculate a rough estimate of the number of our
148566 + * packets that might be in the network at the earliest departure time for the
148567 + * next skb scheduled:
148568 + *   in_network_at_edt = inflight_at_edt - (EDT - now) * bw
148569 + * If we're increasing inflight, then we want to know if the transmit of the
148570 + * EDT skb will push inflight above the target, so inflight_at_edt includes
148571 + * bbr_tso_segs_goal() from the skb departing at EDT. If decreasing inflight,
148572 + * then estimate if inflight will sink too low just before the EDT transmit.
148573 + */
148574 +static u32 bbr_packets_in_net_at_edt(struct sock *sk, u32 inflight_now)
148576 +       struct tcp_sock *tp = tcp_sk(sk);
148577 +       struct bbr *bbr = inet_csk_ca(sk);
148578 +       u64 now_ns, edt_ns, interval_us;
148579 +       u32 interval_delivered, inflight_at_edt;
148581 +       now_ns = tp->tcp_clock_cache;
148582 +       edt_ns = max(tp->tcp_wstamp_ns, now_ns);
148583 +       interval_us = div_u64(edt_ns - now_ns, NSEC_PER_USEC);
148584 +       interval_delivered = (u64)bbr_bw(sk) * interval_us >> BW_SCALE;
148585 +       inflight_at_edt = inflight_now;
148586 +       if (bbr->pacing_gain > BBR_UNIT)              /* increasing inflight */
148587 +               inflight_at_edt += bbr_tso_segs_goal(sk);  /* include EDT skb */
148588 +       if (interval_delivered >= inflight_at_edt)
148589 +               return 0;
148590 +       return inflight_at_edt - interval_delivered;
148593 +/* Find the cwnd increment based on estimate of ack aggregation */
148594 +static u32 bbr_ack_aggregation_cwnd(struct sock *sk)
148596 +       struct bbr *bbr = inet_csk_ca(sk);
148597 +       u32 max_aggr_cwnd, aggr_cwnd = 0;
148599 +       if (bbr->params.extra_acked_gain &&
148600 +           (bbr_full_bw_reached(sk) || bbr->params.extra_acked_in_startup)) {
148601 +               max_aggr_cwnd = ((u64)bbr_bw(sk) * bbr_extra_acked_max_us)
148602 +                               / BW_UNIT;
148603 +               aggr_cwnd = (bbr->params.extra_acked_gain * bbr_extra_acked(sk))
148604 +                            >> BBR_SCALE;
148605 +               aggr_cwnd = min(aggr_cwnd, max_aggr_cwnd);
148606 +       }
148608 +       return aggr_cwnd;
148611 +/* Returns the cwnd for PROBE_RTT mode. */
148612 +static u32 bbr_probe_rtt_cwnd(struct sock *sk)
148614 +       struct bbr *bbr = inet_csk_ca(sk);
148616 +       if (bbr->params.probe_rtt_cwnd_gain == 0)
148617 +               return bbr->params.cwnd_min_target;
148618 +       return max_t(u32, bbr->params.cwnd_min_target,
148619 +                    bbr_bdp(sk, bbr_bw(sk), bbr->params.probe_rtt_cwnd_gain));
148622 +/* Slow-start up toward target cwnd (if bw estimate is growing, or packet loss
148623 + * has drawn us down below target), or snap down to target if we're above it.
148624 + */
148625 +static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
148626 +                        u32 acked, u32 bw, int gain, u32 cwnd,
148627 +                        struct bbr_context *ctx)
148629 +       struct tcp_sock *tp = tcp_sk(sk);
148630 +       struct bbr *bbr = inet_csk_ca(sk);
148631 +       u32 target_cwnd = 0, prev_cwnd = tp->snd_cwnd, max_probe;
148633 +       if (!acked)
148634 +               goto done;  /* no packet fully ACKed; just apply caps */
148636 +       target_cwnd = bbr_bdp(sk, bw, gain);
148638 +       /* Increment the cwnd to account for excess ACKed data that seems
148639 +        * due to aggregation (of data and/or ACKs) visible in the ACK stream.
148640 +        */
148641 +       target_cwnd += bbr_ack_aggregation_cwnd(sk);
148642 +       target_cwnd = bbr_quantization_budget(sk, target_cwnd);
148644 +       /* If we're below target cwnd, slow start cwnd toward target cwnd. */
148645 +       bbr->debug.target_cwnd = target_cwnd;
148647 +       /* Update cwnd and enable fast path if cwnd reaches target_cwnd. */
148648 +       bbr->try_fast_path = 0;
148649 +       if (bbr_full_bw_reached(sk)) { /* only cut cwnd if we filled the pipe */
148650 +               cwnd += acked;
148651 +               if (cwnd >= target_cwnd) {
148652 +                       cwnd = target_cwnd;
148653 +                       bbr->try_fast_path = 1;
148654 +               }
148655 +       } else if (cwnd < target_cwnd || cwnd  < 2 * bbr->init_cwnd) {
148656 +               cwnd += acked;
148657 +       } else {
148658 +               bbr->try_fast_path = 1;
148659 +       }
148661 +       /* When growing cwnd, don't grow beyond twice what we just probed. */
148662 +       if (bbr->params.usage_based_cwnd) {
148663 +               max_probe = max(2 * tp->max_packets_out, tp->snd_cwnd);
148664 +               cwnd = min(cwnd, max_probe);
148665 +       }
148667 +       cwnd = max_t(u32, cwnd, bbr->params.cwnd_min_target);
148668 +done:
148669 +       tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);   /* apply global cap */
148670 +       if (bbr->mode == BBR_PROBE_RTT)  /* drain queue, refresh min_rtt */
148671 +               tp->snd_cwnd = min_t(u32, tp->snd_cwnd, bbr_probe_rtt_cwnd(sk));
148673 +       ctx->target_cwnd = target_cwnd;
148674 +       ctx->log = (tp->snd_cwnd != prev_cwnd);
148677 +/* See if we have reached next round trip */
148678 +static void bbr_update_round_start(struct sock *sk,
148679 +               const struct rate_sample *rs, struct bbr_context *ctx)
148681 +       struct tcp_sock *tp = tcp_sk(sk);
148682 +       struct bbr *bbr = inet_csk_ca(sk);
148684 +       bbr->round_start = 0;
148686 +       /* See if we've reached the next RTT */
148687 +       if (rs->interval_us > 0 &&
148688 +           !before(rs->prior_delivered, bbr->next_rtt_delivered)) {
148689 +               bbr->next_rtt_delivered = tp->delivered;
148690 +               bbr->round_start = 1;
148691 +       }
148694 +/* Calculate the bandwidth based on how fast packets are delivered */
148695 +static void bbr_calculate_bw_sample(struct sock *sk,
148696 +                       const struct rate_sample *rs, struct bbr_context *ctx)
148698 +       struct bbr *bbr = inet_csk_ca(sk);
148699 +       u64 bw = 0;
148701 +       /* Divide delivered by the interval to find a (lower bound) bottleneck
148702 +        * bandwidth sample. Delivered is in packets and interval_us in uS and
148703 +        * ratio will be <<1 for most connections. So delivered is first scaled.
148704 +        * Round up to allow growth at low rates, even with integer division.
148705 +        */
148706 +       if (rs->interval_us > 0) {
148707 +               if (WARN_ONCE(rs->delivered < 0,
148708 +                             "negative delivered: %d interval_us: %ld\n",
148709 +                             rs->delivered, rs->interval_us))
148710 +                       return;
148712 +               bw = DIV_ROUND_UP_ULL((u64)rs->delivered * BW_UNIT, rs->interval_us);
148713 +       }
148715 +       ctx->sample_bw = bw;
148716 +       bbr->debug.rs_bw = bw;
148719 +/* Estimates the windowed max degree of ack aggregation.
148720 + * This is used to provision extra in-flight data to keep sending during
148721 + * inter-ACK silences.
148723 + * Degree of ack aggregation is estimated as extra data acked beyond expected.
148725 + * max_extra_acked = "maximum recent excess data ACKed beyond max_bw * interval"
148726 + * cwnd += max_extra_acked
148728 + * Max extra_acked is clamped by cwnd and bw * bbr_extra_acked_max_us (100 ms).
148729 + * Max filter is an approximate sliding window of 5-10 (packet timed) round
148730 + * trips for non-startup phase, and 1-2 round trips for startup.
148731 + */
148732 +static void bbr_update_ack_aggregation(struct sock *sk,
148733 +                                      const struct rate_sample *rs)
148735 +       u32 epoch_us, expected_acked, extra_acked;
148736 +       struct bbr *bbr = inet_csk_ca(sk);
148737 +       struct tcp_sock *tp = tcp_sk(sk);
148738 +       u32 extra_acked_win_rtts_thresh = bbr->params.extra_acked_win_rtts;
148740 +       if (!bbr->params.extra_acked_gain || rs->acked_sacked <= 0 ||
148741 +           rs->delivered < 0 || rs->interval_us <= 0)
148742 +               return;
148744 +       if (bbr->round_start) {
148745 +               bbr->extra_acked_win_rtts = min(0x1F,
148746 +                                               bbr->extra_acked_win_rtts + 1);
148747 +               if (bbr->params.extra_acked_in_startup &&
148748 +                   !bbr_full_bw_reached(sk))
148749 +                       extra_acked_win_rtts_thresh = 1;
148750 +               if (bbr->extra_acked_win_rtts >=
148751 +                   extra_acked_win_rtts_thresh) {
148752 +                       bbr->extra_acked_win_rtts = 0;
148753 +                       bbr->extra_acked_win_idx = bbr->extra_acked_win_idx ?
148754 +                                                  0 : 1;
148755 +                       bbr->extra_acked[bbr->extra_acked_win_idx] = 0;
148756 +               }
148757 +       }
148759 +       /* Compute how many packets we expected to be delivered over epoch. */
148760 +       epoch_us = tcp_stamp_us_delta(tp->delivered_mstamp,
148761 +                                     bbr->ack_epoch_mstamp);
148762 +       expected_acked = ((u64)bbr_bw(sk) * epoch_us) / BW_UNIT;
148764 +       /* Reset the aggregation epoch if ACK rate is below expected rate or
148765 +        * significantly large no. of ack received since epoch (potentially
148766 +        * quite old epoch).
148767 +        */
148768 +       if (bbr->ack_epoch_acked <= expected_acked ||
148769 +           (bbr->ack_epoch_acked + rs->acked_sacked >=
148770 +            bbr_ack_epoch_acked_reset_thresh)) {
148771 +               bbr->ack_epoch_acked = 0;
148772 +               bbr->ack_epoch_mstamp = tp->delivered_mstamp;
148773 +               expected_acked = 0;
148774 +       }
148776 +       /* Compute excess data delivered, beyond what was expected. */
148777 +       bbr->ack_epoch_acked = min_t(u32, 0xFFFFF,
148778 +                                  bbr->ack_epoch_acked + rs->acked_sacked);
148779 +       extra_acked = bbr->ack_epoch_acked - expected_acked;
148780 +       extra_acked = min(extra_acked, tp->snd_cwnd);
148781 +       if (extra_acked > bbr->extra_acked[bbr->extra_acked_win_idx])
148782 +               bbr->extra_acked[bbr->extra_acked_win_idx] = extra_acked;
148785 +/* Estimate when the pipe is full, using the change in delivery rate: BBR
148786 + * estimates that STARTUP filled the pipe if the estimated bw hasn't changed by
148787 + * at least bbr_full_bw_thresh (25%) after bbr_full_bw_cnt (3) non-app-limited
148788 + * rounds. Why 3 rounds: 1: rwin autotuning grows the rwin, 2: we fill the
148789 + * higher rwin, 3: we get higher delivery rate samples. Or transient
148790 + * cross-traffic or radio noise can go away. CUBIC Hystart shares a similar
148791 + * design goal, but uses delay and inter-ACK spacing instead of bandwidth.
148792 + */
148793 +static void bbr_check_full_bw_reached(struct sock *sk,
148794 +                                     const struct rate_sample *rs)
148796 +       struct bbr *bbr = inet_csk_ca(sk);
148797 +       u32 bw_thresh;
148799 +       if (bbr_full_bw_reached(sk) || !bbr->round_start || rs->is_app_limited)
148800 +               return;
148802 +       bw_thresh = (u64)bbr->full_bw * bbr->params.full_bw_thresh >> BBR_SCALE;
148803 +       if (bbr_max_bw(sk) >= bw_thresh) {
148804 +               bbr->full_bw = bbr_max_bw(sk);
148805 +               bbr->full_bw_cnt = 0;
148806 +               return;
148807 +       }
148808 +       ++bbr->full_bw_cnt;
148809 +       bbr->full_bw_reached = bbr->full_bw_cnt >= bbr->params.full_bw_cnt;
148812 +/* If pipe is probably full, drain the queue and then enter steady-state. */
148813 +static bool bbr_check_drain(struct sock *sk, const struct rate_sample *rs,
148814 +                           struct bbr_context *ctx)
148816 +       struct bbr *bbr = inet_csk_ca(sk);
148818 +       if (bbr->mode == BBR_STARTUP && bbr_full_bw_reached(sk)) {
148819 +               bbr->mode = BBR_DRAIN;  /* drain queue we created */
148820 +               tcp_sk(sk)->snd_ssthresh =
148821 +                               bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT);
148822 +               bbr2_reset_congestion_signals(sk);
148823 +       }       /* fall through to check if in-flight is already small: */
148824 +       if (bbr->mode == BBR_DRAIN &&
148825 +           bbr_packets_in_net_at_edt(sk, tcp_packets_in_flight(tcp_sk(sk))) <=
148826 +           bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT))
148827 +               return true;  /* exiting DRAIN now */
148828 +       return false;
148831 +static void bbr_check_probe_rtt_done(struct sock *sk)
148833 +       struct tcp_sock *tp = tcp_sk(sk);
148834 +       struct bbr *bbr = inet_csk_ca(sk);
148836 +       if (!(bbr->probe_rtt_done_stamp &&
148837 +             after(tcp_jiffies32, bbr->probe_rtt_done_stamp)))
148838 +               return;
148840 +       bbr->probe_rtt_min_stamp = tcp_jiffies32; /* schedule next PROBE_RTT */
148841 +       tp->snd_cwnd = max(tp->snd_cwnd, bbr->prior_cwnd);
148842 +       bbr2_exit_probe_rtt(sk);
148845 +/* The goal of PROBE_RTT mode is to have BBR flows cooperatively and
148846 + * periodically drain the bottleneck queue, to converge to measure the true
148847 + * min_rtt (unloaded propagation delay). This allows the flows to keep queues
148848 + * small (reducing queuing delay and packet loss) and achieve fairness among
148849 + * BBR flows.
148851 + * The min_rtt filter window is 10 seconds. When the min_rtt estimate expires,
148852 + * we enter PROBE_RTT mode and cap the cwnd at bbr_cwnd_min_target=4 packets.
148853 + * After at least bbr_probe_rtt_mode_ms=200ms and at least one packet-timed
148854 + * round trip elapsed with that flight size <= 4, we leave PROBE_RTT mode and
148855 + * re-enter the previous mode. BBR uses 200ms to approximately bound the
148856 + * performance penalty of PROBE_RTT's cwnd capping to roughly 2% (200ms/10s).
148858 + * Note that flows need only pay 2% if they are busy sending over the last 10
148859 + * seconds. Interactive applications (e.g., Web, RPCs, video chunks) often have
148860 + * natural silences or low-rate periods within 10 seconds where the rate is low
148861 + * enough for long enough to drain its queue in the bottleneck. We pick up
148862 + * these min RTT measurements opportunistically with our min_rtt filter. :-)
148863 + */
148864 +static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
148866 +       struct tcp_sock *tp = tcp_sk(sk);
148867 +       struct bbr *bbr = inet_csk_ca(sk);
148868 +       bool probe_rtt_expired, min_rtt_expired;
148869 +       u32 expire;
148871 +       /* Track min RTT in probe_rtt_win_ms to time next PROBE_RTT state. */
148872 +       expire = bbr->probe_rtt_min_stamp +
148873 +                msecs_to_jiffies(bbr->params.probe_rtt_win_ms);
148874 +       probe_rtt_expired = after(tcp_jiffies32, expire);
148875 +       if (rs->rtt_us >= 0 &&
148876 +           (rs->rtt_us <= bbr->probe_rtt_min_us ||
148877 +            (probe_rtt_expired && !rs->is_ack_delayed))) {
148878 +               bbr->probe_rtt_min_us = rs->rtt_us;
148879 +               bbr->probe_rtt_min_stamp = tcp_jiffies32;
148880 +       }
148881 +       /* Track min RTT seen in the min_rtt_win_sec filter window: */
148882 +       expire = bbr->min_rtt_stamp + bbr->params.min_rtt_win_sec * HZ;
148883 +       min_rtt_expired = after(tcp_jiffies32, expire);
148884 +       if (bbr->probe_rtt_min_us <= bbr->min_rtt_us ||
148885 +           min_rtt_expired) {
148886 +               bbr->min_rtt_us = bbr->probe_rtt_min_us;
148887 +               bbr->min_rtt_stamp = bbr->probe_rtt_min_stamp;
148888 +       }
148890 +       if (bbr->params.probe_rtt_mode_ms > 0 && probe_rtt_expired &&
148891 +           !bbr->idle_restart && bbr->mode != BBR_PROBE_RTT) {
148892 +               bbr->mode = BBR_PROBE_RTT;  /* dip, drain queue */
148893 +               bbr_save_cwnd(sk);  /* note cwnd so we can restore it */
148894 +               bbr->probe_rtt_done_stamp = 0;
148895 +               bbr->ack_phase = BBR_ACKS_PROBE_STOPPING;
148896 +               bbr->next_rtt_delivered = tp->delivered;
148897 +       }
148899 +       if (bbr->mode == BBR_PROBE_RTT) {
148900 +               /* Ignore low rate samples during this mode. */
148901 +               tp->app_limited =
148902 +                       (tp->delivered + tcp_packets_in_flight(tp)) ? : 1;
148903 +               /* Maintain min packets in flight for max(200 ms, 1 round). */
148904 +               if (!bbr->probe_rtt_done_stamp &&
148905 +                   tcp_packets_in_flight(tp) <= bbr_probe_rtt_cwnd(sk)) {
148906 +                       bbr->probe_rtt_done_stamp = tcp_jiffies32 +
148907 +                               msecs_to_jiffies(bbr->params.probe_rtt_mode_ms);
148908 +                       bbr->probe_rtt_round_done = 0;
148909 +                       bbr->next_rtt_delivered = tp->delivered;
148910 +               } else if (bbr->probe_rtt_done_stamp) {
148911 +                       if (bbr->round_start)
148912 +                               bbr->probe_rtt_round_done = 1;
148913 +                       if (bbr->probe_rtt_round_done)
148914 +                               bbr_check_probe_rtt_done(sk);
148915 +               }
148916 +       }
148917 +       /* Restart after idle ends only once we process a new S/ACK for data */
148918 +       if (rs->delivered > 0)
148919 +               bbr->idle_restart = 0;
148922 +static void bbr_update_gains(struct sock *sk)
148924 +       struct bbr *bbr = inet_csk_ca(sk);
148926 +       switch (bbr->mode) {
148927 +       case BBR_STARTUP:
148928 +               bbr->pacing_gain = bbr->params.high_gain;
148929 +               bbr->cwnd_gain   = bbr->params.startup_cwnd_gain;
148930 +               break;
148931 +       case BBR_DRAIN:
148932 +               bbr->pacing_gain = bbr->params.drain_gain;  /* slow, to drain */
148933 +               bbr->cwnd_gain = bbr->params.startup_cwnd_gain;  /* keep cwnd */
148934 +               break;
148935 +       case BBR_PROBE_BW:
148936 +               bbr->pacing_gain = bbr->params.pacing_gain[bbr->cycle_idx];
148937 +               bbr->cwnd_gain = bbr->params.cwnd_gain;
148938 +               break;
148939 +       case BBR_PROBE_RTT:
148940 +               bbr->pacing_gain = BBR_UNIT;
148941 +               bbr->cwnd_gain = BBR_UNIT;
148942 +               break;
148943 +       default:
148944 +               WARN_ONCE(1, "BBR bad mode: %u\n", bbr->mode);
148945 +               break;
148946 +       }
148949 +static void bbr_init(struct sock *sk)
148951 +       struct tcp_sock *tp = tcp_sk(sk);
148952 +       struct bbr *bbr = inet_csk_ca(sk);
148953 +       int i;
148955 +       WARN_ON_ONCE(tp->snd_cwnd >= bbr_cwnd_warn_val);
148957 +       bbr->initialized = 1;
148958 +       bbr->params.high_gain = min(0x7FF, bbr_high_gain);
148959 +       bbr->params.drain_gain = min(0x3FF, bbr_drain_gain);
148960 +       bbr->params.startup_cwnd_gain = min(0x7FF, bbr_startup_cwnd_gain);
148961 +       bbr->params.cwnd_gain = min(0x7FF, bbr_cwnd_gain);
148962 +       bbr->params.cwnd_tso_budget = min(0x1U, bbr_cwnd_tso_budget);
148963 +       bbr->params.cwnd_min_target = min(0xFU, bbr_cwnd_min_target);
148964 +       bbr->params.min_rtt_win_sec = min(0x1FU, bbr_min_rtt_win_sec);
148965 +       bbr->params.probe_rtt_mode_ms = min(0x1FFU, bbr_probe_rtt_mode_ms);
148966 +       bbr->params.full_bw_cnt = min(0x7U, bbr_full_bw_cnt);
148967 +       bbr->params.full_bw_thresh = min(0x3FFU, bbr_full_bw_thresh);
148968 +       bbr->params.extra_acked_gain = min(0x7FF, bbr_extra_acked_gain);
148969 +       bbr->params.extra_acked_win_rtts = min(0x1FU, bbr_extra_acked_win_rtts);
148970 +       bbr->params.drain_to_target = bbr_drain_to_target ? 1 : 0;
148971 +       bbr->params.precise_ece_ack = bbr_precise_ece_ack ? 1 : 0;
148972 +       bbr->params.extra_acked_in_startup = bbr_extra_acked_in_startup ? 1 : 0;
148973 +       bbr->params.probe_rtt_cwnd_gain = min(0xFFU, bbr_probe_rtt_cwnd_gain);
148974 +       bbr->params.probe_rtt_win_ms =
148975 +               min(0x3FFFU,
148976 +                   min_t(u32, bbr_probe_rtt_win_ms,
148977 +                         bbr->params.min_rtt_win_sec * MSEC_PER_SEC));
148978 +       for (i = 0; i < CYCLE_LEN; i++)
148979 +               bbr->params.pacing_gain[i] = min(0x3FF, bbr_pacing_gain[i]);
148980 +       bbr->params.usage_based_cwnd = bbr_usage_based_cwnd ? 1 : 0;
148981 +       bbr->params.tso_rtt_shift =  min(0xFU, bbr_tso_rtt_shift);
148983 +       bbr->debug.snd_isn = tp->snd_una;
148984 +       bbr->debug.target_cwnd = 0;
148985 +       bbr->debug.undo = 0;
148987 +       bbr->init_cwnd = min(0x7FU, tp->snd_cwnd);
148988 +       bbr->prior_cwnd = tp->prior_cwnd;
148989 +       tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
148990 +       bbr->next_rtt_delivered = 0;
148991 +       bbr->prev_ca_state = TCP_CA_Open;
148992 +       bbr->packet_conservation = 0;
148994 +       bbr->probe_rtt_done_stamp = 0;
148995 +       bbr->probe_rtt_round_done = 0;
148996 +       bbr->probe_rtt_min_us = tcp_min_rtt(tp);
148997 +       bbr->probe_rtt_min_stamp = tcp_jiffies32;
148998 +       bbr->min_rtt_us = tcp_min_rtt(tp);
148999 +       bbr->min_rtt_stamp = tcp_jiffies32;
149001 +       bbr->has_seen_rtt = 0;
149002 +       bbr_init_pacing_rate_from_rtt(sk);
149004 +       bbr->round_start = 0;
149005 +       bbr->idle_restart = 0;
149006 +       bbr->full_bw_reached = 0;
149007 +       bbr->full_bw = 0;
149008 +       bbr->full_bw_cnt = 0;
149009 +       bbr->cycle_mstamp = 0;
149010 +       bbr->cycle_idx = 0;
149011 +       bbr->mode = BBR_STARTUP;
149012 +       bbr->debug.rs_bw = 0;
149014 +       bbr->ack_epoch_mstamp = tp->tcp_mstamp;
149015 +       bbr->ack_epoch_acked = 0;
149016 +       bbr->extra_acked_win_rtts = 0;
149017 +       bbr->extra_acked_win_idx = 0;
149018 +       bbr->extra_acked[0] = 0;
149019 +       bbr->extra_acked[1] = 0;
149021 +       bbr->ce_state = 0;
149022 +       bbr->prior_rcv_nxt = tp->rcv_nxt;
149023 +       bbr->try_fast_path = 0;
149025 +       cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED);
149028 +static u32 bbr_sndbuf_expand(struct sock *sk)
149030 +       /* Provision 3 * cwnd since BBR may slow-start even during recovery. */
149031 +       return 3;
149034 +/* __________________________________________________________________________
149036 + * Functions new to BBR v2 ("bbr") congestion control are below here.
149037 + * __________________________________________________________________________
149038 + */
149040 +/* Incorporate a new bw sample into the current window of our max filter. */
149041 +static void bbr2_take_bw_hi_sample(struct sock *sk, u32 bw)
149043 +       struct bbr *bbr = inet_csk_ca(sk);
149045 +       bbr->bw_hi[1] = max(bw, bbr->bw_hi[1]);
149048 +/* Keep max of last 1-2 cycles. Each PROBE_BW cycle, flip filter window. */
149049 +static void bbr2_advance_bw_hi_filter(struct sock *sk)
149051 +       struct bbr *bbr = inet_csk_ca(sk);
149053 +       if (!bbr->bw_hi[1])
149054 +               return;  /* no samples in this window; remember old window */
149055 +       bbr->bw_hi[0] = bbr->bw_hi[1];
149056 +       bbr->bw_hi[1] = 0;
149059 +/* How much do we want in flight? Our BDP, unless congestion cut cwnd. */
149060 +static u32 bbr2_target_inflight(struct sock *sk)
149062 +       u32 bdp = bbr_inflight(sk, bbr_bw(sk), BBR_UNIT);
149064 +       return min(bdp, tcp_sk(sk)->snd_cwnd);
149067 +static bool bbr2_is_probing_bandwidth(struct sock *sk)
149069 +       struct bbr *bbr = inet_csk_ca(sk);
149071 +       return (bbr->mode == BBR_STARTUP) ||
149072 +               (bbr->mode == BBR_PROBE_BW &&
149073 +                (bbr->cycle_idx == BBR_BW_PROBE_REFILL ||
149074 +                 bbr->cycle_idx == BBR_BW_PROBE_UP));
149077 +/* Has the given amount of time elapsed since we marked the phase start? */
149078 +static bool bbr2_has_elapsed_in_phase(const struct sock *sk, u32 interval_us)
149080 +       const struct tcp_sock *tp = tcp_sk(sk);
149081 +       const struct bbr *bbr = inet_csk_ca(sk);
149083 +       return tcp_stamp_us_delta(tp->tcp_mstamp,
149084 +                                 bbr->cycle_mstamp + interval_us) > 0;
149087 +static void bbr2_handle_queue_too_high_in_startup(struct sock *sk)
149089 +       struct bbr *bbr = inet_csk_ca(sk);
149091 +       bbr->full_bw_reached = 1;
149092 +       bbr->inflight_hi = bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT);
149095 +/* Exit STARTUP upon N consecutive rounds with ECN mark rate > ecn_thresh. */
149096 +static void bbr2_check_ecn_too_high_in_startup(struct sock *sk, u32 ce_ratio)
149098 +       struct bbr *bbr = inet_csk_ca(sk);
149100 +       if (bbr_full_bw_reached(sk) || !bbr->ecn_eligible ||
149101 +           !bbr->params.full_ecn_cnt || !bbr->params.ecn_thresh)
149102 +               return;
149104 +       if (ce_ratio >= bbr->params.ecn_thresh)
149105 +               bbr->startup_ecn_rounds++;
149106 +       else
149107 +               bbr->startup_ecn_rounds = 0;
149109 +       if (bbr->startup_ecn_rounds >= bbr->params.full_ecn_cnt) {
149110 +               bbr->debug.event = 'E';  /* ECN caused STARTUP exit */
149111 +               bbr2_handle_queue_too_high_in_startup(sk);
149112 +               return;
149113 +       }
149116 +static void bbr2_update_ecn_alpha(struct sock *sk)
149118 +       struct tcp_sock *tp = tcp_sk(sk);
149119 +       struct bbr *bbr = inet_csk_ca(sk);
149120 +       s32 delivered, delivered_ce;
149121 +       u64 alpha, ce_ratio;
149122 +       u32 gain;
149124 +       if (bbr->params.ecn_factor == 0)
149125 +               return;
149127 +       delivered = tp->delivered - bbr->alpha_last_delivered;
149128 +       delivered_ce = tp->delivered_ce - bbr->alpha_last_delivered_ce;
149130 +       if (delivered == 0 ||           /* avoid divide by zero */
149131 +           WARN_ON_ONCE(delivered < 0 || delivered_ce < 0))  /* backwards? */
149132 +               return;
149134 +       /* See if we should use ECN sender logic for this connection. */
149135 +       if (!bbr->ecn_eligible && bbr_ecn_enable &&
149136 +           (bbr->min_rtt_us <= bbr->params.ecn_max_rtt_us ||
149137 +            !bbr->params.ecn_max_rtt_us))
149138 +               bbr->ecn_eligible = 1;
149140 +       ce_ratio = (u64)delivered_ce << BBR_SCALE;
149141 +       do_div(ce_ratio, delivered);
149142 +       gain = bbr->params.ecn_alpha_gain;
149143 +       alpha = ((BBR_UNIT - gain) * bbr->ecn_alpha) >> BBR_SCALE;
149144 +       alpha += (gain * ce_ratio) >> BBR_SCALE;
149145 +       bbr->ecn_alpha = min_t(u32, alpha, BBR_UNIT);
149147 +       bbr->alpha_last_delivered = tp->delivered;
149148 +       bbr->alpha_last_delivered_ce = tp->delivered_ce;
149150 +       bbr2_check_ecn_too_high_in_startup(sk, ce_ratio);
149153 +/* Each round trip of BBR_BW_PROBE_UP, double volume of probing data. */
149154 +static void bbr2_raise_inflight_hi_slope(struct sock *sk)
149156 +       struct tcp_sock *tp = tcp_sk(sk);
149157 +       struct bbr *bbr = inet_csk_ca(sk);
149158 +       u32 growth_this_round, cnt;
149160 +       /* Calculate "slope": packets S/Acked per inflight_hi increment. */
149161 +       growth_this_round = 1 << bbr->bw_probe_up_rounds;
149162 +       bbr->bw_probe_up_rounds = min(bbr->bw_probe_up_rounds + 1, 30);
149163 +       cnt = tp->snd_cwnd / growth_this_round;
149164 +       cnt = max(cnt, 1U);
149165 +       bbr->bw_probe_up_cnt = cnt;
149166 +       bbr->debug.event = 'G';  /* Grow inflight_hi slope */
149169 +/* In BBR_BW_PROBE_UP, not seeing high loss/ECN/queue, so raise inflight_hi. */
149170 +static void bbr2_probe_inflight_hi_upward(struct sock *sk,
149171 +                                         const struct rate_sample *rs)
149173 +       struct tcp_sock *tp = tcp_sk(sk);
149174 +       struct bbr *bbr = inet_csk_ca(sk);
149175 +       u32 delta;
149177 +       if (!tp->is_cwnd_limited || tp->snd_cwnd < bbr->inflight_hi) {
149178 +               bbr->bw_probe_up_acks = 0;  /* don't accmulate unused credits */
149179 +               return;  /* not fully using inflight_hi, so don't grow it */
149180 +       }
149182 +       /* For each bw_probe_up_cnt packets ACKed, increase inflight_hi by 1. */
149183 +       bbr->bw_probe_up_acks += rs->acked_sacked;
149184 +       if (bbr->bw_probe_up_acks >=  bbr->bw_probe_up_cnt) {
149185 +               delta = bbr->bw_probe_up_acks / bbr->bw_probe_up_cnt;
149186 +               bbr->bw_probe_up_acks -= delta * bbr->bw_probe_up_cnt;
149187 +               bbr->inflight_hi += delta;
149188 +               bbr->debug.event = 'I';  /* Increment inflight_hi */
149189 +       }
149191 +       if (bbr->round_start)
149192 +               bbr2_raise_inflight_hi_slope(sk);
149195 +/* Does loss/ECN rate for this sample say inflight is "too high"?
149196 + * This is used by both the bbr_check_loss_too_high_in_startup() function,
149197 + * which can be used in either v1 or v2, and the PROBE_UP phase of v2, which
149198 + * uses it to notice when loss/ECN rates suggest inflight is too high.
149199 + */
149200 +static bool bbr2_is_inflight_too_high(const struct sock *sk,
149201 +                                    const struct rate_sample *rs)
149203 +       const struct bbr *bbr = inet_csk_ca(sk);
149204 +       u32 loss_thresh, ecn_thresh;
149206 +       if (rs->lost > 0 && rs->tx_in_flight) {
149207 +               loss_thresh = (u64)rs->tx_in_flight * bbr->params.loss_thresh >>
149208 +                               BBR_SCALE;
149209 +               if (rs->lost > loss_thresh)
149210 +                       return true;
149211 +       }
149213 +       if (rs->delivered_ce > 0 && rs->delivered > 0 &&
149214 +           bbr->ecn_eligible && bbr->params.ecn_thresh) {
149215 +               ecn_thresh = (u64)rs->delivered * bbr->params.ecn_thresh >>
149216 +                               BBR_SCALE;
149217 +               if (rs->delivered_ce >= ecn_thresh)
149218 +                       return true;
149219 +       }
149221 +       return false;
149224 +/* Calculate the tx_in_flight level that corresponded to excessive loss.
149225 + * We find "lost_prefix" segs of the skb where loss rate went too high,
149226 + * by solving for "lost_prefix" in the following equation:
149227 + *   lost                     /  inflight                     >= loss_thresh
149228 + *  (lost_prev + lost_prefix) / (inflight_prev + lost_prefix) >= loss_thresh
149229 + * Then we take that equation, convert it to fixed point, and
149230 + * round up to the nearest packet.
149231 + */
149232 +static u32 bbr2_inflight_hi_from_lost_skb(const struct sock *sk,
149233 +                                         const struct rate_sample *rs,
149234 +                                         const struct sk_buff *skb)
149236 +       const struct bbr *bbr = inet_csk_ca(sk);
149237 +       u32 loss_thresh  = bbr->params.loss_thresh;
149238 +       u32 pcount, divisor, inflight_hi;
149239 +       s32 inflight_prev, lost_prev;
149240 +       u64 loss_budget, lost_prefix;
149242 +       pcount = tcp_skb_pcount(skb);
149244 +       /* How much data was in flight before this skb? */
149245 +       inflight_prev = rs->tx_in_flight - pcount;
149246 +       if (WARN_ONCE(inflight_prev < 0,
149247 +                     "tx_in_flight: %u pcount: %u reneg: %u",
149248 +                     rs->tx_in_flight, pcount, tcp_sk(sk)->is_sack_reneg))
149249 +               return ~0U;
149251 +       /* How much inflight data was marked lost before this skb? */
149252 +       lost_prev = rs->lost - pcount;
149253 +       if (WARN_ON_ONCE(lost_prev < 0))
149254 +               return ~0U;
149256 +       /* At what prefix of this lost skb did losss rate exceed loss_thresh? */
149257 +       loss_budget = (u64)inflight_prev * loss_thresh + BBR_UNIT - 1;
149258 +       loss_budget >>= BBR_SCALE;
149259 +       if (lost_prev >= loss_budget) {
149260 +               lost_prefix = 0;   /* previous losses crossed loss_thresh */
149261 +       } else {
149262 +               lost_prefix = loss_budget - lost_prev;
149263 +               lost_prefix <<= BBR_SCALE;
149264 +               divisor = BBR_UNIT - loss_thresh;
149265 +               if (WARN_ON_ONCE(!divisor))  /* loss_thresh is 8 bits */
149266 +                       return ~0U;
149267 +               do_div(lost_prefix, divisor);
149268 +       }
149270 +       inflight_hi = inflight_prev + lost_prefix;
149271 +       return inflight_hi;
149274 +/* If loss/ECN rates during probing indicated we may have overfilled a
149275 + * buffer, return an operating point that tries to leave unutilized headroom in
149276 + * the path for other flows, for fairness convergence and lower RTTs and loss.
149277 + */
149278 +static u32 bbr2_inflight_with_headroom(const struct sock *sk)
149280 +       struct bbr *bbr = inet_csk_ca(sk);
149281 +       u32 headroom, headroom_fraction;
149283 +       if (bbr->inflight_hi == ~0U)
149284 +               return ~0U;
149286 +       headroom_fraction = bbr->params.inflight_headroom;
149287 +       headroom = ((u64)bbr->inflight_hi * headroom_fraction) >> BBR_SCALE;
149288 +       headroom = max(headroom, 1U);
149289 +       return max_t(s32, bbr->inflight_hi - headroom,
149290 +                    bbr->params.cwnd_min_target);
149293 +/* Bound cwnd to a sensible level, based on our current probing state
149294 + * machine phase and model of a good inflight level (inflight_lo, inflight_hi).
149295 + */
149296 +static void bbr2_bound_cwnd_for_inflight_model(struct sock *sk)
149298 +       struct tcp_sock *tp = tcp_sk(sk);
149299 +       struct bbr *bbr = inet_csk_ca(sk);
149300 +       u32 cap;
149302 +       /* tcp_rcv_synsent_state_process() currently calls tcp_ack()
149303 +        * and thus cong_control() without first initializing us(!).
149304 +        */
149305 +       if (!bbr->initialized)
149306 +               return;
149308 +       cap = ~0U;
149309 +       if (bbr->mode == BBR_PROBE_BW &&
149310 +           bbr->cycle_idx != BBR_BW_PROBE_CRUISE) {
149311 +               /* Probe to see if more packets fit in the path. */
149312 +               cap = bbr->inflight_hi;
149313 +       } else {
149314 +               if (bbr->mode == BBR_PROBE_RTT ||
149315 +                   (bbr->mode == BBR_PROBE_BW &&
149316 +                    bbr->cycle_idx == BBR_BW_PROBE_CRUISE))
149317 +                       cap = bbr2_inflight_with_headroom(sk);
149318 +       }
149319 +       /* Adapt to any loss/ECN since our last bw probe. */
149320 +       cap = min(cap, bbr->inflight_lo);
149322 +       cap = max_t(u32, cap, bbr->params.cwnd_min_target);
149323 +       tp->snd_cwnd = min(cap, tp->snd_cwnd);
149326 +/* Estimate a short-term lower bound on the capacity available now, based
149327 + * on measurements of the current delivery process and recent history. When we
149328 + * are seeing loss/ECN at times when we are not probing bw, then conservatively
149329 + * move toward flow balance by multiplicatively cutting our short-term
149330 + * estimated safe rate and volume of data (bw_lo and inflight_lo). We use a
149331 + * multiplicative decrease in order to converge to a lower capacity in time
149332 + * logarithmic in the magnitude of the decrease.
149334 + * However, we do not cut our short-term estimates lower than the current rate
149335 + * and volume of delivered data from this round trip, since from the current
149336 + * delivery process we can estimate the measured capacity available now.
149338 + * Anything faster than that approach would knowingly risk high loss, which can
149339 + * cause low bw for Reno/CUBIC and high loss recovery latency for
149340 + * request/response flows using any congestion control.
149341 + */
149342 +static void bbr2_adapt_lower_bounds(struct sock *sk)
149344 +       struct tcp_sock *tp = tcp_sk(sk);
149345 +       struct bbr *bbr = inet_csk_ca(sk);
149346 +       u32 ecn_cut, ecn_inflight_lo, beta;
149348 +       /* We only use lower-bound estimates when not probing bw.
149349 +        * When probing we need to push inflight higher to probe bw.
149350 +        */
149351 +       if (bbr2_is_probing_bandwidth(sk))
149352 +               return;
149354 +       /* ECN response. */
149355 +       if (bbr->ecn_in_round && bbr->ecn_eligible && bbr->params.ecn_factor) {
149356 +               /* Reduce inflight to (1 - alpha*ecn_factor). */
149357 +               ecn_cut = (BBR_UNIT -
149358 +                          ((bbr->ecn_alpha * bbr->params.ecn_factor) >>
149359 +                           BBR_SCALE));
149360 +               if (bbr->inflight_lo == ~0U)
149361 +                       bbr->inflight_lo = tp->snd_cwnd;
149362 +               ecn_inflight_lo = (u64)bbr->inflight_lo * ecn_cut >> BBR_SCALE;
149363 +       } else {
149364 +               ecn_inflight_lo = ~0U;
149365 +       }
149367 +       /* Loss response. */
149368 +       if (bbr->loss_in_round) {
149369 +               /* Reduce bw and inflight to (1 - beta). */
149370 +               if (bbr->bw_lo == ~0U)
149371 +                       bbr->bw_lo = bbr_max_bw(sk);
149372 +               if (bbr->inflight_lo == ~0U)
149373 +                       bbr->inflight_lo = tp->snd_cwnd;
149374 +               beta = bbr->params.beta;
149375 +               bbr->bw_lo =
149376 +                       max_t(u32, bbr->bw_latest,
149377 +                             (u64)bbr->bw_lo *
149378 +                             (BBR_UNIT - beta) >> BBR_SCALE);
149379 +               bbr->inflight_lo =
149380 +                       max_t(u32, bbr->inflight_latest,
149381 +                             (u64)bbr->inflight_lo *
149382 +                             (BBR_UNIT - beta) >> BBR_SCALE);
149383 +       }
149385 +       /* Adjust to the lower of the levels implied by loss or ECN. */
149386 +       bbr->inflight_lo = min(bbr->inflight_lo, ecn_inflight_lo);
149389 +/* Reset any short-term lower-bound adaptation to congestion, so that we can
149390 + * push our inflight up.
149391 + */
149392 +static void bbr2_reset_lower_bounds(struct sock *sk)
149394 +       struct bbr *bbr = inet_csk_ca(sk);
149396 +       bbr->bw_lo = ~0U;
149397 +       bbr->inflight_lo = ~0U;
149400 +/* After bw probing (STARTUP/PROBE_UP), reset signals before entering a state
149401 + * machine phase where we adapt our lower bound based on congestion signals.
149402 + */
149403 +static void bbr2_reset_congestion_signals(struct sock *sk)
149405 +       struct bbr *bbr = inet_csk_ca(sk);
149407 +       bbr->loss_in_round = 0;
149408 +       bbr->ecn_in_round = 0;
149409 +       bbr->loss_in_cycle = 0;
149410 +       bbr->ecn_in_cycle = 0;
149411 +       bbr->bw_latest = 0;
149412 +       bbr->inflight_latest = 0;
149415 +/* Update (most of) our congestion signals: track the recent rate and volume of
149416 + * delivered data, presence of loss, and EWMA degree of ECN marking.
149417 + */
149418 +static void bbr2_update_congestion_signals(
149419 +       struct sock *sk, const struct rate_sample *rs, struct bbr_context *ctx)
149421 +       struct tcp_sock *tp = tcp_sk(sk);
149422 +       struct bbr *bbr = inet_csk_ca(sk);
149423 +       u64 bw;
149425 +       bbr->loss_round_start = 0;
149426 +       if (rs->interval_us <= 0 || !rs->acked_sacked)
149427 +               return; /* Not a valid observation */
149428 +       bw = ctx->sample_bw;
149430 +       if (!rs->is_app_limited || bw >= bbr_max_bw(sk))
149431 +               bbr2_take_bw_hi_sample(sk, bw);
149433 +       bbr->loss_in_round |= (rs->losses > 0);
149435 +       /* Update rate and volume of delivered data from latest round trip: */
149436 +       bbr->bw_latest       = max_t(u32, bbr->bw_latest,       ctx->sample_bw);
149437 +       bbr->inflight_latest = max_t(u32, bbr->inflight_latest, rs->delivered);
149439 +       if (before(rs->prior_delivered, bbr->loss_round_delivered))
149440 +               return;         /* skip the per-round-trip updates */
149441 +       /* Now do per-round-trip updates. */
149442 +       bbr->loss_round_delivered = tp->delivered;  /* mark round trip */
149443 +       bbr->loss_round_start = 1;
149444 +       bbr2_adapt_lower_bounds(sk);
149446 +       /* Update windowed "latest" (single-round-trip) filters. */
149447 +       bbr->loss_in_round = 0;
149448 +       bbr->ecn_in_round  = 0;
149449 +       bbr->bw_latest = ctx->sample_bw;
149450 +       bbr->inflight_latest = rs->delivered;
149453 +/* Bandwidth probing can cause loss. To help coexistence with loss-based
149454 + * congestion control we spread out our probing in a Reno-conscious way. Due to
149455 + * the shape of the Reno sawtooth, the time required between loss epochs for an
149456 + * idealized Reno flow is a number of round trips that is the BDP of that
149457 + * flow. We count packet-timed round trips directly, since measured RTT can
149458 + * vary widely, and Reno is driven by packet-timed round trips.
149459 + */
149460 +static bool bbr2_is_reno_coexistence_probe_time(struct sock *sk)
149462 +       struct bbr *bbr = inet_csk_ca(sk);
149463 +       u32 inflight, rounds, reno_gain, reno_rounds;
149465 +       /* Random loss can shave some small percentage off of our inflight
149466 +        * in each round. To survive this, flows need robust periodic probes.
149467 +        */
149468 +       rounds = bbr->params.bw_probe_max_rounds;
149470 +       reno_gain = bbr->params.bw_probe_reno_gain;
149471 +       if (reno_gain) {
149472 +               inflight = bbr2_target_inflight(sk);
149473 +               reno_rounds = ((u64)inflight * reno_gain) >> BBR_SCALE;
149474 +               rounds = min(rounds, reno_rounds);
149475 +       }
149476 +       return bbr->rounds_since_probe >= rounds;
149479 +/* How long do we want to wait before probing for bandwidth (and risking
149480 + * loss)? We randomize the wait, for better mixing and fairness convergence.
149482 + * We bound the Reno-coexistence inter-bw-probe time to be 62-63 round trips.
149483 + * This is calculated to allow fairness with a 25Mbps, 30ms Reno flow,
149484 + * (eg 4K video to a broadband user):
149485 + *   BDP = 25Mbps * .030sec /(1514bytes) = 61.9 packets
149487 + * We bound the BBR-native inter-bw-probe wall clock time to be:
149488 + *  (a) higher than 2 sec: to try to avoid causing loss for a long enough time
149489 + *      to allow Reno at 30ms to get 4K video bw, the inter-bw-probe time must
149490 + *      be at least: 25Mbps * .030sec / (1514bytes) * 0.030sec = 1.9secs
149491 + *  (b) lower than 3 sec: to ensure flows can start probing in a reasonable
149492 + *      amount of time to discover unutilized bw on human-scale interactive
149493 + *      time-scales (e.g. perhaps traffic from a web page download that we
149494 + *      were competing with is now complete).
149495 + */
149496 +static void bbr2_pick_probe_wait(struct sock *sk)
149498 +       struct bbr *bbr = inet_csk_ca(sk);
149500 +       /* Decide the random round-trip bound for wait until probe: */
149501 +       bbr->rounds_since_probe =
149502 +               prandom_u32_max(bbr->params.bw_probe_rand_rounds);
149503 +       /* Decide the random wall clock bound for wait until probe: */
149504 +       bbr->probe_wait_us = bbr->params.bw_probe_base_us +
149505 +                            prandom_u32_max(bbr->params.bw_probe_rand_us);
149508 +static void bbr2_set_cycle_idx(struct sock *sk, int cycle_idx)
149510 +       struct bbr *bbr = inet_csk_ca(sk);
149512 +       bbr->cycle_idx = cycle_idx;
149513 +       /* New phase, so need to update cwnd and pacing rate. */
149514 +       bbr->try_fast_path = 0;
149517 +/* Send at estimated bw to fill the pipe, but not queue. We need this phase
149518 + * before PROBE_UP, because as soon as we send faster than the available bw
149519 + * we will start building a queue, and if the buffer is shallow we can cause
149520 + * loss. If we do not fill the pipe before we cause this loss, our bw_hi and
149521 + * inflight_hi estimates will underestimate.
149522 + */
149523 +static void bbr2_start_bw_probe_refill(struct sock *sk, u32 bw_probe_up_rounds)
149525 +       struct tcp_sock *tp = tcp_sk(sk);
149526 +       struct bbr *bbr = inet_csk_ca(sk);
149528 +       bbr2_reset_lower_bounds(sk);
149529 +       if (bbr->inflight_hi != ~0U)
149530 +               bbr->inflight_hi += bbr->params.refill_add_inc;
149531 +       bbr->bw_probe_up_rounds = bw_probe_up_rounds;
149532 +       bbr->bw_probe_up_acks = 0;
149533 +       bbr->stopped_risky_probe = 0;
149534 +       bbr->ack_phase = BBR_ACKS_REFILLING;
149535 +       bbr->next_rtt_delivered = tp->delivered;
149536 +       bbr2_set_cycle_idx(sk, BBR_BW_PROBE_REFILL);
149539 +/* Now probe max deliverable data rate and volume. */
149540 +static void bbr2_start_bw_probe_up(struct sock *sk)
149542 +       struct tcp_sock *tp = tcp_sk(sk);
149543 +       struct bbr *bbr = inet_csk_ca(sk);
149545 +       bbr->ack_phase = BBR_ACKS_PROBE_STARTING;
149546 +       bbr->next_rtt_delivered = tp->delivered;
149547 +       bbr->cycle_mstamp = tp->tcp_mstamp;
149548 +       bbr2_set_cycle_idx(sk, BBR_BW_PROBE_UP);
149549 +       bbr2_raise_inflight_hi_slope(sk);
149552 +/* Start a new PROBE_BW probing cycle of some wall clock length. Pick a wall
149553 + * clock time at which to probe beyond an inflight that we think to be
149554 + * safe. This will knowingly risk packet loss, so we want to do this rarely, to
149555 + * keep packet loss rates low. Also start a round-trip counter, to probe faster
149556 + * if we estimate a Reno flow at our BDP would probe faster.
149557 + */
149558 +static void bbr2_start_bw_probe_down(struct sock *sk)
149560 +       struct tcp_sock *tp = tcp_sk(sk);
149561 +       struct bbr *bbr = inet_csk_ca(sk);
149563 +       bbr2_reset_congestion_signals(sk);
149564 +       bbr->bw_probe_up_cnt = ~0U;     /* not growing inflight_hi any more */
149565 +       bbr2_pick_probe_wait(sk);
149566 +       bbr->cycle_mstamp = tp->tcp_mstamp;             /* start wall clock */
149567 +       bbr->ack_phase = BBR_ACKS_PROBE_STOPPING;
149568 +       bbr->next_rtt_delivered = tp->delivered;
149569 +       bbr2_set_cycle_idx(sk, BBR_BW_PROBE_DOWN);
149572 +/* Cruise: maintain what we estimate to be a neutral, conservative
149573 + * operating point, without attempting to probe up for bandwidth or down for
149574 + * RTT, and only reducing inflight in response to loss/ECN signals.
149575 + */
149576 +static void bbr2_start_bw_probe_cruise(struct sock *sk)
149578 +       struct bbr *bbr = inet_csk_ca(sk);
149580 +       if (bbr->inflight_lo != ~0U)
149581 +               bbr->inflight_lo = min(bbr->inflight_lo, bbr->inflight_hi);
149583 +       bbr2_set_cycle_idx(sk, BBR_BW_PROBE_CRUISE);
149586 +/* Loss and/or ECN rate is too high while probing.
149587 + * Adapt (once per bw probe) by cutting inflight_hi and then restarting cycle.
149588 + */
149589 +static void bbr2_handle_inflight_too_high(struct sock *sk,
149590 +                                         const struct rate_sample *rs)
149592 +       struct bbr *bbr = inet_csk_ca(sk);
149593 +       const u32 beta = bbr->params.beta;
149595 +       bbr->prev_probe_too_high = 1;
149596 +       bbr->bw_probe_samples = 0;  /* only react once per probe */
149597 +       bbr->debug.event = 'L';     /* Loss/ECN too high */
149598 +       /* If we are app-limited then we are not robustly
149599 +        * probing the max volume of inflight data we think
149600 +        * might be safe (analogous to how app-limited bw
149601 +        * samples are not known to be robustly probing bw).
149602 +        */
149603 +       if (!rs->is_app_limited)
149604 +               bbr->inflight_hi = max_t(u32, rs->tx_in_flight,
149605 +                                        (u64)bbr2_target_inflight(sk) *
149606 +                                        (BBR_UNIT - beta) >> BBR_SCALE);
149607 +       if (bbr->mode == BBR_PROBE_BW && bbr->cycle_idx == BBR_BW_PROBE_UP)
149608 +               bbr2_start_bw_probe_down(sk);
149611 +/* If we're seeing bw and loss samples reflecting our bw probing, adapt
149612 + * using the signals we see. If loss or ECN mark rate gets too high, then adapt
149613 + * inflight_hi downward. If we're able to push inflight higher without such
149614 + * signals, push higher: adapt inflight_hi upward.
149615 + */
149616 +static bool bbr2_adapt_upper_bounds(struct sock *sk,
149617 +                                  const struct rate_sample *rs)
149619 +       struct bbr *bbr = inet_csk_ca(sk);
149621 +       /* Track when we'll see bw/loss samples resulting from our bw probes. */
149622 +       if (bbr->ack_phase == BBR_ACKS_PROBE_STARTING && bbr->round_start)
149623 +               bbr->ack_phase = BBR_ACKS_PROBE_FEEDBACK;
149624 +       if (bbr->ack_phase == BBR_ACKS_PROBE_STOPPING && bbr->round_start) {
149625 +               /* End of samples from bw probing phase. */
149626 +               bbr->bw_probe_samples = 0;
149627 +               bbr->ack_phase = BBR_ACKS_INIT;
149628 +               /* At this point in the cycle, our current bw sample is also
149629 +                * our best recent chance at finding the highest available bw
149630 +                * for this flow. So now is the best time to forget the bw
149631 +                * samples from the previous cycle, by advancing the window.
149632 +                */
149633 +               if (bbr->mode == BBR_PROBE_BW && !rs->is_app_limited)
149634 +                       bbr2_advance_bw_hi_filter(sk);
149635 +               /* If we had an inflight_hi, then probed and pushed inflight all
149636 +                * the way up to hit that inflight_hi without seeing any
149637 +                * high loss/ECN in all the resulting ACKs from that probing,
149638 +                * then probe up again, this time letting inflight persist at
149639 +                * inflight_hi for a round trip, then accelerating beyond.
149640 +                */
149641 +               if (bbr->mode == BBR_PROBE_BW &&
149642 +                   bbr->stopped_risky_probe && !bbr->prev_probe_too_high) {
149643 +                       bbr->debug.event = 'R';  /* reprobe */
149644 +                       bbr2_start_bw_probe_refill(sk, 0);
149645 +                       return true;  /* yes, decided state transition */
149646 +               }
149647 +       }
149649 +       if (bbr2_is_inflight_too_high(sk, rs)) {
149650 +               if (bbr->bw_probe_samples)  /*  sample is from bw probing? */
149651 +                       bbr2_handle_inflight_too_high(sk, rs);
149652 +       } else {
149653 +               /* Loss/ECN rate is declared safe. Adjust upper bound upward. */
149654 +               if (bbr->inflight_hi == ~0U)  /* no excess queue signals yet? */
149655 +                       return false;
149657 +               /* To be resilient to random loss, we must raise inflight_hi
149658 +                * if we observe in any phase that a higher level is safe.
149659 +                */
149660 +               if (rs->tx_in_flight > bbr->inflight_hi) {
149661 +                       bbr->inflight_hi = rs->tx_in_flight;
149662 +                       bbr->debug.event = 'U';  /* raise up inflight_hi */
149663 +               }
149665 +               if (bbr->mode == BBR_PROBE_BW &&
149666 +                   bbr->cycle_idx == BBR_BW_PROBE_UP)
149667 +                       bbr2_probe_inflight_hi_upward(sk, rs);
149668 +       }
149670 +       return false;
149673 +/* Check if it's time to probe for bandwidth now, and if so, kick it off. */
149674 +static bool bbr2_check_time_to_probe_bw(struct sock *sk)
149676 +       struct bbr *bbr = inet_csk_ca(sk);
149677 +       u32 n;
149679 +       /* If we seem to be at an operating point where we are not seeing loss
149680 +        * but we are seeing ECN marks, then when the ECN marks cease we reprobe
149681 +        * quickly (in case a burst of cross-traffic has ceased and freed up bw,
149682 +        * or in case we are sharing with multiplicatively probing traffic).
149683 +        */
149684 +       if (bbr->params.ecn_reprobe_gain && bbr->ecn_eligible &&
149685 +           bbr->ecn_in_cycle && !bbr->loss_in_cycle &&
149686 +           inet_csk(sk)->icsk_ca_state == TCP_CA_Open) {
149687 +               bbr->debug.event = 'A';  /* *A*ll clear to probe *A*gain */
149688 +               /* Calculate n so that when bbr2_raise_inflight_hi_slope()
149689 +                * computes growth_this_round as 2^n it will be roughly the
149690 +                * desired volume of data (inflight_hi*ecn_reprobe_gain).
149691 +                */
149692 +               n = ilog2((((u64)bbr->inflight_hi *
149693 +                           bbr->params.ecn_reprobe_gain) >> BBR_SCALE));
149694 +               bbr2_start_bw_probe_refill(sk, n);
149695 +               return true;
149696 +       }
149698 +       if (bbr2_has_elapsed_in_phase(sk, bbr->probe_wait_us) ||
149699 +           bbr2_is_reno_coexistence_probe_time(sk)) {
149700 +               bbr2_start_bw_probe_refill(sk, 0);
149701 +               return true;
149702 +       }
149703 +       return false;
149706 +/* Is it time to transition from PROBE_DOWN to PROBE_CRUISE? */
149707 +static bool bbr2_check_time_to_cruise(struct sock *sk, u32 inflight, u32 bw)
149709 +       struct bbr *bbr = inet_csk_ca(sk);
149710 +       bool is_under_bdp, is_long_enough;
149712 +       /* Always need to pull inflight down to leave headroom in queue. */
149713 +       if (inflight > bbr2_inflight_with_headroom(sk))
149714 +               return false;
149716 +       is_under_bdp = inflight <= bbr_inflight(sk, bw, BBR_UNIT);
149717 +       if (bbr->params.drain_to_target)
149718 +               return is_under_bdp;
149720 +       is_long_enough = bbr2_has_elapsed_in_phase(sk, bbr->min_rtt_us);
149721 +       return is_under_bdp || is_long_enough;
149724 +/* PROBE_BW state machine: cruise, refill, probe for bw, or drain? */
149725 +static void bbr2_update_cycle_phase(struct sock *sk,
149726 +                                   const struct rate_sample *rs)
149728 +       struct bbr *bbr = inet_csk_ca(sk);
149729 +       bool is_risky = false, is_queuing = false;
149730 +       u32 inflight, bw;
149732 +       if (!bbr_full_bw_reached(sk))
149733 +               return;
149735 +       /* In DRAIN, PROBE_BW, or PROBE_RTT, adjust upper bounds. */
149736 +       if (bbr2_adapt_upper_bounds(sk, rs))
149737 +               return;         /* already decided state transition */
149739 +       if (bbr->mode != BBR_PROBE_BW)
149740 +               return;
149742 +       inflight = bbr_packets_in_net_at_edt(sk, rs->prior_in_flight);
149743 +       bw = bbr_max_bw(sk);
149745 +       switch (bbr->cycle_idx) {
149746 +       /* First we spend most of our time cruising with a pacing_gain of 1.0,
149747 +        * which paces at the estimated bw, to try to fully use the pipe
149748 +        * without building queue. If we encounter loss/ECN marks, we adapt
149749 +        * by slowing down.
149750 +        */
149751 +       case BBR_BW_PROBE_CRUISE:
149752 +               if (bbr2_check_time_to_probe_bw(sk))
149753 +                       return;         /* already decided state transition */
149754 +               break;
149756 +       /* After cruising, when it's time to probe, we first "refill": we send
149757 +        * at the estimated bw to fill the pipe, before probing higher and
149758 +        * knowingly risking overflowing the bottleneck buffer (causing loss).
149759 +        */
149760 +       case BBR_BW_PROBE_REFILL:
149761 +               if (bbr->round_start) {
149762 +                       /* After one full round trip of sending in REFILL, we
149763 +                        * start to see bw samples reflecting our REFILL, which
149764 +                        * may be putting too much data in flight.
149765 +                        */
149766 +                       bbr->bw_probe_samples = 1;
149767 +                       bbr2_start_bw_probe_up(sk);
149768 +               }
149769 +               break;
149771 +       /* After we refill the pipe, we probe by using a pacing_gain > 1.0, to
149772 +        * probe for bw. If we have not seen loss/ECN, we try to raise inflight
149773 +        * to at least pacing_gain*BDP; note that this may take more than
149774 +        * min_rtt if min_rtt is small (e.g. on a LAN).
149775 +        *
149776 +        * We terminate PROBE_UP bandwidth probing upon any of the following:
149777 +        *
149778 +        * (1) We've pushed inflight up to hit the inflight_hi target set in the
149779 +        *     most recent previous bw probe phase. Thus we want to start
149780 +        *     draining the queue immediately because it's very likely the most
149781 +        *     recently sent packets will fill the queue and cause drops.
149782 +        *     (checked here)
149783 +        * (2) We have probed for at least 1*min_rtt_us, and the
149784 +        *     estimated queue is high enough (inflight > 1.25 * estimated_bdp).
149785 +        *     (checked here)
149786 +        * (3) Loss filter says loss rate is "too high".
149787 +        *     (checked in bbr_is_inflight_too_high())
149788 +        * (4) ECN filter says ECN mark rate is "too high".
149789 +        *     (checked in bbr_is_inflight_too_high())
149790 +        */
149791 +       case BBR_BW_PROBE_UP:
149792 +               if (bbr->prev_probe_too_high &&
149793 +                   inflight >= bbr->inflight_hi) {
149794 +                       bbr->stopped_risky_probe = 1;
149795 +                       is_risky = true;
149796 +                       bbr->debug.event = 'D';   /* D for danger */
149797 +               } else if (bbr2_has_elapsed_in_phase(sk, bbr->min_rtt_us) &&
149798 +                          inflight >=
149799 +                          bbr_inflight(sk, bw,
149800 +                                       bbr->params.bw_probe_pif_gain)) {
149801 +                       is_queuing = true;
149802 +                       bbr->debug.event = 'Q'; /* building Queue */
149803 +               }
149804 +               if (is_risky || is_queuing) {
149805 +                       bbr->prev_probe_too_high = 0;  /* no loss/ECN (yet) */
149806 +                       bbr2_start_bw_probe_down(sk);  /* restart w/ down */
149807 +               }
149808 +               break;
149810 +       /* After probing in PROBE_UP, we have usually accumulated some data in
149811 +        * the bottleneck buffer (if bw probing didn't find more bw). We next
149812 +        * enter PROBE_DOWN to try to drain any excess data from the queue. To
149813 +        * do this, we use a pacing_gain < 1.0. We hold this pacing gain until
149814 +        * our inflight is less then that target cruising point, which is the
149815 +        * minimum of (a) the amount needed to leave headroom, and (b) the
149816 +        * estimated BDP. Once inflight falls to match the target, we estimate
149817 +        * the queue is drained; persisting would underutilize the pipe.
149818 +        */
149819 +       case BBR_BW_PROBE_DOWN:
149820 +               if (bbr2_check_time_to_probe_bw(sk))
149821 +                       return;         /* already decided state transition */
149822 +               if (bbr2_check_time_to_cruise(sk, inflight, bw))
149823 +                       bbr2_start_bw_probe_cruise(sk);
149824 +               break;
149826 +       default:
149827 +               WARN_ONCE(1, "BBR invalid cycle index %u\n", bbr->cycle_idx);
149828 +       }
149831 +/* Exiting PROBE_RTT, so return to bandwidth probing in STARTUP or PROBE_BW. */
149832 +static void bbr2_exit_probe_rtt(struct sock *sk)
149834 +       struct bbr *bbr = inet_csk_ca(sk);
149836 +       bbr2_reset_lower_bounds(sk);
149837 +       if (bbr_full_bw_reached(sk)) {
149838 +               bbr->mode = BBR_PROBE_BW;
149839 +               /* Raising inflight after PROBE_RTT may cause loss, so reset
149840 +                * the PROBE_BW clock and schedule the next bandwidth probe for
149841 +                * a friendly and randomized future point in time.
149842 +                */
149843 +               bbr2_start_bw_probe_down(sk);
149844 +               /* Since we are exiting PROBE_RTT, we know inflight is
149845 +                * below our estimated BDP, so it is reasonable to cruise.
149846 +                */
149847 +               bbr2_start_bw_probe_cruise(sk);
149848 +       } else {
149849 +               bbr->mode = BBR_STARTUP;
149850 +       }
149853 +/* Exit STARTUP based on loss rate > 1% and loss gaps in round >= N. Wait until
149854 + * the end of the round in recovery to get a good estimate of how many packets
149855 + * have been lost, and how many we need to drain with a low pacing rate.
149856 + */
149857 +static void bbr2_check_loss_too_high_in_startup(struct sock *sk,
149858 +                                              const struct rate_sample *rs)
149860 +       struct bbr *bbr = inet_csk_ca(sk);
149862 +       if (bbr_full_bw_reached(sk))
149863 +               return;
149865 +       /* For STARTUP exit, check the loss rate at the end of each round trip
149866 +        * of Recovery episodes in STARTUP. We check the loss rate at the end
149867 +        * of the round trip to filter out noisy/low loss and have a better
149868 +        * sense of inflight (extent of loss), so we can drain more accurately.
149869 +        */
149870 +       if (rs->losses && bbr->loss_events_in_round < 0xf)
149871 +               bbr->loss_events_in_round++;  /* update saturating counter */
149872 +       if (bbr->params.full_loss_cnt && bbr->loss_round_start &&
149873 +           inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery &&
149874 +           bbr->loss_events_in_round >= bbr->params.full_loss_cnt &&
149875 +           bbr2_is_inflight_too_high(sk, rs)) {
149876 +               bbr->debug.event = 'P';  /* Packet loss caused STARTUP exit */
149877 +               bbr2_handle_queue_too_high_in_startup(sk);
149878 +               return;
149879 +       }
149880 +       if (bbr->loss_round_start)
149881 +               bbr->loss_events_in_round = 0;
149884 +/* If we are done draining, advance into steady state operation in PROBE_BW. */
149885 +static void bbr2_check_drain(struct sock *sk, const struct rate_sample *rs,
149886 +                            struct bbr_context *ctx)
149888 +       struct bbr *bbr = inet_csk_ca(sk);
149890 +       if (bbr_check_drain(sk, rs, ctx)) {
149891 +               bbr->mode = BBR_PROBE_BW;
149892 +               bbr2_start_bw_probe_down(sk);
149893 +       }
149896 +static void bbr2_update_model(struct sock *sk, const struct rate_sample *rs,
149897 +                             struct bbr_context *ctx)
149899 +       bbr2_update_congestion_signals(sk, rs, ctx);
149900 +       bbr_update_ack_aggregation(sk, rs);
149901 +       bbr2_check_loss_too_high_in_startup(sk, rs);
149902 +       bbr_check_full_bw_reached(sk, rs);
149903 +       bbr2_check_drain(sk, rs, ctx);
149904 +       bbr2_update_cycle_phase(sk, rs);
149905 +       bbr_update_min_rtt(sk, rs);
149908 +/* Fast path for app-limited case.
149910 + * On each ack, we execute bbr state machine, which primarily consists of:
149911 + * 1) update model based on new rate sample, and
149912 + * 2) update control based on updated model or state change.
149914 + * There are certain workload/scenarios, e.g. app-limited case, where
149915 + * either we can skip updating model or we can skip update of both model
149916 + * as well as control. This provides signifcant softirq cpu savings for
149917 + * processing incoming acks.
149919 + * In case of app-limited, if there is no congestion (loss/ecn) and
149920 + * if observed bw sample is less than current estimated bw, then we can
149921 + * skip some of the computation in bbr state processing:
149923 + * - if there is no rtt/mode/phase change: In this case, since all the
149924 + *   parameters of the network model are constant, we can skip model
149925 + *   as well control update.
149927 + * - else we can skip rest of the model update. But we still need to
149928 + *   update the control to account for the new rtt/mode/phase.
149930 + * Returns whether we can take fast path or not.
149931 + */
149932 +static bool bbr2_fast_path(struct sock *sk, bool *update_model,
149933 +               const struct rate_sample *rs, struct bbr_context *ctx)
149935 +       struct bbr *bbr = inet_csk_ca(sk);
149936 +       u32 prev_min_rtt_us, prev_mode;
149938 +       if (bbr->params.fast_path && bbr->try_fast_path &&
149939 +           rs->is_app_limited && ctx->sample_bw < bbr_max_bw(sk) &&
149940 +           !bbr->loss_in_round && !bbr->ecn_in_round) {
149941 +               prev_mode = bbr->mode;
149942 +               prev_min_rtt_us = bbr->min_rtt_us;
149943 +               bbr2_check_drain(sk, rs, ctx);
149944 +               bbr2_update_cycle_phase(sk, rs);
149945 +               bbr_update_min_rtt(sk, rs);
149947 +               if (bbr->mode == prev_mode &&
149948 +                   bbr->min_rtt_us == prev_min_rtt_us &&
149949 +                   bbr->try_fast_path)
149950 +                       return true;
149952 +               /* Skip model update, but control still needs to be updated */
149953 +               *update_model = false;
149954 +       }
149955 +       return false;
149958 +static void bbr2_main(struct sock *sk, const struct rate_sample *rs)
149960 +       struct tcp_sock *tp = tcp_sk(sk);
149961 +       struct bbr *bbr = inet_csk_ca(sk);
149962 +       struct bbr_context ctx = { 0 };
149963 +       bool update_model = true;
149964 +       u32 bw;
149966 +       bbr->debug.event = '.';  /* init to default NOP (no event yet) */
149968 +       bbr_update_round_start(sk, rs, &ctx);
149969 +       if (bbr->round_start) {
149970 +               bbr->rounds_since_probe =
149971 +                       min_t(s32, bbr->rounds_since_probe + 1, 0xFF);
149972 +               bbr2_update_ecn_alpha(sk);
149973 +       }
149975 +       bbr->ecn_in_round  |= rs->is_ece;
149976 +       bbr_calculate_bw_sample(sk, rs, &ctx);
149978 +       if (bbr2_fast_path(sk, &update_model, rs, &ctx))
149979 +               goto out;
149981 +       if (update_model)
149982 +               bbr2_update_model(sk, rs, &ctx);
149984 +       bbr_update_gains(sk);
149985 +       bw = bbr_bw(sk);
149986 +       bbr_set_pacing_rate(sk, bw, bbr->pacing_gain);
149987 +       bbr_set_cwnd(sk, rs, rs->acked_sacked, bw, bbr->cwnd_gain,
149988 +                    tp->snd_cwnd, &ctx);
149989 +       bbr2_bound_cwnd_for_inflight_model(sk);
149991 +out:
149992 +       bbr->prev_ca_state = inet_csk(sk)->icsk_ca_state;
149993 +       bbr->loss_in_cycle |= rs->lost > 0;
149994 +       bbr->ecn_in_cycle  |= rs->delivered_ce > 0;
149996 +       bbr_debug(sk, rs->acked_sacked, rs, &ctx);
149999 +/* Module parameters that are settable by TCP_CONGESTION_PARAMS are declared
150000 + * down here, so that the algorithm functions that use the parameters must use
150001 + * the per-socket parameters; if they accidentally use the global version
150002 + * then there will be a compile error.
150003 + * TODO(ncardwell): move all per-socket parameters down to this section.
150004 + */
150006 +/* On losses, scale down inflight and pacing rate by beta scaled by BBR_SCALE.
150007 + * No loss response when 0. Max allwed value is 255.
150008 + */
150009 +static u32 bbr_beta = BBR_UNIT * 30 / 100;
150011 +/* Gain factor for ECN mark ratio samples, scaled by BBR_SCALE.
150012 + * Max allowed value is 255.
150013 + */
150014 +static u32 bbr_ecn_alpha_gain = BBR_UNIT * 1 / 16;  /* 1/16 = 6.25% */
150016 +/* The initial value for the ecn_alpha state variable. Default and max
150017 + * BBR_UNIT (256), representing 1.0. This allows a flow to respond quickly
150018 + * to congestion if the bottleneck is congested when the flow starts up.
150019 + */
150020 +static u32 bbr_ecn_alpha_init = BBR_UNIT;      /* 1.0, to respond quickly */
150022 +/* On ECN, cut inflight_lo to (1 - ecn_factor * ecn_alpha) scaled by BBR_SCALE.
150023 + * No ECN based bounding when 0. Max allwed value is 255.
150024 + */
150025 +static u32 bbr_ecn_factor = BBR_UNIT * 1 / 3;      /* 1/3 = 33% */
150027 +/* Estimate bw probing has gone too far if CE ratio exceeds this threshold.
150028 + * Scaled by BBR_SCALE. Disabled when 0. Max allowed is 255.
150029 + */
150030 +static u32 bbr_ecn_thresh = BBR_UNIT * 1 / 2;  /* 1/2 = 50% */
150032 +/* Max RTT (in usec) at which to use sender-side ECN logic.
150033 + * Disabled when 0 (ECN allowed at any RTT).
150034 + * Max allowed for the parameter is 524287 (0x7ffff) us, ~524 ms.
150035 + */
150036 +static u32 bbr_ecn_max_rtt_us = 5000;
150038 +/* If non-zero, if in a cycle with no losses but some ECN marks, after ECN
150039 + * clears then use a multiplicative increase to quickly reprobe bw by
150040 + * starting inflight probing at the given multiple of inflight_hi.
150041 + * Default for this experimental knob is 0 (disabled).
150042 + * Planned value for experiments: BBR_UNIT * 1 / 2 = 128, representing 0.5.
150043 + */
150044 +static u32 bbr_ecn_reprobe_gain;
150046 +/* Estimate bw probing has gone too far if loss rate exceeds this level. */
150047 +static u32 bbr_loss_thresh = BBR_UNIT * 2 / 100;  /* 2% loss */
150049 +/* Exit STARTUP if number of loss marking events in a Recovery round is >= N,
150050 + * and loss rate is higher than bbr_loss_thresh.
150051 + * Disabled if 0. Max allowed value is 15 (0xF).
150052 + */
150053 +static u32 bbr_full_loss_cnt = 8;
150055 +/* Exit STARTUP if number of round trips with ECN mark rate above ecn_thresh
150056 + * meets this count. Max allowed value is 3.
150057 + */
150058 +static u32 bbr_full_ecn_cnt = 2;
150060 +/* Fraction of unutilized headroom to try to leave in path upon high loss. */
150061 +static u32 bbr_inflight_headroom = BBR_UNIT * 15 / 100;
150063 +/* Multiplier to get target inflight (as multiple of BDP) for PROBE_UP phase.
150064 + * Default is 1.25x, as in BBR v1. Max allowed is 511.
150065 + */
150066 +static u32 bbr_bw_probe_pif_gain = BBR_UNIT * 5 / 4;
150068 +/* Multiplier to get Reno-style probe epoch duration as: k * BDP round trips.
150069 + * If zero, disables this BBR v2 Reno-style BDP-scaled coexistence mechanism.
150070 + * Max allowed is 511.
150071 + */
150072 +static u32 bbr_bw_probe_reno_gain = BBR_UNIT;
150074 +/* Max number of packet-timed rounds to wait before probing for bandwidth.  If
150075 + * we want to tolerate 1% random loss per round, and not have this cut our
150076 + * inflight too much, we must probe for bw periodically on roughly this scale.
150077 + * If low, limits Reno/CUBIC coexistence; if high, limits loss tolerance.
150078 + * We aim to be fair with Reno/CUBIC up to a BDP of at least:
150079 + *  BDP = 25Mbps * .030sec /(1514bytes) = 61.9 packets
150080 + */
150081 +static u32 bbr_bw_probe_max_rounds = 63;
150083 +/* Max amount of randomness to inject in round counting for Reno-coexistence.
150084 + * Max value is 15.
150085 + */
150086 +static u32 bbr_bw_probe_rand_rounds = 2;
150088 +/* Use BBR-native probe time scale starting at this many usec.
150089 + * We aim to be fair with Reno/CUBIC up to an inter-loss time epoch of at least:
150090 + *  BDP*RTT = 25Mbps * .030sec /(1514bytes) * 0.030sec = 1.9 secs
150091 + */
150092 +static u32 bbr_bw_probe_base_us = 2 * USEC_PER_SEC;  /* 2 secs */
150094 +/* Use BBR-native probes spread over this many usec: */
150095 +static u32 bbr_bw_probe_rand_us = 1 * USEC_PER_SEC;  /* 1 secs */
150097 +/* Undo the model changes made in loss recovery if recovery was spurious? */
150098 +static bool bbr_undo = true;
150100 +/* Use fast path if app-limited, no loss/ECN, and target cwnd was reached? */
150101 +static bool bbr_fast_path = true;      /* default: enabled */
150103 +/* Use fast ack mode ? */
150104 +static int bbr_fast_ack_mode = 1;      /* default: rwnd check off */
150106 +/* How much to additively increase inflight_hi when entering REFILL? */
150107 +static u32 bbr_refill_add_inc;         /* default: disabled */
150109 +module_param_named(beta,                 bbr_beta,                 uint, 0644);
150110 +module_param_named(ecn_alpha_gain,       bbr_ecn_alpha_gain,       uint, 0644);
150111 +module_param_named(ecn_alpha_init,       bbr_ecn_alpha_init,       uint, 0644);
150112 +module_param_named(ecn_factor,           bbr_ecn_factor,           uint, 0644);
150113 +module_param_named(ecn_thresh,           bbr_ecn_thresh,           uint, 0644);
150114 +module_param_named(ecn_max_rtt_us,       bbr_ecn_max_rtt_us,       uint, 0644);
150115 +module_param_named(ecn_reprobe_gain,     bbr_ecn_reprobe_gain,     uint, 0644);
150116 +module_param_named(loss_thresh,          bbr_loss_thresh,          uint, 0664);
150117 +module_param_named(full_loss_cnt,        bbr_full_loss_cnt,        uint, 0664);
150118 +module_param_named(full_ecn_cnt,         bbr_full_ecn_cnt,         uint, 0664);
150119 +module_param_named(inflight_headroom,    bbr_inflight_headroom,    uint, 0664);
150120 +module_param_named(bw_probe_pif_gain,    bbr_bw_probe_pif_gain,    uint, 0664);
150121 +module_param_named(bw_probe_reno_gain,   bbr_bw_probe_reno_gain,   uint, 0664);
150122 +module_param_named(bw_probe_max_rounds,  bbr_bw_probe_max_rounds,  uint, 0664);
150123 +module_param_named(bw_probe_rand_rounds, bbr_bw_probe_rand_rounds, uint, 0664);
150124 +module_param_named(bw_probe_base_us,     bbr_bw_probe_base_us,     uint, 0664);
150125 +module_param_named(bw_probe_rand_us,     bbr_bw_probe_rand_us,     uint, 0664);
150126 +module_param_named(undo,                 bbr_undo,                 bool, 0664);
150127 +module_param_named(fast_path,           bbr_fast_path,            bool, 0664);
150128 +module_param_named(fast_ack_mode,       bbr_fast_ack_mode,        uint, 0664);
150129 +module_param_named(refill_add_inc,       bbr_refill_add_inc,       uint, 0664);
150131 +static void bbr2_init(struct sock *sk)
150133 +       struct tcp_sock *tp = tcp_sk(sk);
150134 +       struct bbr *bbr = inet_csk_ca(sk);
150136 +       bbr_init(sk);   /* run shared init code for v1 and v2 */
150138 +       /* BBR v2 parameters: */
150139 +       bbr->params.beta = min_t(u32, 0xFFU, bbr_beta);
150140 +       bbr->params.ecn_alpha_gain = min_t(u32, 0xFFU, bbr_ecn_alpha_gain);
150141 +       bbr->params.ecn_alpha_init = min_t(u32, BBR_UNIT, bbr_ecn_alpha_init);
150142 +       bbr->params.ecn_factor = min_t(u32, 0xFFU, bbr_ecn_factor);
150143 +       bbr->params.ecn_thresh = min_t(u32, 0xFFU, bbr_ecn_thresh);
150144 +       bbr->params.ecn_max_rtt_us = min_t(u32, 0x7ffffU, bbr_ecn_max_rtt_us);
150145 +       bbr->params.ecn_reprobe_gain = min_t(u32, 0x1FF, bbr_ecn_reprobe_gain);
150146 +       bbr->params.loss_thresh = min_t(u32, 0xFFU, bbr_loss_thresh);
150147 +       bbr->params.full_loss_cnt = min_t(u32, 0xFU, bbr_full_loss_cnt);
150148 +       bbr->params.full_ecn_cnt = min_t(u32, 0x3U, bbr_full_ecn_cnt);
150149 +       bbr->params.inflight_headroom =
150150 +               min_t(u32, 0xFFU, bbr_inflight_headroom);
150151 +       bbr->params.bw_probe_pif_gain =
150152 +               min_t(u32, 0x1FFU, bbr_bw_probe_pif_gain);
150153 +       bbr->params.bw_probe_reno_gain =
150154 +               min_t(u32, 0x1FFU, bbr_bw_probe_reno_gain);
150155 +       bbr->params.bw_probe_max_rounds =
150156 +               min_t(u32, 0xFFU, bbr_bw_probe_max_rounds);
150157 +       bbr->params.bw_probe_rand_rounds =
150158 +               min_t(u32, 0xFU, bbr_bw_probe_rand_rounds);
150159 +       bbr->params.bw_probe_base_us =
150160 +               min_t(u32, (1 << 26) - 1, bbr_bw_probe_base_us);
150161 +       bbr->params.bw_probe_rand_us =
150162 +               min_t(u32, (1 << 26) - 1, bbr_bw_probe_rand_us);
150163 +       bbr->params.undo = bbr_undo;
150164 +       bbr->params.fast_path = bbr_fast_path ? 1 : 0;
150165 +       bbr->params.refill_add_inc = min_t(u32, 0x3U, bbr_refill_add_inc);
150167 +       /* BBR v2 state: */
150168 +       bbr->initialized = 1;
150169 +       /* Start sampling ECN mark rate after first full flight is ACKed: */
150170 +       bbr->loss_round_delivered = tp->delivered + 1;
150171 +       bbr->loss_round_start = 0;
150172 +       bbr->undo_bw_lo = 0;
150173 +       bbr->undo_inflight_lo = 0;
150174 +       bbr->undo_inflight_hi = 0;
150175 +       bbr->loss_events_in_round = 0;
150176 +       bbr->startup_ecn_rounds = 0;
150177 +       bbr2_reset_congestion_signals(sk);
150178 +       bbr->bw_lo = ~0U;
150179 +       bbr->bw_hi[0] = 0;
150180 +       bbr->bw_hi[1] = 0;
150181 +       bbr->inflight_lo = ~0U;
150182 +       bbr->inflight_hi = ~0U;
150183 +       bbr->bw_probe_up_cnt = ~0U;
150184 +       bbr->bw_probe_up_acks = 0;
150185 +       bbr->bw_probe_up_rounds = 0;
150186 +       bbr->probe_wait_us = 0;
150187 +       bbr->stopped_risky_probe = 0;
150188 +       bbr->ack_phase = BBR_ACKS_INIT;
150189 +       bbr->rounds_since_probe = 0;
150190 +       bbr->bw_probe_samples = 0;
150191 +       bbr->prev_probe_too_high = 0;
150192 +       bbr->ecn_eligible = 0;
150193 +       bbr->ecn_alpha = bbr->params.ecn_alpha_init;
150194 +       bbr->alpha_last_delivered = 0;
150195 +       bbr->alpha_last_delivered_ce = 0;
150197 +       tp->fast_ack_mode = min_t(u32, 0x2U, bbr_fast_ack_mode);
150200 +/* Core TCP stack informs us that the given skb was just marked lost. */
150201 +static void bbr2_skb_marked_lost(struct sock *sk, const struct sk_buff *skb)
150203 +       struct tcp_sock *tp = tcp_sk(sk);
150204 +       struct bbr *bbr = inet_csk_ca(sk);
150205 +       struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
150206 +       struct rate_sample rs;
150208 +       /* Capture "current" data over the full round trip of loss,
150209 +        * to have a better chance to see the full capacity of the path.
150210 +       */
150211 +       if (!bbr->loss_in_round)  /* first loss in this round trip? */
150212 +               bbr->loss_round_delivered = tp->delivered;  /* set round trip */
150213 +       bbr->loss_in_round = 1;
150214 +       bbr->loss_in_cycle = 1;
150216 +       if (!bbr->bw_probe_samples)
150217 +               return;  /* not an skb sent while probing for bandwidth */
150218 +       if (unlikely(!scb->tx.delivered_mstamp))
150219 +               return;  /* skb was SACKed, reneged, marked lost; ignore it */
150220 +       /* We are probing for bandwidth. Construct a rate sample that
150221 +        * estimates what happened in the flight leading up to this lost skb,
150222 +        * then see if the loss rate went too high, and if so at which packet.
150223 +        */
150224 +       memset(&rs, 0, sizeof(rs));
150225 +       rs.tx_in_flight = scb->tx.in_flight;
150226 +       rs.lost = tp->lost - scb->tx.lost;
150227 +       rs.is_app_limited = scb->tx.is_app_limited;
150228 +       if (bbr2_is_inflight_too_high(sk, &rs)) {
150229 +               rs.tx_in_flight = bbr2_inflight_hi_from_lost_skb(sk, &rs, skb);
150230 +               bbr2_handle_inflight_too_high(sk, &rs);
150231 +       }
150234 +/* Revert short-term model if current loss recovery event was spurious. */
150235 +static u32 bbr2_undo_cwnd(struct sock *sk)
150237 +       struct tcp_sock *tp = tcp_sk(sk);
150238 +       struct bbr *bbr = inet_csk_ca(sk);
150240 +       bbr->debug.undo = 1;
150241 +       bbr->full_bw = 0;   /* spurious slow-down; reset full pipe detection */
150242 +       bbr->full_bw_cnt = 0;
150243 +       bbr->loss_in_round = 0;
150245 +       if (!bbr->params.undo)
150246 +               return tp->snd_cwnd;
150248 +       /* Revert to cwnd and other state saved before loss episode. */
150249 +       bbr->bw_lo = max(bbr->bw_lo, bbr->undo_bw_lo);
150250 +       bbr->inflight_lo = max(bbr->inflight_lo, bbr->undo_inflight_lo);
150251 +       bbr->inflight_hi = max(bbr->inflight_hi, bbr->undo_inflight_hi);
150252 +       return bbr->prior_cwnd;
150255 +/* Entering loss recovery, so save state for when we undo recovery. */
150256 +static u32 bbr2_ssthresh(struct sock *sk)
150258 +       struct bbr *bbr = inet_csk_ca(sk);
150260 +       bbr_save_cwnd(sk);
150261 +       /* For undo, save state that adapts based on loss signal. */
150262 +       bbr->undo_bw_lo         = bbr->bw_lo;
150263 +       bbr->undo_inflight_lo   = bbr->inflight_lo;
150264 +       bbr->undo_inflight_hi   = bbr->inflight_hi;
150265 +       return tcp_sk(sk)->snd_ssthresh;
150268 +static enum tcp_bbr2_phase bbr2_get_phase(struct bbr *bbr)
150270 +       switch (bbr->mode) {
150271 +       case BBR_STARTUP:
150272 +               return BBR2_PHASE_STARTUP;
150273 +       case BBR_DRAIN:
150274 +               return BBR2_PHASE_DRAIN;
150275 +       case BBR_PROBE_BW:
150276 +               break;
150277 +       case BBR_PROBE_RTT:
150278 +               return BBR2_PHASE_PROBE_RTT;
150279 +       default:
150280 +               return BBR2_PHASE_INVALID;
150281 +       }
150282 +       switch (bbr->cycle_idx) {
150283 +       case BBR_BW_PROBE_UP:
150284 +               return BBR2_PHASE_PROBE_BW_UP;
150285 +       case BBR_BW_PROBE_DOWN:
150286 +               return BBR2_PHASE_PROBE_BW_DOWN;
150287 +       case BBR_BW_PROBE_CRUISE:
150288 +               return BBR2_PHASE_PROBE_BW_CRUISE;
150289 +       case BBR_BW_PROBE_REFILL:
150290 +               return BBR2_PHASE_PROBE_BW_REFILL;
150291 +       default:
150292 +               return BBR2_PHASE_INVALID;
150293 +       }
150296 +static size_t bbr2_get_info(struct sock *sk, u32 ext, int *attr,
150297 +                           union tcp_cc_info *info)
150299 +       if (ext & (1 << (INET_DIAG_BBRINFO - 1)) ||
150300 +           ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
150301 +               struct bbr *bbr = inet_csk_ca(sk);
150302 +               u64 bw = bbr_bw_bytes_per_sec(sk, bbr_bw(sk));
150303 +               u64 bw_hi = bbr_bw_bytes_per_sec(sk, bbr_max_bw(sk));
150304 +               u64 bw_lo = bbr->bw_lo == ~0U ?
150305 +                       ~0ULL : bbr_bw_bytes_per_sec(sk, bbr->bw_lo);
150307 +               memset(&info->bbr2, 0, sizeof(info->bbr2));
150308 +               info->bbr2.bbr_bw_lsb           = (u32)bw;
150309 +               info->bbr2.bbr_bw_msb           = (u32)(bw >> 32);
150310 +               info->bbr2.bbr_min_rtt          = bbr->min_rtt_us;
150311 +               info->bbr2.bbr_pacing_gain      = bbr->pacing_gain;
150312 +               info->bbr2.bbr_cwnd_gain        = bbr->cwnd_gain;
150313 +               info->bbr2.bbr_bw_hi_lsb        = (u32)bw_hi;
150314 +               info->bbr2.bbr_bw_hi_msb        = (u32)(bw_hi >> 32);
150315 +               info->bbr2.bbr_bw_lo_lsb        = (u32)bw_lo;
150316 +               info->bbr2.bbr_bw_lo_msb        = (u32)(bw_lo >> 32);
150317 +               info->bbr2.bbr_mode             = bbr->mode;
150318 +               info->bbr2.bbr_phase            = (__u8)bbr2_get_phase(bbr);
150319 +               info->bbr2.bbr_version          = (__u8)2;
150320 +               info->bbr2.bbr_inflight_lo      = bbr->inflight_lo;
150321 +               info->bbr2.bbr_inflight_hi      = bbr->inflight_hi;
150322 +               info->bbr2.bbr_extra_acked      = bbr_extra_acked(sk);
150323 +               *attr = INET_DIAG_BBRINFO;
150324 +               return sizeof(info->bbr2);
150325 +       }
150326 +       return 0;
150329 +static void bbr2_set_state(struct sock *sk, u8 new_state)
150331 +       struct tcp_sock *tp = tcp_sk(sk);
150332 +       struct bbr *bbr = inet_csk_ca(sk);
150334 +       if (new_state == TCP_CA_Loss) {
150335 +               struct rate_sample rs = { .losses = 1 };
150336 +               struct bbr_context ctx = { 0 };
150338 +               bbr->prev_ca_state = TCP_CA_Loss;
150339 +               bbr->full_bw = 0;
150340 +               if (!bbr2_is_probing_bandwidth(sk) && bbr->inflight_lo == ~0U) {
150341 +                       /* bbr_adapt_lower_bounds() needs cwnd before
150342 +                        * we suffered an RTO, to update inflight_lo:
150343 +                        */
150344 +                       bbr->inflight_lo =
150345 +                               max(tp->snd_cwnd, bbr->prior_cwnd);
150346 +               }
150347 +               bbr_debug(sk, 0, &rs, &ctx);
150348 +       } else if (bbr->prev_ca_state == TCP_CA_Loss &&
150349 +                  new_state != TCP_CA_Loss) {
150350 +               tp->snd_cwnd = max(tp->snd_cwnd, bbr->prior_cwnd);
150351 +               bbr->try_fast_path = 0; /* bound cwnd using latest model */
150352 +       }
150355 +static struct tcp_congestion_ops tcp_bbr2_cong_ops __read_mostly = {
150356 +       .flags          = TCP_CONG_NON_RESTRICTED | TCP_CONG_WANTS_CE_EVENTS,
150357 +       .name           = "bbr2",
150358 +       .owner          = THIS_MODULE,
150359 +       .init           = bbr2_init,
150360 +       .cong_control   = bbr2_main,
150361 +       .sndbuf_expand  = bbr_sndbuf_expand,
150362 +       .skb_marked_lost = bbr2_skb_marked_lost,
150363 +       .undo_cwnd      = bbr2_undo_cwnd,
150364 +       .cwnd_event     = bbr_cwnd_event,
150365 +       .ssthresh       = bbr2_ssthresh,
150366 +       .tso_segs       = bbr_tso_segs,
150367 +       .get_info       = bbr2_get_info,
150368 +       .set_state      = bbr2_set_state,
150371 +static int __init bbr_register(void)
150373 +       BUILD_BUG_ON(sizeof(struct bbr) > ICSK_CA_PRIV_SIZE);
150374 +       return tcp_register_congestion_control(&tcp_bbr2_cong_ops);
150377 +static void __exit bbr_unregister(void)
150379 +       tcp_unregister_congestion_control(&tcp_bbr2_cong_ops);
150382 +module_init(bbr_register);
150383 +module_exit(bbr_unregister);
150385 +MODULE_AUTHOR("Van Jacobson <vanj@google.com>");
150386 +MODULE_AUTHOR("Neal Cardwell <ncardwell@google.com>");
150387 +MODULE_AUTHOR("Yuchung Cheng <ycheng@google.com>");
150388 +MODULE_AUTHOR("Soheil Hassas Yeganeh <soheil@google.com>");
150389 +MODULE_AUTHOR("Priyaranjan Jha <priyarjha@google.com>");
150390 +MODULE_AUTHOR("Yousuk Seung <ysseung@google.com>");
150391 +MODULE_AUTHOR("Kevin Yang <yyd@google.com>");
150392 +MODULE_AUTHOR("Arjun Roy <arjunroy@google.com>");
150394 +MODULE_LICENSE("Dual BSD/GPL");
150395 +MODULE_DESCRIPTION("TCP BBR (Bottleneck Bandwidth and RTT)");
150396 diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
150397 index 563d016e7478..153ed9010c0c 100644
150398 --- a/net/ipv4/tcp_cong.c
150399 +++ b/net/ipv4/tcp_cong.c
150400 @@ -179,6 +179,7 @@ void tcp_init_congestion_control(struct sock *sk)
150401         struct inet_connection_sock *icsk = inet_csk(sk);
150403         tcp_sk(sk)->prior_ssthresh = 0;
150404 +       tcp_sk(sk)->fast_ack_mode = 0;
150405         if (icsk->icsk_ca_ops->init)
150406                 icsk->icsk_ca_ops->init(sk);
150407         if (tcp_ca_needs_ecn(sk))
150408 @@ -230,6 +231,10 @@ int tcp_set_default_congestion_control(struct net *net, const char *name)
150409                 ret = -ENOENT;
150410         } else if (!bpf_try_module_get(ca, ca->owner)) {
150411                 ret = -EBUSY;
150412 +       } else if (!net_eq(net, &init_net) &&
150413 +                       !(ca->flags & TCP_CONG_NON_RESTRICTED)) {
150414 +               /* Only init netns can set default to a restricted algorithm */
150415 +               ret = -EPERM;
150416         } else {
150417                 prev = xchg(&net->ipv4.tcp_congestion_control, ca);
150418                 if (prev)
150419 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
150420 index 69a545db80d2..45aaba87ce8e 100644
150421 --- a/net/ipv4/tcp_input.c
150422 +++ b/net/ipv4/tcp_input.c
150423 @@ -348,7 +348,7 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
150424                         tcp_enter_quickack_mode(sk, 2);
150425                 break;
150426         case INET_ECN_CE:
150427 -               if (tcp_ca_needs_ecn(sk))
150428 +               if (tcp_ca_wants_ce_events(sk))
150429                         tcp_ca_event(sk, CA_EVENT_ECN_IS_CE);
150431                 if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
150432 @@ -359,7 +359,7 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
150433                 tp->ecn_flags |= TCP_ECN_SEEN;
150434                 break;
150435         default:
150436 -               if (tcp_ca_needs_ecn(sk))
150437 +               if (tcp_ca_wants_ce_events(sk))
150438                         tcp_ca_event(sk, CA_EVENT_ECN_NO_CE);
150439                 tp->ecn_flags |= TCP_ECN_SEEN;
150440                 break;
150441 @@ -1039,7 +1039,12 @@ static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
150442   */
150443  static void tcp_notify_skb_loss_event(struct tcp_sock *tp, const struct sk_buff *skb)
150445 +       struct sock *sk = (struct sock *)tp;
150446 +       const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
150448         tp->lost += tcp_skb_pcount(skb);
150449 +       if (ca_ops->skb_marked_lost)
150450 +               ca_ops->skb_marked_lost(sk, skb);
150453  void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
150454 @@ -1420,6 +1425,17 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
150455         WARN_ON_ONCE(tcp_skb_pcount(skb) < pcount);
150456         tcp_skb_pcount_add(skb, -pcount);
150458 +       /* Adjust tx.in_flight as pcount is shifted from skb to prev. */
150459 +       if (WARN_ONCE(TCP_SKB_CB(skb)->tx.in_flight < pcount,
150460 +                     "prev in_flight: %u skb in_flight: %u pcount: %u",
150461 +                     TCP_SKB_CB(prev)->tx.in_flight,
150462 +                     TCP_SKB_CB(skb)->tx.in_flight,
150463 +                     pcount))
150464 +               TCP_SKB_CB(skb)->tx.in_flight = 0;
150465 +       else
150466 +               TCP_SKB_CB(skb)->tx.in_flight -= pcount;
150467 +       TCP_SKB_CB(prev)->tx.in_flight += pcount;
150469         /* When we're adding to gso_segs == 1, gso_size will be zero,
150470          * in theory this shouldn't be necessary but as long as DSACK
150471          * code can come after this skb later on it's better to keep
150472 @@ -3182,7 +3198,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb,
150473         long seq_rtt_us = -1L;
150474         long ca_rtt_us = -1L;
150475         u32 pkts_acked = 0;
150476 -       u32 last_in_flight = 0;
150477         bool rtt_update;
150478         int flag = 0;
150480 @@ -3218,7 +3233,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb,
150481                         if (!first_ackt)
150482                                 first_ackt = last_ackt;
150484 -                       last_in_flight = TCP_SKB_CB(skb)->tx.in_flight;
150485                         if (before(start_seq, reord))
150486                                 reord = start_seq;
150487                         if (!after(scb->end_seq, tp->high_seq))
150488 @@ -3284,8 +3298,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb,
150489                 seq_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, first_ackt);
150490                 ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, last_ackt);
150492 -               if (pkts_acked == 1 && last_in_flight < tp->mss_cache &&
150493 -                   last_in_flight && !prior_sacked && fully_acked &&
150494 +               if (pkts_acked == 1 && fully_acked && !prior_sacked &&
150495 +                   (tp->snd_una - prior_snd_una) < tp->mss_cache &&
150496                     sack->rate->prior_delivered + 1 == tp->delivered &&
150497                     !(flag & (FLAG_CA_ALERT | FLAG_SYN_ACKED))) {
150498                         /* Conservatively mark a delayed ACK. It's typically
150499 @@ -3342,9 +3356,10 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb,
150501         if (icsk->icsk_ca_ops->pkts_acked) {
150502                 struct ack_sample sample = { .pkts_acked = pkts_acked,
150503 -                                            .rtt_us = sack->rate->rtt_us,
150504 -                                            .in_flight = last_in_flight };
150505 +                                            .rtt_us = sack->rate->rtt_us };
150507 +               sample.in_flight = tp->mss_cache *
150508 +                       (tp->delivered - sack->rate->prior_delivered);
150509                 icsk->icsk_ca_ops->pkts_acked(sk, &sample);
150510         }
150512 @@ -3742,6 +3757,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
150514         prior_fack = tcp_is_sack(tp) ? tcp_highest_sack_seq(tp) : tp->snd_una;
150515         rs.prior_in_flight = tcp_packets_in_flight(tp);
150516 +       tcp_rate_check_app_limited(sk);
150518         /* ts_recent update must be made after we are sure that the packet
150519          * is in window.
150520 @@ -3839,6 +3855,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
150521         delivered = tcp_newly_delivered(sk, delivered, flag);
150522         lost = tp->lost - lost;                 /* freshly marked lost */
150523         rs.is_ack_delayed = !!(flag & FLAG_ACK_MAYBE_DELAYED);
150524 +       rs.is_ece = !!(flag & FLAG_ECE);
150525         tcp_rate_gen(sk, delivered, lost, is_sack_reneg, sack_state.rate);
150526         tcp_cong_control(sk, ack, delivered, flag, sack_state.rate);
150527         tcp_xmit_recovery(sk, rexmit);
150528 @@ -5399,13 +5416,14 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
150530             /* More than one full frame received... */
150531         if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss &&
150532 +            (tp->fast_ack_mode == 1 ||
150533              /* ... and right edge of window advances far enough.
150534               * (tcp_recvmsg() will send ACK otherwise).
150535               * If application uses SO_RCVLOWAT, we want send ack now if
150536               * we have not received enough bytes to satisfy the condition.
150537               */
150538 -           (tp->rcv_nxt - tp->copied_seq < sk->sk_rcvlowat ||
150539 -            __tcp_select_window(sk) >= tp->rcv_wnd)) ||
150540 +             (tp->rcv_nxt - tp->copied_seq < sk->sk_rcvlowat ||
150541 +              __tcp_select_window(sk) >= tp->rcv_wnd))) ||
150542             /* We ACK each frame or... */
150543             tcp_in_quickack_mode(sk) ||
150544             /* Protocol state mandates a one-time immediate ACK */
150545 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
150546 index fbf140a770d8..90d939375b29 100644
150547 --- a/net/ipv4/tcp_output.c
150548 +++ b/net/ipv4/tcp_output.c
150549 @@ -1256,8 +1256,6 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
150550         tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache);
150551         skb->skb_mstamp_ns = tp->tcp_wstamp_ns;
150552         if (clone_it) {
150553 -               TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
150554 -                       - tp->snd_una;
150555                 oskb = skb;
150557                 tcp_skb_tsorted_save(oskb) {
150558 @@ -1536,7 +1534,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
150560         struct tcp_sock *tp = tcp_sk(sk);
150561         struct sk_buff *buff;
150562 -       int nsize, old_factor;
150563 +       int nsize, old_factor, inflight_prev;
150564         long limit;
150565         int nlen;
150566         u8 flags;
150567 @@ -1615,6 +1613,15 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
150569                 if (diff)
150570                         tcp_adjust_pcount(sk, skb, diff);
150572 +               /* Set buff tx.in_flight as if buff were sent by itself. */
150573 +               inflight_prev = TCP_SKB_CB(skb)->tx.in_flight - old_factor;
150574 +               if (WARN_ONCE(inflight_prev < 0,
150575 +                             "inconsistent: tx.in_flight: %u old_factor: %d",
150576 +                             TCP_SKB_CB(skb)->tx.in_flight, old_factor))
150577 +                       inflight_prev = 0;
150578 +               TCP_SKB_CB(buff)->tx.in_flight = inflight_prev +
150579 +                                                tcp_skb_pcount(buff);
150580         }
150582         /* Link BUFF into the send queue. */
150583 @@ -1982,13 +1989,12 @@ static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
150584  static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
150586         const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
150587 -       u32 min_tso, tso_segs;
150589 -       min_tso = ca_ops->min_tso_segs ?
150590 -                       ca_ops->min_tso_segs(sk) :
150591 -                       sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs;
150592 +       u32 tso_segs;
150594 -       tso_segs = tcp_tso_autosize(sk, mss_now, min_tso);
150595 +       tso_segs = ca_ops->tso_segs ?
150596 +               ca_ops->tso_segs(sk, mss_now) :
150597 +               tcp_tso_autosize(sk, mss_now,
150598 +                                sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
150599         return min_t(u32, tso_segs, sk->sk_gso_max_segs);
150602 @@ -2628,6 +2634,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
150603                         skb->skb_mstamp_ns = tp->tcp_wstamp_ns = tp->tcp_clock_cache;
150604                         list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
150605                         tcp_init_tso_segs(skb, mss_now);
150606 +                       tcp_set_tx_in_flight(sk, skb);
150607                         goto repair; /* Skip network transmission */
150608                 }
150610 diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c
150611 index 0de693565963..796fa6e5310c 100644
150612 --- a/net/ipv4/tcp_rate.c
150613 +++ b/net/ipv4/tcp_rate.c
150614 @@ -34,6 +34,24 @@
150615   * ready to send in the write queue.
150616   */
150618 +void tcp_set_tx_in_flight(struct sock *sk, struct sk_buff *skb)
150620 +       struct tcp_sock *tp = tcp_sk(sk);
150621 +       u32 in_flight;
150623 +       /* Check, sanitize, and record packets in flight after skb was sent. */
150624 +       in_flight = tcp_packets_in_flight(tp) + tcp_skb_pcount(skb);
150625 +       if (WARN_ONCE(in_flight > TCPCB_IN_FLIGHT_MAX,
150626 +                     "insane in_flight %u cc %s mss %u "
150627 +                     "cwnd %u pif %u %u %u %u\n",
150628 +                     in_flight, inet_csk(sk)->icsk_ca_ops->name,
150629 +                     tp->mss_cache, tp->snd_cwnd,
150630 +                     tp->packets_out, tp->retrans_out,
150631 +                     tp->sacked_out, tp->lost_out))
150632 +               in_flight = TCPCB_IN_FLIGHT_MAX;
150633 +       TCP_SKB_CB(skb)->tx.in_flight = in_flight;
150636  /* Snapshot the current delivery information in the skb, to generate
150637   * a rate sample later when the skb is (s)acked in tcp_rate_skb_delivered().
150638   */
150639 @@ -65,7 +83,10 @@ void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb)
150640         TCP_SKB_CB(skb)->tx.first_tx_mstamp     = tp->first_tx_mstamp;
150641         TCP_SKB_CB(skb)->tx.delivered_mstamp    = tp->delivered_mstamp;
150642         TCP_SKB_CB(skb)->tx.delivered           = tp->delivered;
150643 +       TCP_SKB_CB(skb)->tx.delivered_ce        = tp->delivered_ce;
150644 +       TCP_SKB_CB(skb)->tx.lost                = tp->lost;
150645         TCP_SKB_CB(skb)->tx.is_app_limited      = tp->app_limited ? 1 : 0;
150646 +       tcp_set_tx_in_flight(sk, skb);
150649  /* When an skb is sacked or acked, we fill in the rate sample with the (prior)
150650 @@ -86,16 +107,20 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
150652         if (!rs->prior_delivered ||
150653             after(scb->tx.delivered, rs->prior_delivered)) {
150654 +               rs->prior_lost       = scb->tx.lost;
150655 +               rs->prior_delivered_ce  = scb->tx.delivered_ce;
150656                 rs->prior_delivered  = scb->tx.delivered;
150657                 rs->prior_mstamp     = scb->tx.delivered_mstamp;
150658                 rs->is_app_limited   = scb->tx.is_app_limited;
150659                 rs->is_retrans       = scb->sacked & TCPCB_RETRANS;
150660 +               rs->tx_in_flight     = scb->tx.in_flight;
150662                 /* Record send time of most recently ACKed packet: */
150663                 tp->first_tx_mstamp  = tcp_skb_timestamp_us(skb);
150664                 /* Find the duration of the "send phase" of this window: */
150665 -               rs->interval_us = tcp_stamp_us_delta(tp->first_tx_mstamp,
150666 -                                                    scb->tx.first_tx_mstamp);
150667 +               rs->interval_us      = tcp_stamp32_us_delta(
150668 +                                               tp->first_tx_mstamp,
150669 +                                               scb->tx.first_tx_mstamp);
150671         }
150672         /* Mark off the skb delivered once it's sacked to avoid being
150673 @@ -137,6 +162,11 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
150674                 return;
150675         }
150676         rs->delivered   = tp->delivered - rs->prior_delivered;
150677 +       rs->lost        = tp->lost - rs->prior_lost;
150679 +       rs->delivered_ce = tp->delivered_ce - rs->prior_delivered_ce;
150680 +       /* delivered_ce occupies less than 32 bits in the skb control block */
150681 +       rs->delivered_ce &= TCPCB_DELIVERED_CE_MASK;
150683         /* Model sending data and receiving ACKs as separate pipeline phases
150684          * for a window. Usually the ACK phase is longer, but with ACK
150685 @@ -144,7 +174,7 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
150686          * longer phase.
150687          */
150688         snd_us = rs->interval_us;                               /* send phase */
150689 -       ack_us = tcp_stamp_us_delta(tp->tcp_mstamp,
150690 +       ack_us = tcp_stamp32_us_delta(tp->tcp_mstamp,
150691                                     rs->prior_mstamp); /* ack phase */
150692         rs->interval_us = max(snd_us, ack_us);
150694 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
150695 index 4ef08079ccfa..b5b24caa8ba0 100644
150696 --- a/net/ipv4/tcp_timer.c
150697 +++ b/net/ipv4/tcp_timer.c
150698 @@ -607,6 +607,7 @@ void tcp_write_timer_handler(struct sock *sk)
150699                 goto out;
150700         }
150702 +       tcp_rate_check_app_limited(sk);
150703         tcp_mstamp_refresh(tcp_sk(sk));
150704         event = icsk->icsk_pending;
150706 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
150707 index 99d743eb9dc4..c586a6bb8c6d 100644
150708 --- a/net/ipv4/udp.c
150709 +++ b/net/ipv4/udp.c
150710 @@ -2664,9 +2664,12 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
150712         case UDP_GRO:
150713                 lock_sock(sk);
150715 +               /* when enabling GRO, accept the related GSO packet type */
150716                 if (valbool)
150717                         udp_tunnel_encap_enable(sk->sk_socket);
150718                 up->gro_enabled = valbool;
150719 +               up->accept_udp_l4 = valbool;
150720                 release_sock(sk);
150721                 break;
150723 diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
150724 index c5b4b586570f..25134a3548e9 100644
150725 --- a/net/ipv4/udp_offload.c
150726 +++ b/net/ipv4/udp_offload.c
150727 @@ -515,21 +515,24 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
150728         unsigned int off = skb_gro_offset(skb);
150729         int flush = 1;
150731 +       /* we can do L4 aggregation only if the packet can't land in a tunnel
150732 +        * otherwise we could corrupt the inner stream
150733 +        */
150734         NAPI_GRO_CB(skb)->is_flist = 0;
150735 -       if (skb->dev->features & NETIF_F_GRO_FRAGLIST)
150736 -               NAPI_GRO_CB(skb)->is_flist = sk ? !udp_sk(sk)->gro_enabled: 1;
150737 +       if (!sk || !udp_sk(sk)->gro_receive) {
150738 +               if (skb->dev->features & NETIF_F_GRO_FRAGLIST)
150739 +                       NAPI_GRO_CB(skb)->is_flist = sk ? !udp_sk(sk)->gro_enabled : 1;
150741 -       if ((!sk && (skb->dev->features & NETIF_F_GRO_UDP_FWD)) ||
150742 -           (sk && udp_sk(sk)->gro_enabled) || NAPI_GRO_CB(skb)->is_flist) {
150743 -               pp = call_gro_receive(udp_gro_receive_segment, head, skb);
150744 +               if ((!sk && (skb->dev->features & NETIF_F_GRO_UDP_FWD)) ||
150745 +                   (sk && udp_sk(sk)->gro_enabled) || NAPI_GRO_CB(skb)->is_flist)
150746 +                       pp = call_gro_receive(udp_gro_receive_segment, head, skb);
150747                 return pp;
150748         }
150750 -       if (!sk || NAPI_GRO_CB(skb)->encap_mark ||
150751 +       if (NAPI_GRO_CB(skb)->encap_mark ||
150752             (uh->check && skb->ip_summed != CHECKSUM_PARTIAL &&
150753              NAPI_GRO_CB(skb)->csum_cnt == 0 &&
150754 -            !NAPI_GRO_CB(skb)->csum_valid) ||
150755 -           !udp_sk(sk)->gro_receive)
150756 +            !NAPI_GRO_CB(skb)->csum_valid))
150757                 goto out;
150759         /* mark that this skb passed once through the tunnel gro layer */
150760 diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
150761 index 1baf43aacb2e..bc224f917bbd 100644
150762 --- a/net/ipv6/ip6_gre.c
150763 +++ b/net/ipv6/ip6_gre.c
150764 @@ -387,7 +387,6 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
150765         if (!(nt->parms.o_flags & TUNNEL_SEQ))
150766                 dev->features |= NETIF_F_LLTX;
150768 -       dev_hold(dev);
150769         ip6gre_tunnel_link(ign, nt);
150770         return nt;
150772 @@ -1496,6 +1495,7 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
150773         }
150774         ip6gre_tnl_init_features(dev);
150776 +       dev_hold(dev);
150777         return 0;
150779  cleanup_dst_cache_init:
150780 @@ -1538,8 +1538,6 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
150781         strcpy(tunnel->parms.name, dev->name);
150783         tunnel->hlen            = sizeof(struct ipv6hdr) + 4;
150785 -       dev_hold(dev);
150788  static struct inet6_protocol ip6gre_protocol __read_mostly = {
150789 @@ -1889,6 +1887,7 @@ static int ip6erspan_tap_init(struct net_device *dev)
150790         dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
150791         ip6erspan_tnl_link_config(tunnel, 1);
150793 +       dev_hold(dev);
150794         return 0;
150796  cleanup_dst_cache_init:
150797 @@ -1988,8 +1987,6 @@ static int ip6gre_newlink_common(struct net *src_net, struct net_device *dev,
150798         if (tb[IFLA_MTU])
150799                 ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
150801 -       dev_hold(dev);
150803  out:
150804         return err;
150806 diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
150807 index 42fe7db6bbb3..d42f471b0d65 100644
150808 --- a/net/ipv6/ip6_tunnel.c
150809 +++ b/net/ipv6/ip6_tunnel.c
150810 @@ -266,7 +266,6 @@ static int ip6_tnl_create2(struct net_device *dev)
150812         strcpy(t->parms.name, dev->name);
150814 -       dev_hold(dev);
150815         ip6_tnl_link(ip6n, t);
150816         return 0;
150818 @@ -1882,6 +1881,7 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
150819         dev->min_mtu = ETH_MIN_MTU;
150820         dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len;
150822 +       dev_hold(dev);
150823         return 0;
150825  destroy_dst:
150826 @@ -1925,7 +1925,6 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
150827         struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
150829         t->parms.proto = IPPROTO_IPV6;
150830 -       dev_hold(dev);
150832         rcu_assign_pointer(ip6n->tnls_wc[0], t);
150833         return 0;
150834 diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
150835 index e0cc32e45880..2d048e21abbb 100644
150836 --- a/net/ipv6/ip6_vti.c
150837 +++ b/net/ipv6/ip6_vti.c
150838 @@ -193,7 +193,6 @@ static int vti6_tnl_create2(struct net_device *dev)
150840         strcpy(t->parms.name, dev->name);
150842 -       dev_hold(dev);
150843         vti6_tnl_link(ip6n, t);
150845         return 0;
150846 @@ -934,6 +933,7 @@ static inline int vti6_dev_init_gen(struct net_device *dev)
150847         dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
150848         if (!dev->tstats)
150849                 return -ENOMEM;
150850 +       dev_hold(dev);
150851         return 0;
150854 @@ -965,7 +965,6 @@ static int __net_init vti6_fb_tnl_dev_init(struct net_device *dev)
150855         struct vti6_net *ip6n = net_generic(net, vti6_net_id);
150857         t->parms.proto = IPPROTO_IPV6;
150858 -       dev_hold(dev);
150860         rcu_assign_pointer(ip6n->tnls_wc[0], t);
150861         return 0;
150862 diff --git a/net/ipv6/mcast_snoop.c b/net/ipv6/mcast_snoop.c
150863 index d3d6b6a66e5f..04d5fcdfa6e0 100644
150864 --- a/net/ipv6/mcast_snoop.c
150865 +++ b/net/ipv6/mcast_snoop.c
150866 @@ -109,7 +109,7 @@ static int ipv6_mc_check_mld_msg(struct sk_buff *skb)
150867         struct mld_msg *mld;
150869         if (!ipv6_mc_may_pull(skb, len))
150870 -               return -EINVAL;
150871 +               return -ENODATA;
150873         mld = (struct mld_msg *)skb_transport_header(skb);
150875 @@ -122,7 +122,7 @@ static int ipv6_mc_check_mld_msg(struct sk_buff *skb)
150876         case ICMPV6_MGM_QUERY:
150877                 return ipv6_mc_check_mld_query(skb);
150878         default:
150879 -               return -ENOMSG;
150880 +               return -ENODATA;
150881         }
150884 @@ -131,7 +131,7 @@ static inline __sum16 ipv6_mc_validate_checksum(struct sk_buff *skb)
150885         return skb_checksum_validate(skb, IPPROTO_ICMPV6, ip6_compute_pseudo);
150888 -int ipv6_mc_check_icmpv6(struct sk_buff *skb)
150889 +static int ipv6_mc_check_icmpv6(struct sk_buff *skb)
150891         unsigned int len = skb_transport_offset(skb) + sizeof(struct icmp6hdr);
150892         unsigned int transport_len = ipv6_transport_len(skb);
150893 @@ -150,7 +150,6 @@ int ipv6_mc_check_icmpv6(struct sk_buff *skb)
150895         return 0;
150897 -EXPORT_SYMBOL(ipv6_mc_check_icmpv6);
150899  /**
150900   * ipv6_mc_check_mld - checks whether this is a sane MLD packet
150901 @@ -161,7 +160,10 @@ EXPORT_SYMBOL(ipv6_mc_check_icmpv6);
150902   *
150903   * -EINVAL: A broken packet was detected, i.e. it violates some internet
150904   *  standard
150905 - * -ENOMSG: IP header validation succeeded but it is not an MLD packet.
150906 + * -ENOMSG: IP header validation succeeded but it is not an ICMPv6 packet
150907 + *  with a hop-by-hop option.
150908 + * -ENODATA: IP+ICMPv6 header with hop-by-hop option validation succeeded
150909 + *  but it is not an MLD packet.
150910   * -ENOMEM: A memory allocation failure happened.
150911   *
150912   * Caller needs to set the skb network header and free any returned skb if it
150913 diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
150914 index 9fdccf0718b5..fcc9ba2c80e9 100644
150915 --- a/net/ipv6/sit.c
150916 +++ b/net/ipv6/sit.c
150917 @@ -218,8 +218,6 @@ static int ipip6_tunnel_create(struct net_device *dev)
150919         ipip6_tunnel_clone_6rd(dev, sitn);
150921 -       dev_hold(dev);
150923         ipip6_tunnel_link(sitn, t);
150924         return 0;
150926 @@ -1456,7 +1454,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
150927                 dev->tstats = NULL;
150928                 return err;
150929         }
150931 +       dev_hold(dev);
150932         return 0;
150935 @@ -1472,7 +1470,6 @@ static void __net_init ipip6_fb_tunnel_init(struct net_device *dev)
150936         iph->ihl                = 5;
150937         iph->ttl                = 64;
150939 -       dev_hold(dev);
150940         rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
150943 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
150944 index 1b9c82616606..0331f3a3c40e 100644
150945 --- a/net/mac80211/main.c
150946 +++ b/net/mac80211/main.c
150947 @@ -1141,8 +1141,11 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
150948         if (local->hw.wiphy->max_scan_ie_len)
150949                 local->hw.wiphy->max_scan_ie_len -= local->scan_ies_len;
150951 -       WARN_ON(!ieee80211_cs_list_valid(local->hw.cipher_schemes,
150952 -                                        local->hw.n_cipher_schemes));
150953 +       if (WARN_ON(!ieee80211_cs_list_valid(local->hw.cipher_schemes,
150954 +                                            local->hw.n_cipher_schemes))) {
150955 +               result = -EINVAL;
150956 +               goto fail_workqueue;
150957 +       }
150959         result = ieee80211_init_cipher_suites(local);
150960         if (result < 0)
150961 diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
150962 index 96f487fc0071..0fe91dc9817e 100644
150963 --- a/net/mac80211/mlme.c
150964 +++ b/net/mac80211/mlme.c
150965 @@ -1295,6 +1295,11 @@ static void ieee80211_chswitch_post_beacon(struct ieee80211_sub_if_data *sdata)
150967         sdata->vif.csa_active = false;
150968         ifmgd->csa_waiting_bcn = false;
150969 +       /*
150970 +        * If the CSA IE is still present on the beacon after the switch,
150971 +        * we need to consider it as a new CSA (possibly to self).
150972 +        */
150973 +       ifmgd->beacon_crc_valid = false;
150975         ret = drv_post_channel_switch(sdata);
150976         if (ret) {
150977 @@ -1400,11 +1405,8 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
150978                 ch_switch.delay = csa_ie.max_switch_time;
150979         }
150981 -       if (res < 0) {
150982 -               ieee80211_queue_work(&local->hw,
150983 -                                    &ifmgd->csa_connection_drop_work);
150984 -               return;
150985 -       }
150986 +       if (res < 0)
150987 +               goto lock_and_drop_connection;
150989         if (beacon && sdata->vif.csa_active && !ifmgd->csa_waiting_bcn) {
150990                 if (res)
150991 diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
150992 index 3b3bcefbf657..28422d687096 100644
150993 --- a/net/mac80211/tx.c
150994 +++ b/net/mac80211/tx.c
150995 @@ -2267,17 +2267,6 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
150996                                                     payload[7]);
150997         }
150999 -       /* Initialize skb->priority for QoS frames. If the DONT_REORDER flag
151000 -        * is set, stick to the default value for skb->priority to assure
151001 -        * frames injected with this flag are not reordered relative to each
151002 -        * other.
151003 -        */
151004 -       if (ieee80211_is_data_qos(hdr->frame_control) &&
151005 -           !(info->control.flags & IEEE80211_TX_CTRL_DONT_REORDER)) {
151006 -               u8 *p = ieee80211_get_qos_ctl(hdr);
151007 -               skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
151008 -       }
151010         rcu_read_lock();
151012         /*
151013 @@ -2341,6 +2330,15 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
151015         info->band = chandef->chan->band;
151017 +       /* Initialize skb->priority according to frame type and TID class,
151018 +        * with respect to the sub interface that the frame will actually
151019 +        * be transmitted on. If the DONT_REORDER flag is set, the original
151020 +        * skb-priority is preserved to assure frames injected with this
151021 +        * flag are not reordered relative to each other.
151022 +        */
151023 +       ieee80211_select_queue_80211(sdata, skb, hdr);
151024 +       skb_set_queue_mapping(skb, ieee80211_ac_from_tid(skb->priority));
151026         /* remove the injection radiotap header */
151027         skb_pull(skb, len_rthdr);
151029 diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
151030 index 4bde960e19dc..65e5d3eb1078 100644
151031 --- a/net/mptcp/protocol.c
151032 +++ b/net/mptcp/protocol.c
151033 @@ -399,6 +399,14 @@ static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq)
151034         return false;
151037 +static void mptcp_set_datafin_timeout(const struct sock *sk)
151039 +       struct inet_connection_sock *icsk = inet_csk(sk);
151041 +       mptcp_sk(sk)->timer_ival = min(TCP_RTO_MAX,
151042 +                                      TCP_RTO_MIN << icsk->icsk_retransmits);
151045  static void mptcp_set_timeout(const struct sock *sk, const struct sock *ssk)
151047         long tout = ssk && inet_csk(ssk)->icsk_pending ?
151048 @@ -1052,7 +1060,7 @@ static void __mptcp_clean_una(struct sock *sk)
151049         }
151051         if (snd_una == READ_ONCE(msk->snd_nxt)) {
151052 -               if (msk->timer_ival)
151053 +               if (msk->timer_ival && !mptcp_data_fin_enabled(msk))
151054                         mptcp_stop_timer(sk);
151055         } else {
151056                 mptcp_reset_timer(sk);
151057 @@ -1275,7 +1283,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
151058         int avail_size;
151059         size_t ret = 0;
151061 -       pr_debug("msk=%p ssk=%p sending dfrag at seq=%lld len=%d already sent=%d",
151062 +       pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u",
151063                  msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent);
151065         /* compute send limit */
151066 @@ -1693,7 +1701,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
151067                         if (!msk->first_pending)
151068                                 WRITE_ONCE(msk->first_pending, dfrag);
151069                 }
151070 -               pr_debug("msk=%p dfrag at seq=%lld len=%d sent=%d new=%d", msk,
151071 +               pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d", msk,
151072                          dfrag->data_seq, dfrag->data_len, dfrag->already_sent,
151073                          !dfrag_collapsed);
151075 @@ -2276,8 +2284,19 @@ static void __mptcp_retrans(struct sock *sk)
151077         __mptcp_clean_una_wakeup(sk);
151078         dfrag = mptcp_rtx_head(sk);
151079 -       if (!dfrag)
151080 +       if (!dfrag) {
151081 +               if (mptcp_data_fin_enabled(msk)) {
151082 +                       struct inet_connection_sock *icsk = inet_csk(sk);
151084 +                       icsk->icsk_retransmits++;
151085 +                       mptcp_set_datafin_timeout(sk);
151086 +                       mptcp_send_ack(msk);
151088 +                       goto reset_timer;
151089 +               }
151091                 return;
151092 +       }
151094         ssk = mptcp_subflow_get_retrans(msk);
151095         if (!ssk)
151096 @@ -2460,6 +2479,8 @@ void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
151097                         pr_debug("Sending DATA_FIN on subflow %p", ssk);
151098                         mptcp_set_timeout(sk, ssk);
151099                         tcp_send_ack(ssk);
151100 +                       if (!mptcp_timer_pending(sk))
151101 +                               mptcp_reset_timer(sk);
151102                 }
151103                 break;
151104         }
151105 diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
151106 index d17d39ccdf34..4fe7acaa472f 100644
151107 --- a/net/mptcp/subflow.c
151108 +++ b/net/mptcp/subflow.c
151109 @@ -524,8 +524,7 @@ static void mptcp_sock_destruct(struct sock *sk)
151110          * ESTABLISHED state and will not have the SOCK_DEAD flag.
151111          * Both result in warnings from inet_sock_destruct.
151112          */
151114 -       if (sk->sk_state == TCP_ESTABLISHED) {
151115 +       if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
151116                 sk->sk_state = TCP_CLOSE;
151117                 WARN_ON_ONCE(sk->sk_socket);
151118                 sock_orphan(sk);
151119 diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
151120 index c6c0cb465664..313d1c8ff066 100644
151121 --- a/net/netfilter/nf_conntrack_standalone.c
151122 +++ b/net/netfilter/nf_conntrack_standalone.c
151123 @@ -1060,16 +1060,10 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
151124         nf_conntrack_standalone_init_dccp_sysctl(net, table);
151125         nf_conntrack_standalone_init_gre_sysctl(net, table);
151127 -       /* Don't allow unprivileged users to alter certain sysctls */
151128 -       if (net->user_ns != &init_user_ns) {
151129 +       /* Don't allow non-init_net ns to alter global sysctls */
151130 +       if (!net_eq(&init_net, net)) {
151131                 table[NF_SYSCTL_CT_MAX].mode = 0444;
151132                 table[NF_SYSCTL_CT_EXPECT_MAX].mode = 0444;
151133 -               table[NF_SYSCTL_CT_HELPER].mode = 0444;
151134 -#ifdef CONFIG_NF_CONNTRACK_EVENTS
151135 -               table[NF_SYSCTL_CT_EVENTS].mode = 0444;
151136 -#endif
151137 -               table[NF_SYSCTL_CT_BUCKETS].mode = 0444;
151138 -       } else if (!net_eq(&init_net, net)) {
151139                 table[NF_SYSCTL_CT_BUCKETS].mode = 0444;
151140         }
151142 diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
151143 index 589d2f6978d3..878ed49d0c56 100644
151144 --- a/net/netfilter/nf_tables_api.c
151145 +++ b/net/netfilter/nf_tables_api.c
151146 @@ -6246,9 +6246,9 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk,
151147         INIT_LIST_HEAD(&obj->list);
151148         return err;
151149  err_trans:
151150 -       kfree(obj->key.name);
151151 -err_userdata:
151152         kfree(obj->udata);
151153 +err_userdata:
151154 +       kfree(obj->key.name);
151155  err_strdup:
151156         if (obj->ops->destroy)
151157                 obj->ops->destroy(&ctx, obj);
151158 diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
151159 index 9ae14270c543..2b00f7f47693 100644
151160 --- a/net/netfilter/nf_tables_offload.c
151161 +++ b/net/netfilter/nf_tables_offload.c
151162 @@ -45,6 +45,48 @@ void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
151163                 offsetof(struct nft_flow_key, control);
151166 +struct nft_offload_ethertype {
151167 +       __be16 value;
151168 +       __be16 mask;
151171 +static void nft_flow_rule_transfer_vlan(struct nft_offload_ctx *ctx,
151172 +                                       struct nft_flow_rule *flow)
151174 +       struct nft_flow_match *match = &flow->match;
151175 +       struct nft_offload_ethertype ethertype;
151177 +       if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL) &&
151178 +           match->key.basic.n_proto != htons(ETH_P_8021Q) &&
151179 +           match->key.basic.n_proto != htons(ETH_P_8021AD))
151180 +               return;
151182 +       ethertype.value = match->key.basic.n_proto;
151183 +       ethertype.mask = match->mask.basic.n_proto;
151185 +       if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_VLAN) &&
151186 +           (match->key.vlan.vlan_tpid == htons(ETH_P_8021Q) ||
151187 +            match->key.vlan.vlan_tpid == htons(ETH_P_8021AD))) {
151188 +               match->key.basic.n_proto = match->key.cvlan.vlan_tpid;
151189 +               match->mask.basic.n_proto = match->mask.cvlan.vlan_tpid;
151190 +               match->key.cvlan.vlan_tpid = match->key.vlan.vlan_tpid;
151191 +               match->mask.cvlan.vlan_tpid = match->mask.vlan.vlan_tpid;
151192 +               match->key.vlan.vlan_tpid = ethertype.value;
151193 +               match->mask.vlan.vlan_tpid = ethertype.mask;
151194 +               match->dissector.offset[FLOW_DISSECTOR_KEY_CVLAN] =
151195 +                       offsetof(struct nft_flow_key, cvlan);
151196 +               match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CVLAN);
151197 +       } else {
151198 +               match->key.basic.n_proto = match->key.vlan.vlan_tpid;
151199 +               match->mask.basic.n_proto = match->mask.vlan.vlan_tpid;
151200 +               match->key.vlan.vlan_tpid = ethertype.value;
151201 +               match->mask.vlan.vlan_tpid = ethertype.mask;
151202 +               match->dissector.offset[FLOW_DISSECTOR_KEY_VLAN] =
151203 +                       offsetof(struct nft_flow_key, vlan);
151204 +               match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN);
151205 +       }
151208  struct nft_flow_rule *nft_flow_rule_create(struct net *net,
151209                                            const struct nft_rule *rule)
151211 @@ -89,6 +131,8 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net,
151213                 expr = nft_expr_next(expr);
151214         }
151215 +       nft_flow_rule_transfer_vlan(ctx, flow);
151217         flow->proto = ctx->dep.l3num;
151218         kfree(ctx);
151220 diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
151221 index 916a3c7f9eaf..79fbf37291f3 100644
151222 --- a/net/netfilter/nfnetlink_osf.c
151223 +++ b/net/netfilter/nfnetlink_osf.c
151224 @@ -186,6 +186,8 @@ static const struct tcphdr *nf_osf_hdr_ctx_init(struct nf_osf_hdr_ctx *ctx,
151226                 ctx->optp = skb_header_pointer(skb, ip_hdrlen(skb) +
151227                                 sizeof(struct tcphdr), ctx->optsize, opts);
151228 +               if (!ctx->optp)
151229 +                       return NULL;
151230         }
151232         return tcp;
151233 diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
151234 index eb6a43a180bb..47b6d05f1ae6 100644
151235 --- a/net/netfilter/nft_cmp.c
151236 +++ b/net/netfilter/nft_cmp.c
151237 @@ -114,19 +114,56 @@ static int nft_cmp_dump(struct sk_buff *skb, const struct nft_expr *expr)
151238         return -1;
151241 +union nft_cmp_offload_data {
151242 +       u16     val16;
151243 +       u32     val32;
151244 +       u64     val64;
151247 +static void nft_payload_n2h(union nft_cmp_offload_data *data,
151248 +                           const u8 *val, u32 len)
151250 +       switch (len) {
151251 +       case 2:
151252 +               data->val16 = ntohs(*((u16 *)val));
151253 +               break;
151254 +       case 4:
151255 +               data->val32 = ntohl(*((u32 *)val));
151256 +               break;
151257 +       case 8:
151258 +               data->val64 = be64_to_cpu(*((u64 *)val));
151259 +               break;
151260 +       default:
151261 +               WARN_ON_ONCE(1);
151262 +               break;
151263 +       }
151266  static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
151267                              struct nft_flow_rule *flow,
151268                              const struct nft_cmp_expr *priv)
151270         struct nft_offload_reg *reg = &ctx->regs[priv->sreg];
151271 +       union nft_cmp_offload_data _data, _datamask;
151272         u8 *mask = (u8 *)&flow->match.mask;
151273         u8 *key = (u8 *)&flow->match.key;
151274 +       u8 *data, *datamask;
151276         if (priv->op != NFT_CMP_EQ || priv->len > reg->len)
151277                 return -EOPNOTSUPP;
151279 -       memcpy(key + reg->offset, &priv->data, reg->len);
151280 -       memcpy(mask + reg->offset, &reg->mask, reg->len);
151281 +       if (reg->flags & NFT_OFFLOAD_F_NETWORK2HOST) {
151282 +               nft_payload_n2h(&_data, (u8 *)&priv->data, reg->len);
151283 +               nft_payload_n2h(&_datamask, (u8 *)&reg->mask, reg->len);
151284 +               data = (u8 *)&_data;
151285 +               datamask = (u8 *)&_datamask;
151286 +       } else {
151287 +               data = (u8 *)&priv->data;
151288 +               datamask = (u8 *)&reg->mask;
151289 +       }
151291 +       memcpy(key + reg->offset, data, reg->len);
151292 +       memcpy(mask + reg->offset, datamask, reg->len);
151294         flow->match.dissector.used_keys |= BIT(reg->key);
151295         flow->match.dissector.offset[reg->key] = reg->base_offset;
151296 diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
151297 index cb1c8c231880..501c5b24cc39 100644
151298 --- a/net/netfilter/nft_payload.c
151299 +++ b/net/netfilter/nft_payload.c
151300 @@ -226,8 +226,9 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
151301                 if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
151302                         return -EOPNOTSUPP;
151304 -               NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
151305 -                                 vlan_tci, sizeof(__be16), reg);
151306 +               NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_VLAN, vlan,
151307 +                                       vlan_tci, sizeof(__be16), reg,
151308 +                                       NFT_OFFLOAD_F_NETWORK2HOST);
151309                 break;
151310         case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
151311                 if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
151312 @@ -241,16 +242,18 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
151313                 if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
151314                         return -EOPNOTSUPP;
151316 -               NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
151317 -                                 vlan_tci, sizeof(__be16), reg);
151318 +               NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
151319 +                                       vlan_tci, sizeof(__be16), reg,
151320 +                                       NFT_OFFLOAD_F_NETWORK2HOST);
151321                 break;
151322         case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
151323                                                         sizeof(struct vlan_hdr):
151324                 if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
151325                         return -EOPNOTSUPP;
151327 -               NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
151328 +               NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
151329                                   vlan_tpid, sizeof(__be16), reg);
151330 +               nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
151331                 break;
151332         default:
151333                 return -EOPNOTSUPP;
151334 diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
151335 index bf618b7ec1ae..560c2cda52ee 100644
151336 --- a/net/netfilter/nft_set_hash.c
151337 +++ b/net/netfilter/nft_set_hash.c
151338 @@ -406,9 +406,17 @@ static void nft_rhash_destroy(const struct nft_set *set)
151339                                     (void *)set);
151342 +/* Number of buckets is stored in u32, so cap our result to 1U<<31 */
151343 +#define NFT_MAX_BUCKETS (1U << 31)
151345  static u32 nft_hash_buckets(u32 size)
151347 -       return roundup_pow_of_two(size * 4 / 3);
151348 +       u64 val = div_u64((u64)size * 4, 3);
151350 +       if (val >= NFT_MAX_BUCKETS)
151351 +               return NFT_MAX_BUCKETS;
151353 +       return roundup_pow_of_two(val);
151356  static bool nft_rhash_estimate(const struct nft_set_desc *desc, u32 features,
151357 diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c
151358 index 75625d13e976..498a0bf6f044 100644
151359 --- a/net/netfilter/xt_SECMARK.c
151360 +++ b/net/netfilter/xt_SECMARK.c
151361 @@ -24,10 +24,9 @@ MODULE_ALIAS("ip6t_SECMARK");
151362  static u8 mode;
151364  static unsigned int
151365 -secmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
151366 +secmark_tg(struct sk_buff *skb, const struct xt_secmark_target_info_v1 *info)
151368         u32 secmark = 0;
151369 -       const struct xt_secmark_target_info *info = par->targinfo;
151371         switch (mode) {
151372         case SECMARK_MODE_SEL:
151373 @@ -41,7 +40,7 @@ secmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
151374         return XT_CONTINUE;
151377 -static int checkentry_lsm(struct xt_secmark_target_info *info)
151378 +static int checkentry_lsm(struct xt_secmark_target_info_v1 *info)
151380         int err;
151382 @@ -73,15 +72,15 @@ static int checkentry_lsm(struct xt_secmark_target_info *info)
151383         return 0;
151386 -static int secmark_tg_check(const struct xt_tgchk_param *par)
151387 +static int
151388 +secmark_tg_check(const char *table, struct xt_secmark_target_info_v1 *info)
151390 -       struct xt_secmark_target_info *info = par->targinfo;
151391         int err;
151393 -       if (strcmp(par->table, "mangle") != 0 &&
151394 -           strcmp(par->table, "security") != 0) {
151395 +       if (strcmp(table, "mangle") != 0 &&
151396 +           strcmp(table, "security") != 0) {
151397                 pr_info_ratelimited("only valid in \'mangle\' or \'security\' table, not \'%s\'\n",
151398 -                                   par->table);
151399 +                                   table);
151400                 return -EINVAL;
151401         }
151403 @@ -116,25 +115,76 @@ static void secmark_tg_destroy(const struct xt_tgdtor_param *par)
151404         }
151407 -static struct xt_target secmark_tg_reg __read_mostly = {
151408 -       .name       = "SECMARK",
151409 -       .revision   = 0,
151410 -       .family     = NFPROTO_UNSPEC,
151411 -       .checkentry = secmark_tg_check,
151412 -       .destroy    = secmark_tg_destroy,
151413 -       .target     = secmark_tg,
151414 -       .targetsize = sizeof(struct xt_secmark_target_info),
151415 -       .me         = THIS_MODULE,
151416 +static int secmark_tg_check_v0(const struct xt_tgchk_param *par)
151418 +       struct xt_secmark_target_info *info = par->targinfo;
151419 +       struct xt_secmark_target_info_v1 newinfo = {
151420 +               .mode   = info->mode,
151421 +       };
151422 +       int ret;
151424 +       memcpy(newinfo.secctx, info->secctx, SECMARK_SECCTX_MAX);
151426 +       ret = secmark_tg_check(par->table, &newinfo);
151427 +       info->secid = newinfo.secid;
151429 +       return ret;
151432 +static unsigned int
151433 +secmark_tg_v0(struct sk_buff *skb, const struct xt_action_param *par)
151435 +       const struct xt_secmark_target_info *info = par->targinfo;
151436 +       struct xt_secmark_target_info_v1 newinfo = {
151437 +               .secid  = info->secid,
151438 +       };
151440 +       return secmark_tg(skb, &newinfo);
151443 +static int secmark_tg_check_v1(const struct xt_tgchk_param *par)
151445 +       return secmark_tg_check(par->table, par->targinfo);
151448 +static unsigned int
151449 +secmark_tg_v1(struct sk_buff *skb, const struct xt_action_param *par)
151451 +       return secmark_tg(skb, par->targinfo);
151454 +static struct xt_target secmark_tg_reg[] __read_mostly = {
151455 +       {
151456 +               .name           = "SECMARK",
151457 +               .revision       = 0,
151458 +               .family         = NFPROTO_UNSPEC,
151459 +               .checkentry     = secmark_tg_check_v0,
151460 +               .destroy        = secmark_tg_destroy,
151461 +               .target         = secmark_tg_v0,
151462 +               .targetsize     = sizeof(struct xt_secmark_target_info),
151463 +               .me             = THIS_MODULE,
151464 +       },
151465 +       {
151466 +               .name           = "SECMARK",
151467 +               .revision       = 1,
151468 +               .family         = NFPROTO_UNSPEC,
151469 +               .checkentry     = secmark_tg_check_v1,
151470 +               .destroy        = secmark_tg_destroy,
151471 +               .target         = secmark_tg_v1,
151472 +               .targetsize     = sizeof(struct xt_secmark_target_info_v1),
151473 +               .usersize       = offsetof(struct xt_secmark_target_info_v1, secid),
151474 +               .me             = THIS_MODULE,
151475 +       },
151478  static int __init secmark_tg_init(void)
151480 -       return xt_register_target(&secmark_tg_reg);
151481 +       return xt_register_targets(secmark_tg_reg, ARRAY_SIZE(secmark_tg_reg));
151484  static void __exit secmark_tg_exit(void)
151486 -       xt_unregister_target(&secmark_tg_reg);
151487 +       xt_unregister_targets(secmark_tg_reg, ARRAY_SIZE(secmark_tg_reg));
151490  module_init(secmark_tg_init);
151491 diff --git a/net/nfc/digital_dep.c b/net/nfc/digital_dep.c
151492 index 5971fb6f51cc..dc21b4141b0a 100644
151493 --- a/net/nfc/digital_dep.c
151494 +++ b/net/nfc/digital_dep.c
151495 @@ -1273,6 +1273,8 @@ static void digital_tg_recv_dep_req(struct nfc_digital_dev *ddev, void *arg,
151496         }
151498         rc = nfc_tm_data_received(ddev->nfc_dev, resp);
151499 +       if (rc)
151500 +               resp = NULL;
151502  exit:
151503         kfree_skb(ddev->chaining_skb);
151504 diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
151505 index a3b46f888803..53dbe733f998 100644
151506 --- a/net/nfc/llcp_sock.c
151507 +++ b/net/nfc/llcp_sock.c
151508 @@ -109,12 +109,14 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
151509                                           GFP_KERNEL);
151510         if (!llcp_sock->service_name) {
151511                 nfc_llcp_local_put(llcp_sock->local);
151512 +               llcp_sock->local = NULL;
151513                 ret = -ENOMEM;
151514                 goto put_dev;
151515         }
151516         llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
151517         if (llcp_sock->ssap == LLCP_SAP_MAX) {
151518                 nfc_llcp_local_put(llcp_sock->local);
151519 +               llcp_sock->local = NULL;
151520                 kfree(llcp_sock->service_name);
151521                 llcp_sock->service_name = NULL;
151522                 ret = -EADDRINUSE;
151523 @@ -709,6 +711,7 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
151524         llcp_sock->ssap = nfc_llcp_get_local_ssap(local);
151525         if (llcp_sock->ssap == LLCP_SAP_MAX) {
151526                 nfc_llcp_local_put(llcp_sock->local);
151527 +               llcp_sock->local = NULL;
151528                 ret = -ENOMEM;
151529                 goto put_dev;
151530         }
151531 @@ -756,6 +759,7 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
151532  sock_llcp_release:
151533         nfc_llcp_put_ssap(local, llcp_sock->ssap);
151534         nfc_llcp_local_put(llcp_sock->local);
151535 +       llcp_sock->local = NULL;
151537  put_dev:
151538         nfc_put_device(dev);
151539 diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
151540 index 92a0b67b2728..77d924ab8cdb 100644
151541 --- a/net/openvswitch/actions.c
151542 +++ b/net/openvswitch/actions.c
151543 @@ -827,17 +827,17 @@ static void ovs_fragment(struct net *net, struct vport *vport,
151544         }
151546         if (key->eth.type == htons(ETH_P_IP)) {
151547 -               struct dst_entry ovs_dst;
151548 +               struct rtable ovs_rt = { 0 };
151549                 unsigned long orig_dst;
151551                 prepare_frag(vport, skb, orig_network_offset,
151552                              ovs_key_mac_proto(key));
151553 -               dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
151554 +               dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
151555                          DST_OBSOLETE_NONE, DST_NOCOUNT);
151556 -               ovs_dst.dev = vport->dev;
151557 +               ovs_rt.dst.dev = vport->dev;
151559                 orig_dst = skb->_skb_refdst;
151560 -               skb_dst_set_noref(skb, &ovs_dst);
151561 +               skb_dst_set_noref(skb, &ovs_rt.dst);
151562                 IPCB(skb)->frag_max_size = mru;
151564                 ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
151565 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
151566 index e24b2841c643..9611e41c7b8b 100644
151567 --- a/net/packet/af_packet.c
151568 +++ b/net/packet/af_packet.c
151569 @@ -1359,7 +1359,7 @@ static unsigned int fanout_demux_rollover(struct packet_fanout *f,
151570         struct packet_sock *po, *po_next, *po_skip = NULL;
151571         unsigned int i, j, room = ROOM_NONE;
151573 -       po = pkt_sk(f->arr[idx]);
151574 +       po = pkt_sk(rcu_dereference(f->arr[idx]));
151576         if (try_self) {
151577                 room = packet_rcv_has_room(po, skb);
151578 @@ -1371,7 +1371,7 @@ static unsigned int fanout_demux_rollover(struct packet_fanout *f,
151580         i = j = min_t(int, po->rollover->sock, num - 1);
151581         do {
151582 -               po_next = pkt_sk(f->arr[i]);
151583 +               po_next = pkt_sk(rcu_dereference(f->arr[i]));
151584                 if (po_next != po_skip && !READ_ONCE(po_next->pressure) &&
151585                     packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
151586                         if (i != j)
151587 @@ -1466,7 +1466,7 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
151588         if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
151589                 idx = fanout_demux_rollover(f, skb, idx, true, num);
151591 -       po = pkt_sk(f->arr[idx]);
151592 +       po = pkt_sk(rcu_dereference(f->arr[idx]));
151593         return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
151596 @@ -1480,7 +1480,7 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po)
151597         struct packet_fanout *f = po->fanout;
151599         spin_lock(&f->lock);
151600 -       f->arr[f->num_members] = sk;
151601 +       rcu_assign_pointer(f->arr[f->num_members], sk);
151602         smp_wmb();
151603         f->num_members++;
151604         if (f->num_members == 1)
151605 @@ -1495,11 +1495,14 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
151607         spin_lock(&f->lock);
151608         for (i = 0; i < f->num_members; i++) {
151609 -               if (f->arr[i] == sk)
151610 +               if (rcu_dereference_protected(f->arr[i],
151611 +                                             lockdep_is_held(&f->lock)) == sk)
151612                         break;
151613         }
151614         BUG_ON(i >= f->num_members);
151615 -       f->arr[i] = f->arr[f->num_members - 1];
151616 +       rcu_assign_pointer(f->arr[i],
151617 +                          rcu_dereference_protected(f->arr[f->num_members - 1],
151618 +                                                    lockdep_is_held(&f->lock)));
151619         f->num_members--;
151620         if (f->num_members == 0)
151621                 __dev_remove_pack(&f->prot_hook);
151622 diff --git a/net/packet/internal.h b/net/packet/internal.h
151623 index 5f61e59ebbff..48af35b1aed2 100644
151624 --- a/net/packet/internal.h
151625 +++ b/net/packet/internal.h
151626 @@ -94,7 +94,7 @@ struct packet_fanout {
151627         spinlock_t              lock;
151628         refcount_t              sk_ref;
151629         struct packet_type      prot_hook ____cacheline_aligned_in_smp;
151630 -       struct sock             *arr[];
151631 +       struct sock     __rcu   *arr[];
151634  struct packet_rollover {
151635 diff --git a/net/qrtr/mhi.c b/net/qrtr/mhi.c
151636 index 2bf2b1943e61..fa611678af05 100644
151637 --- a/net/qrtr/mhi.c
151638 +++ b/net/qrtr/mhi.c
151639 @@ -50,6 +50,9 @@ static int qcom_mhi_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
151640         struct qrtr_mhi_dev *qdev = container_of(ep, struct qrtr_mhi_dev, ep);
151641         int rc;
151643 +       if (skb->sk)
151644 +               sock_hold(skb->sk);
151646         rc = skb_linearize(skb);
151647         if (rc)
151648                 goto free_skb;
151649 @@ -59,12 +62,11 @@ static int qcom_mhi_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
151650         if (rc)
151651                 goto free_skb;
151653 -       if (skb->sk)
151654 -               sock_hold(skb->sk);
151656         return rc;
151658  free_skb:
151659 +       if (skb->sk)
151660 +               sock_put(skb->sk);
151661         kfree_skb(skb);
151663         return rc;
151664 diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
151665 index 16e888a9601d..48fdf7293dea 100644
151666 --- a/net/sched/act_ct.c
151667 +++ b/net/sched/act_ct.c
151668 @@ -732,7 +732,8 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
151669  #endif
151670         }
151672 -       *qdisc_skb_cb(skb) = cb;
151673 +       if (err != -EINPROGRESS)
151674 +               *qdisc_skb_cb(skb) = cb;
151675         skb_clear_hash(skb);
151676         skb->ignore_df = 1;
151677         return err;
151678 @@ -967,7 +968,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
151679         err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag);
151680         if (err == -EINPROGRESS) {
151681                 retval = TC_ACT_STOLEN;
151682 -               goto out;
151683 +               goto out_clear;
151684         }
151685         if (err)
151686                 goto drop;
151687 @@ -1030,7 +1031,6 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
151688  out_push:
151689         skb_push_rcsum(skb, nh_ofs);
151691 -out:
151692         qdisc_skb_cb(skb)->post_ct = true;
151693  out_clear:
151694         tcf_action_update_bstats(&c->common, skb);
151695 diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
151696 index c69a4ba9c33f..3035f96c6e6c 100644
151697 --- a/net/sched/cls_flower.c
151698 +++ b/net/sched/cls_flower.c
151699 @@ -209,16 +209,16 @@ static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
151700                                   struct fl_flow_key *key,
151701                                   struct fl_flow_key *mkey)
151703 -       __be16 min_mask, max_mask, min_val, max_val;
151704 +       u16 min_mask, max_mask, min_val, max_val;
151706 -       min_mask = htons(filter->mask->key.tp_range.tp_min.dst);
151707 -       max_mask = htons(filter->mask->key.tp_range.tp_max.dst);
151708 -       min_val = htons(filter->key.tp_range.tp_min.dst);
151709 -       max_val = htons(filter->key.tp_range.tp_max.dst);
151710 +       min_mask = ntohs(filter->mask->key.tp_range.tp_min.dst);
151711 +       max_mask = ntohs(filter->mask->key.tp_range.tp_max.dst);
151712 +       min_val = ntohs(filter->key.tp_range.tp_min.dst);
151713 +       max_val = ntohs(filter->key.tp_range.tp_max.dst);
151715         if (min_mask && max_mask) {
151716 -               if (htons(key->tp_range.tp.dst) < min_val ||
151717 -                   htons(key->tp_range.tp.dst) > max_val)
151718 +               if (ntohs(key->tp_range.tp.dst) < min_val ||
151719 +                   ntohs(key->tp_range.tp.dst) > max_val)
151720                         return false;
151722                 /* skb does not have min and max values */
151723 @@ -232,16 +232,16 @@ static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
151724                                   struct fl_flow_key *key,
151725                                   struct fl_flow_key *mkey)
151727 -       __be16 min_mask, max_mask, min_val, max_val;
151728 +       u16 min_mask, max_mask, min_val, max_val;
151730 -       min_mask = htons(filter->mask->key.tp_range.tp_min.src);
151731 -       max_mask = htons(filter->mask->key.tp_range.tp_max.src);
151732 -       min_val = htons(filter->key.tp_range.tp_min.src);
151733 -       max_val = htons(filter->key.tp_range.tp_max.src);
151734 +       min_mask = ntohs(filter->mask->key.tp_range.tp_min.src);
151735 +       max_mask = ntohs(filter->mask->key.tp_range.tp_max.src);
151736 +       min_val = ntohs(filter->key.tp_range.tp_min.src);
151737 +       max_val = ntohs(filter->key.tp_range.tp_max.src);
151739         if (min_mask && max_mask) {
151740 -               if (htons(key->tp_range.tp.src) < min_val ||
151741 -                   htons(key->tp_range.tp.src) > max_val)
151742 +               if (ntohs(key->tp_range.tp.src) < min_val ||
151743 +                   ntohs(key->tp_range.tp.src) > max_val)
151744                         return false;
151746                 /* skb does not have min and max values */
151747 @@ -783,16 +783,16 @@ static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
151748                        TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));
151750         if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
151751 -           htons(key->tp_range.tp_max.dst) <=
151752 -           htons(key->tp_range.tp_min.dst)) {
151753 +           ntohs(key->tp_range.tp_max.dst) <=
151754 +           ntohs(key->tp_range.tp_min.dst)) {
151755                 NL_SET_ERR_MSG_ATTR(extack,
151756                                     tb[TCA_FLOWER_KEY_PORT_DST_MIN],
151757                                     "Invalid destination port range (min must be strictly smaller than max)");
151758                 return -EINVAL;
151759         }
151760         if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
151761 -           htons(key->tp_range.tp_max.src) <=
151762 -           htons(key->tp_range.tp_min.src)) {
151763 +           ntohs(key->tp_range.tp_max.src) <=
151764 +           ntohs(key->tp_range.tp_min.src)) {
151765                 NL_SET_ERR_MSG_ATTR(extack,
151766                                     tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
151767                                     "Invalid source port range (min must be strictly smaller than max)");
151768 diff --git a/net/sched/sch_frag.c b/net/sched/sch_frag.c
151769 index e1e77d3fb6c0..8c06381391d6 100644
151770 --- a/net/sched/sch_frag.c
151771 +++ b/net/sched/sch_frag.c
151772 @@ -90,16 +90,16 @@ static int sch_fragment(struct net *net, struct sk_buff *skb,
151773         }
151775         if (skb_protocol(skb, true) == htons(ETH_P_IP)) {
151776 -               struct dst_entry sch_frag_dst;
151777 +               struct rtable sch_frag_rt = { 0 };
151778                 unsigned long orig_dst;
151780                 sch_frag_prepare_frag(skb, xmit);
151781 -               dst_init(&sch_frag_dst, &sch_frag_dst_ops, NULL, 1,
151782 +               dst_init(&sch_frag_rt.dst, &sch_frag_dst_ops, NULL, 1,
151783                          DST_OBSOLETE_NONE, DST_NOCOUNT);
151784 -               sch_frag_dst.dev = skb->dev;
151785 +               sch_frag_rt.dst.dev = skb->dev;
151787                 orig_dst = skb->_skb_refdst;
151788 -               skb_dst_set_noref(skb, &sch_frag_dst);
151789 +               skb_dst_set_noref(skb, &sch_frag_rt.dst);
151790                 IPCB(skb)->frag_max_size = mru;
151792                 ret = ip_do_fragment(net, skb->sk, skb, sch_frag_xmit);
151793 diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
151794 index 8287894541e3..909c798b7403 100644
151795 --- a/net/sched/sch_taprio.c
151796 +++ b/net/sched/sch_taprio.c
151797 @@ -901,6 +901,12 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
151799                 list_for_each_entry(entry, &new->entries, list)
151800                         cycle = ktime_add_ns(cycle, entry->interval);
151802 +               if (!cycle) {
151803 +                       NL_SET_ERR_MSG(extack, "'cycle_time' can never be 0");
151804 +                       return -EINVAL;
151805 +               }
151807                 new->cycle_time = cycle;
151808         }
151810 diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
151811 index f77484df097b..da4ce0947c3a 100644
151812 --- a/net/sctp/sm_make_chunk.c
151813 +++ b/net/sctp/sm_make_chunk.c
151814 @@ -3147,7 +3147,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
151815                  * primary.
151816                  */
151817                 if (af->is_any(&addr))
151818 -                       memcpy(&addr.v4, sctp_source(asconf), sizeof(addr));
151819 +                       memcpy(&addr, sctp_source(asconf), sizeof(addr));
151821                 if (security_sctp_bind_connect(asoc->ep->base.sk,
151822                                                SCTP_PARAM_SET_PRIMARY,
151823 diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
151824 index af2b7041fa4e..73bb4c6e9201 100644
151825 --- a/net/sctp/sm_statefuns.c
151826 +++ b/net/sctp/sm_statefuns.c
151827 @@ -1852,20 +1852,35 @@ static enum sctp_disposition sctp_sf_do_dupcook_a(
151828                         SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
151829         sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_ASCONF_QUEUE, SCTP_NULL());
151831 -       repl = sctp_make_cookie_ack(new_asoc, chunk);
151832 +       /* Update the content of current association. */
151833 +       if (sctp_assoc_update((struct sctp_association *)asoc, new_asoc)) {
151834 +               struct sctp_chunk *abort;
151836 +               abort = sctp_make_abort(asoc, NULL, sizeof(struct sctp_errhdr));
151837 +               if (abort) {
151838 +                       sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
151839 +                       sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
151840 +               }
151841 +               sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNABORTED));
151842 +               sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
151843 +                               SCTP_PERR(SCTP_ERROR_RSRC_LOW));
151844 +               SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
151845 +               SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
151846 +               goto nomem;
151847 +       }
151849 +       repl = sctp_make_cookie_ack(asoc, chunk);
151850         if (!repl)
151851                 goto nomem;
151853         /* Report association restart to upper layer. */
151854         ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_RESTART, 0,
151855 -                                            new_asoc->c.sinit_num_ostreams,
151856 -                                            new_asoc->c.sinit_max_instreams,
151857 +                                            asoc->c.sinit_num_ostreams,
151858 +                                            asoc->c.sinit_max_instreams,
151859                                              NULL, GFP_ATOMIC);
151860         if (!ev)
151861                 goto nomem_ev;
151863 -       /* Update the content of current association. */
151864 -       sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
151865         sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
151866         if ((sctp_state(asoc, SHUTDOWN_PENDING) ||
151867              sctp_state(asoc, SHUTDOWN_SENT)) &&
151868 @@ -1929,7 +1944,8 @@ static enum sctp_disposition sctp_sf_do_dupcook_b(
151869         sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
151870         sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
151871                         SCTP_STATE(SCTP_STATE_ESTABLISHED));
151872 -       SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
151873 +       if (asoc->state < SCTP_STATE_ESTABLISHED)
151874 +               SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
151875         sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
151877         repl = sctp_make_cookie_ack(new_asoc, chunk);
151878 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
151879 index b9b3d899a611..4ae428f2f2c5 100644
151880 --- a/net/sctp/socket.c
151881 +++ b/net/sctp/socket.c
151882 @@ -357,6 +357,18 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
151883         return af;
151886 +static void sctp_auto_asconf_init(struct sctp_sock *sp)
151888 +       struct net *net = sock_net(&sp->inet.sk);
151890 +       if (net->sctp.default_auto_asconf) {
151891 +               spin_lock(&net->sctp.addr_wq_lock);
151892 +               list_add_tail(&sp->auto_asconf_list, &net->sctp.auto_asconf_splist);
151893 +               spin_unlock(&net->sctp.addr_wq_lock);
151894 +               sp->do_auto_asconf = 1;
151895 +       }
151898  /* Bind a local address either to an endpoint or to an association.  */
151899  static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
151901 @@ -418,8 +430,10 @@ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
151902                 return -EADDRINUSE;
151904         /* Refresh ephemeral port.  */
151905 -       if (!bp->port)
151906 +       if (!bp->port) {
151907                 bp->port = inet_sk(sk)->inet_num;
151908 +               sctp_auto_asconf_init(sp);
151909 +       }
151911         /* Add the address to the bind address list.
151912          * Use GFP_ATOMIC since BHs will be disabled.
151913 @@ -1520,9 +1534,11 @@ static void sctp_close(struct sock *sk, long timeout)
151915         /* Supposedly, no process has access to the socket, but
151916          * the net layers still may.
151917 +        * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
151918 +        * held and that should be grabbed before socket lock.
151919          */
151920 -       local_bh_disable();
151921 -       bh_lock_sock(sk);
151922 +       spin_lock_bh(&net->sctp.addr_wq_lock);
151923 +       bh_lock_sock_nested(sk);
151925         /* Hold the sock, since sk_common_release() will put sock_put()
151926          * and we have just a little more cleanup.
151927 @@ -1531,7 +1547,7 @@ static void sctp_close(struct sock *sk, long timeout)
151928         sk_common_release(sk);
151930         bh_unlock_sock(sk);
151931 -       local_bh_enable();
151932 +       spin_unlock_bh(&net->sctp.addr_wq_lock);
151934         sock_put(sk);
151936 @@ -4991,16 +5007,6 @@ static int sctp_init_sock(struct sock *sk)
151937         sk_sockets_allocated_inc(sk);
151938         sock_prot_inuse_add(net, sk->sk_prot, 1);
151940 -       if (net->sctp.default_auto_asconf) {
151941 -               spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
151942 -               list_add_tail(&sp->auto_asconf_list,
151943 -                   &net->sctp.auto_asconf_splist);
151944 -               sp->do_auto_asconf = 1;
151945 -               spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
151946 -       } else {
151947 -               sp->do_auto_asconf = 0;
151948 -       }
151950         local_bh_enable();
151952         return 0;
151953 @@ -5025,9 +5031,7 @@ static void sctp_destroy_sock(struct sock *sk)
151955         if (sp->do_auto_asconf) {
151956                 sp->do_auto_asconf = 0;
151957 -               spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
151958                 list_del(&sp->auto_asconf_list);
151959 -               spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
151960         }
151961         sctp_endpoint_free(sp->ep);
151962         local_bh_disable();
151963 @@ -9398,6 +9402,8 @@ static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
151964                         return err;
151965         }
151967 +       sctp_auto_asconf_init(newsp);
151969         /* Move any messages in the old socket's receive queue that are for the
151970          * peeled off association to the new socket's receive queue.
151971          */
151972 diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
151973 index 47340b3b514f..cb23cca72c24 100644
151974 --- a/net/smc/af_smc.c
151975 +++ b/net/smc/af_smc.c
151976 @@ -2162,6 +2162,9 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
151977         struct smc_sock *smc;
151978         int val, rc;
151980 +       if (level == SOL_TCP && optname == TCP_ULP)
151981 +               return -EOPNOTSUPP;
151983         smc = smc_sk(sk);
151985         /* generic setsockopts reaching us here always apply to the
151986 @@ -2186,7 +2189,6 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
151987         if (rc || smc->use_fallback)
151988                 goto out;
151989         switch (optname) {
151990 -       case TCP_ULP:
151991         case TCP_FASTOPEN:
151992         case TCP_FASTOPEN_CONNECT:
151993         case TCP_FASTOPEN_KEY:
151994 diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
151995 index 612f0a641f4c..f555d335e910 100644
151996 --- a/net/sunrpc/clnt.c
151997 +++ b/net/sunrpc/clnt.c
151998 @@ -1799,7 +1799,6 @@ call_allocate(struct rpc_task *task)
152000         status = xprt->ops->buf_alloc(task);
152001         trace_rpc_buf_alloc(task, status);
152002 -       xprt_inject_disconnect(xprt);
152003         if (status == 0)
152004                 return;
152005         if (status != -ENOMEM) {
152006 @@ -2457,12 +2456,6 @@ call_decode(struct rpc_task *task)
152007                 task->tk_flags &= ~RPC_CALL_MAJORSEEN;
152008         }
152010 -       /*
152011 -        * Ensure that we see all writes made by xprt_complete_rqst()
152012 -        * before it changed req->rq_reply_bytes_recvd.
152013 -        */
152014 -       smp_rmb();
152016         /*
152017          * Did we ever call xprt_complete_rqst()? If not, we should assume
152018          * the message is incomplete.
152019 @@ -2471,6 +2464,11 @@ call_decode(struct rpc_task *task)
152020         if (!req->rq_reply_bytes_recvd)
152021                 goto out;
152023 +       /* Ensure that we see all writes made by xprt_complete_rqst()
152024 +        * before it changed req->rq_reply_bytes_recvd.
152025 +        */
152026 +       smp_rmb();
152028         req->rq_rcv_buf.len = req->rq_private_buf.len;
152029         trace_rpc_xdr_recvfrom(task, &req->rq_rcv_buf);
152031 diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
152032 index d76dc9d95d16..0de918cb3d90 100644
152033 --- a/net/sunrpc/svc.c
152034 +++ b/net/sunrpc/svc.c
152035 @@ -846,7 +846,8 @@ void
152036  svc_rqst_free(struct svc_rqst *rqstp)
152038         svc_release_buffer(rqstp);
152039 -       put_page(rqstp->rq_scratch_page);
152040 +       if (rqstp->rq_scratch_page)
152041 +               put_page(rqstp->rq_scratch_page);
152042         kfree(rqstp->rq_resp);
152043         kfree(rqstp->rq_argp);
152044         kfree(rqstp->rq_auth_data);
152045 diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
152046 index 2e2f007dfc9f..7cde41a936a4 100644
152047 --- a/net/sunrpc/svcsock.c
152048 +++ b/net/sunrpc/svcsock.c
152049 @@ -1171,7 +1171,7 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
152050         tcp_sock_set_cork(svsk->sk_sk, true);
152051         err = svc_tcp_sendmsg(svsk->sk_sock, xdr, marker, &sent);
152052         xdr_free_bvec(xdr);
152053 -       trace_svcsock_tcp_send(xprt, err < 0 ? err : sent);
152054 +       trace_svcsock_tcp_send(xprt, err < 0 ? (long)err : sent);
152055         if (err < 0 || sent != (xdr->len + sizeof(marker)))
152056                 goto out_close;
152057         if (atomic_dec_and_test(&svsk->sk_sendqlen))
152058 diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
152059 index 691ccf8049a4..20fe31b1b776 100644
152060 --- a/net/sunrpc/xprt.c
152061 +++ b/net/sunrpc/xprt.c
152062 @@ -698,9 +698,9 @@ int xprt_adjust_timeout(struct rpc_rqst *req)
152063         const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
152064         int status = 0;
152066 -       if (time_before(jiffies, req->rq_minortimeo))
152067 -               return status;
152068         if (time_before(jiffies, req->rq_majortimeo)) {
152069 +               if (time_before(jiffies, req->rq_minortimeo))
152070 +                       return status;
152071                 if (to->to_exponential)
152072                         req->rq_timeout <<= 1;
152073                 else
152074 @@ -1469,8 +1469,6 @@ bool xprt_prepare_transmit(struct rpc_task *task)
152075         struct rpc_xprt *xprt = req->rq_xprt;
152077         if (!xprt_lock_write(xprt, task)) {
152078 -               trace_xprt_transmit_queued(xprt, task);
152080                 /* Race breaker: someone may have transmitted us */
152081                 if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
152082                         rpc_wake_up_queued_task_set_status(&xprt->sending,
152083 @@ -1483,7 +1481,10 @@ bool xprt_prepare_transmit(struct rpc_task *task)
152085  void xprt_end_transmit(struct rpc_task *task)
152087 -       xprt_release_write(task->tk_rqstp->rq_xprt, task);
152088 +       struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
152090 +       xprt_inject_disconnect(xprt);
152091 +       xprt_release_write(xprt, task);
152094  /**
152095 @@ -1885,7 +1886,6 @@ void xprt_release(struct rpc_task *task)
152096         spin_unlock(&xprt->transport_lock);
152097         if (req->rq_buffer)
152098                 xprt->ops->buf_free(task);
152099 -       xprt_inject_disconnect(xprt);
152100         xdr_free_bvec(&req->rq_rcv_buf);
152101         xdr_free_bvec(&req->rq_snd_buf);
152102         if (req->rq_cred != NULL)
152103 diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
152104 index 766a1048a48a..aca2228095db 100644
152105 --- a/net/sunrpc/xprtrdma/frwr_ops.c
152106 +++ b/net/sunrpc/xprtrdma/frwr_ops.c
152107 @@ -257,6 +257,7 @@ int frwr_query_device(struct rpcrdma_ep *ep, const struct ib_device *device)
152108         ep->re_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
152109         ep->re_attr.cap.max_recv_wr = ep->re_max_requests;
152110         ep->re_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
152111 +       ep->re_attr.cap.max_recv_wr += RPCRDMA_MAX_RECV_BATCH;
152112         ep->re_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */
152114         ep->re_max_rdma_segs =
152115 @@ -575,7 +576,6 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
152116                 mr = container_of(frwr, struct rpcrdma_mr, frwr);
152117                 bad_wr = bad_wr->next;
152119 -               list_del_init(&mr->mr_list);
152120                 frwr_mr_recycle(mr);
152121         }
152123 diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
152124 index 292f066d006e..21ddd78a8c35 100644
152125 --- a/net/sunrpc/xprtrdma/rpc_rdma.c
152126 +++ b/net/sunrpc/xprtrdma/rpc_rdma.c
152127 @@ -1430,9 +1430,10 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
152128                 credits = 1;    /* don't deadlock */
152129         else if (credits > r_xprt->rx_ep->re_max_requests)
152130                 credits = r_xprt->rx_ep->re_max_requests;
152131 +       rpcrdma_post_recvs(r_xprt, credits + (buf->rb_bc_srv_max_requests << 1),
152132 +                          false);
152133         if (buf->rb_credits != credits)
152134                 rpcrdma_update_cwnd(r_xprt, credits);
152135 -       rpcrdma_post_recvs(r_xprt, false);
152137         req = rpcr_to_rdmar(rqst);
152138         if (unlikely(req->rl_reply))
152139 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
152140 index 52c759a8543e..3669661457c1 100644
152141 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
152142 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
152143 @@ -958,7 +958,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
152144         p = xdr_reserve_space(&sctxt->sc_stream,
152145                               rpcrdma_fixed_maxsz * sizeof(*p));
152146         if (!p)
152147 -               goto err0;
152148 +               goto err1;
152150         ret = svc_rdma_send_reply_chunk(rdma, rctxt, &rqstp->rq_res);
152151         if (ret < 0)
152152 @@ -970,11 +970,11 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
152153         *p = pcl_is_empty(&rctxt->rc_reply_pcl) ? rdma_msg : rdma_nomsg;
152155         if (svc_rdma_encode_read_list(sctxt) < 0)
152156 -               goto err0;
152157 +               goto err1;
152158         if (svc_rdma_encode_write_list(rctxt, sctxt) < 0)
152159 -               goto err0;
152160 +               goto err1;
152161         if (svc_rdma_encode_reply_chunk(rctxt, sctxt, ret) < 0)
152162 -               goto err0;
152163 +               goto err1;
152165         ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp);
152166         if (ret < 0)
152167 diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
152168 index 78d29d1bcc20..09953597d055 100644
152169 --- a/net/sunrpc/xprtrdma/transport.c
152170 +++ b/net/sunrpc/xprtrdma/transport.c
152171 @@ -262,8 +262,10 @@ xprt_rdma_connect_worker(struct work_struct *work)
152172   * xprt_rdma_inject_disconnect - inject a connection fault
152173   * @xprt: transport context
152174   *
152175 - * If @xprt is connected, disconnect it to simulate spurious connection
152176 - * loss.
152177 + * If @xprt is connected, disconnect it to simulate spurious
152178 + * connection loss. Caller must hold @xprt's send lock to
152179 + * ensure that data structures and hardware resources are
152180 + * stable during the rdma_disconnect() call.
152181   */
152182  static void
152183  xprt_rdma_inject_disconnect(struct rpc_xprt *xprt)
152184 diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
152185 index ec912cf9c618..f3fffc74ab0f 100644
152186 --- a/net/sunrpc/xprtrdma/verbs.c
152187 +++ b/net/sunrpc/xprtrdma/verbs.c
152188 @@ -535,7 +535,7 @@ int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt)
152189          * outstanding Receives.
152190          */
152191         rpcrdma_ep_get(ep);
152192 -       rpcrdma_post_recvs(r_xprt, true);
152193 +       rpcrdma_post_recvs(r_xprt, 1, true);
152195         rc = rdma_connect(ep->re_id, &ep->re_remote_cma);
152196         if (rc)
152197 @@ -1364,21 +1364,21 @@ int rpcrdma_post_sends(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
152198  /**
152199   * rpcrdma_post_recvs - Refill the Receive Queue
152200   * @r_xprt: controlling transport instance
152201 - * @temp: mark Receive buffers to be deleted after use
152202 + * @needed: current credit grant
152203 + * @temp: mark Receive buffers to be deleted after one use
152204   *
152205   */
152206 -void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
152207 +void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp)
152209         struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
152210         struct rpcrdma_ep *ep = r_xprt->rx_ep;
152211         struct ib_recv_wr *wr, *bad_wr;
152212         struct rpcrdma_rep *rep;
152213 -       int needed, count, rc;
152214 +       int count, rc;
152216         rc = 0;
152217         count = 0;
152219 -       needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1);
152220         if (likely(ep->re_receive_count > needed))
152221                 goto out;
152222         needed -= ep->re_receive_count;
152223 diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
152224 index fe3be985e239..28af11fbe643 100644
152225 --- a/net/sunrpc/xprtrdma/xprt_rdma.h
152226 +++ b/net/sunrpc/xprtrdma/xprt_rdma.h
152227 @@ -461,7 +461,7 @@ int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt);
152228  void rpcrdma_xprt_disconnect(struct rpcrdma_xprt *r_xprt);
152230  int rpcrdma_post_sends(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
152231 -void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp);
152232 +void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp);
152235   * Buffer calls - xprtrdma/verbs.c
152236 diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
152237 index 97710ce36047..c89ce47c56cf 100644
152238 --- a/net/tipc/crypto.c
152239 +++ b/net/tipc/crypto.c
152240 @@ -1492,6 +1492,8 @@ int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
152241         /* Allocate statistic structure */
152242         c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC);
152243         if (!c->stats) {
152244 +               if (c->wq)
152245 +                       destroy_workqueue(c->wq);
152246                 kfree_sensitive(c);
152247                 return -ENOMEM;
152248         }
152249 diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
152250 index 5a1ce64039f7..0749df80454d 100644
152251 --- a/net/tipc/netlink_compat.c
152252 +++ b/net/tipc/netlink_compat.c
152253 @@ -696,7 +696,7 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
152254         if (err)
152255                 return err;
152257 -       link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
152258 +       link_info.dest = htonl(nla_get_flag(link[TIPC_NLA_LINK_DEST]));
152259         link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
152260         nla_strscpy(link_info.str, link[TIPC_NLA_LINK_NAME],
152261                     TIPC_MAX_LINK_NAME);
152262 diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
152263 index e4370b1b7494..902cb6dd710b 100644
152264 --- a/net/vmw_vsock/virtio_transport_common.c
152265 +++ b/net/vmw_vsock/virtio_transport_common.c
152266 @@ -733,6 +733,23 @@ static int virtio_transport_reset_no_sock(const struct virtio_transport *t,
152267         return t->send_pkt(reply);
152270 +/* This function should be called with sk_lock held and SOCK_DONE set */
152271 +static void virtio_transport_remove_sock(struct vsock_sock *vsk)
152273 +       struct virtio_vsock_sock *vvs = vsk->trans;
152274 +       struct virtio_vsock_pkt *pkt, *tmp;
152276 +       /* We don't need to take rx_lock, as the socket is closing and we are
152277 +        * removing it.
152278 +        */
152279 +       list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
152280 +               list_del(&pkt->list);
152281 +               virtio_transport_free_pkt(pkt);
152282 +       }
152284 +       vsock_remove_sock(vsk);
152287  static void virtio_transport_wait_close(struct sock *sk, long timeout)
152289         if (timeout) {
152290 @@ -765,7 +782,7 @@ static void virtio_transport_do_close(struct vsock_sock *vsk,
152291             (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
152292                 vsk->close_work_scheduled = false;
152294 -               vsock_remove_sock(vsk);
152295 +               virtio_transport_remove_sock(vsk);
152297                 /* Release refcnt obtained when we scheduled the timeout */
152298                 sock_put(sk);
152299 @@ -828,22 +845,15 @@ static bool virtio_transport_close(struct vsock_sock *vsk)
152301  void virtio_transport_release(struct vsock_sock *vsk)
152303 -       struct virtio_vsock_sock *vvs = vsk->trans;
152304 -       struct virtio_vsock_pkt *pkt, *tmp;
152305         struct sock *sk = &vsk->sk;
152306         bool remove_sock = true;
152308         if (sk->sk_type == SOCK_STREAM)
152309                 remove_sock = virtio_transport_close(vsk);
152311 -       list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
152312 -               list_del(&pkt->list);
152313 -               virtio_transport_free_pkt(pkt);
152314 -       }
152316         if (remove_sock) {
152317                 sock_set_flag(sk, SOCK_DONE);
152318 -               vsock_remove_sock(vsk);
152319 +               virtio_transport_remove_sock(vsk);
152320         }
152322  EXPORT_SYMBOL_GPL(virtio_transport_release);
152323 diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
152324 index 8b65323207db..1c9ecb18b8e6 100644
152325 --- a/net/vmw_vsock/vmci_transport.c
152326 +++ b/net/vmw_vsock/vmci_transport.c
152327 @@ -568,8 +568,7 @@ vmci_transport_queue_pair_alloc(struct vmci_qp **qpair,
152328                                peer, flags, VMCI_NO_PRIVILEGE_FLAGS);
152329  out:
152330         if (err < 0) {
152331 -               pr_err("Could not attach to queue pair with %d\n",
152332 -                      err);
152333 +               pr_err_once("Could not attach to queue pair with %d\n", err);
152334                 err = vmci_transport_error_to_vsock_error(err);
152335         }
152337 diff --git a/net/wireless/core.c b/net/wireless/core.c
152338 index a2785379df6e..589ee5a69a2e 100644
152339 --- a/net/wireless/core.c
152340 +++ b/net/wireless/core.c
152341 @@ -332,14 +332,29 @@ static void cfg80211_event_work(struct work_struct *work)
152342  void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev)
152344         struct wireless_dev *wdev, *tmp;
152345 +       bool found = false;
152347         ASSERT_RTNL();
152348 -       lockdep_assert_wiphy(&rdev->wiphy);
152350 +       list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
152351 +               if (wdev->nl_owner_dead) {
152352 +                       if (wdev->netdev)
152353 +                               dev_close(wdev->netdev);
152354 +                       found = true;
152355 +               }
152356 +       }
152358 +       if (!found)
152359 +               return;
152361 +       wiphy_lock(&rdev->wiphy);
152362         list_for_each_entry_safe(wdev, tmp, &rdev->wiphy.wdev_list, list) {
152363 -               if (wdev->nl_owner_dead)
152364 +               if (wdev->nl_owner_dead) {
152365 +                       cfg80211_leave(rdev, wdev);
152366                         rdev_del_virtual_intf(rdev, wdev);
152367 +               }
152368         }
152369 +       wiphy_unlock(&rdev->wiphy);
152372  static void cfg80211_destroy_iface_wk(struct work_struct *work)
152373 @@ -350,9 +365,7 @@ static void cfg80211_destroy_iface_wk(struct work_struct *work)
152374                             destroy_work);
152376         rtnl_lock();
152377 -       wiphy_lock(&rdev->wiphy);
152378         cfg80211_destroy_ifaces(rdev);
152379 -       wiphy_unlock(&rdev->wiphy);
152380         rtnl_unlock();
152383 diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
152384 index b1df42e4f1eb..a5224da63832 100644
152385 --- a/net/wireless/nl80211.c
152386 +++ b/net/wireless/nl80211.c
152387 @@ -3929,7 +3929,7 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
152388         return err;
152391 -static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
152392 +static int _nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
152394         struct cfg80211_registered_device *rdev = info->user_ptr[0];
152395         struct vif_params params;
152396 @@ -3938,9 +3938,6 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
152397         int err;
152398         enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED;
152400 -       /* to avoid failing a new interface creation due to pending removal */
152401 -       cfg80211_destroy_ifaces(rdev);
152403         memset(&params, 0, sizeof(params));
152405         if (!info->attrs[NL80211_ATTR_IFNAME])
152406 @@ -4028,6 +4025,21 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
152407         return genlmsg_reply(msg, info);
152410 +static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
152412 +       struct cfg80211_registered_device *rdev = info->user_ptr[0];
152413 +       int ret;
152415 +       /* to avoid failing a new interface creation due to pending removal */
152416 +       cfg80211_destroy_ifaces(rdev);
152418 +       wiphy_lock(&rdev->wiphy);
152419 +       ret = _nl80211_new_interface(skb, info);
152420 +       wiphy_unlock(&rdev->wiphy);
152422 +       return ret;
152425  static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info)
152427         struct cfg80211_registered_device *rdev = info->user_ptr[0];
152428 @@ -15040,7 +15052,9 @@ static const struct genl_small_ops nl80211_small_ops[] = {
152429                 .doit = nl80211_new_interface,
152430                 .flags = GENL_UNS_ADMIN_PERM,
152431                 .internal_flags = NL80211_FLAG_NEED_WIPHY |
152432 -                                 NL80211_FLAG_NEED_RTNL,
152433 +                                 NL80211_FLAG_NEED_RTNL |
152434 +                                 /* we take the wiphy mutex later ourselves */
152435 +                                 NL80211_FLAG_NO_WIPHY_MTX,
152436         },
152437         {
152438                 .cmd = NL80211_CMD_DEL_INTERFACE,
152439 diff --git a/net/wireless/scan.c b/net/wireless/scan.c
152440 index 758eb7d2a706..caa8eafbd583 100644
152441 --- a/net/wireless/scan.c
152442 +++ b/net/wireless/scan.c
152443 @@ -1751,6 +1751,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
152445                 if (rdev->bss_entries >= bss_entries_limit &&
152446                     !cfg80211_bss_expire_oldest(rdev)) {
152447 +                       if (!list_empty(&new->hidden_list))
152448 +                               list_del(&new->hidden_list);
152449                         kfree(new);
152450                         goto drop;
152451                 }
152452 diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
152453 index 4faabd1ecfd1..143979ea4165 100644
152454 --- a/net/xdp/xsk.c
152455 +++ b/net/xdp/xsk.c
152456 @@ -454,12 +454,16 @@ static int xsk_generic_xmit(struct sock *sk)
152457         struct sk_buff *skb;
152458         unsigned long flags;
152459         int err = 0;
152460 +       u32 hr, tr;
152462         mutex_lock(&xs->mutex);
152464         if (xs->queue_id >= xs->dev->real_num_tx_queues)
152465                 goto out;
152467 +       hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
152468 +       tr = xs->dev->needed_tailroom;
152470         while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
152471                 char *buffer;
152472                 u64 addr;
152473 @@ -471,11 +475,13 @@ static int xsk_generic_xmit(struct sock *sk)
152474                 }
152476                 len = desc.len;
152477 -               skb = sock_alloc_send_skb(sk, len, 1, &err);
152478 +               skb = sock_alloc_send_skb(sk, hr + len + tr, 1, &err);
152479                 if (unlikely(!skb))
152480                         goto out;
152482 +               skb_reserve(skb, hr);
152483                 skb_put(skb, len);
152485                 addr = desc.addr;
152486                 buffer = xsk_buff_raw_get_data(xs->pool, addr);
152487                 err = skb_store_bits(skb, 0, buffer, len);
152488 diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
152489 index 2823b7c3302d..40f359bf2044 100644
152490 --- a/net/xdp/xsk_queue.h
152491 +++ b/net/xdp/xsk_queue.h
152492 @@ -128,13 +128,12 @@ static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
152493  static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
152494                                             struct xdp_desc *desc)
152496 -       u64 chunk, chunk_end;
152497 +       u64 chunk;
152499 -       chunk = xp_aligned_extract_addr(pool, desc->addr);
152500 -       chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len);
152501 -       if (chunk != chunk_end)
152502 +       if (desc->len > pool->chunk_size)
152503                 return false;
152505 +       chunk = xp_aligned_extract_addr(pool, desc->addr);
152506         if (chunk >= pool->addrs_cnt)
152507                 return false;
152509 diff --git a/samples/bpf/tracex1_kern.c b/samples/bpf/tracex1_kern.c
152510 index 3f4599c9a202..ef30d2b353b0 100644
152511 --- a/samples/bpf/tracex1_kern.c
152512 +++ b/samples/bpf/tracex1_kern.c
152513 @@ -26,7 +26,7 @@
152514  SEC("kprobe/__netif_receive_skb_core")
152515  int bpf_prog1(struct pt_regs *ctx)
152517 -       /* attaches to kprobe netif_receive_skb,
152518 +       /* attaches to kprobe __netif_receive_skb_core,
152519          * looks for packets on loobpack device and prints them
152520          */
152521         char devname[IFNAMSIZ];
152522 @@ -35,7 +35,7 @@ int bpf_prog1(struct pt_regs *ctx)
152523         int len;
152525         /* non-portable! works for the given kernel only */
152526 -       skb = (struct sk_buff *) PT_REGS_PARM1(ctx);
152527 +       bpf_probe_read_kernel(&skb, sizeof(skb), (void *)PT_REGS_PARM1(ctx));
152528         dev = _(skb->dev);
152529         len = _(skb->len);
152531 diff --git a/samples/kfifo/bytestream-example.c b/samples/kfifo/bytestream-example.c
152532 index c406f03ee551..5a90aa527877 100644
152533 --- a/samples/kfifo/bytestream-example.c
152534 +++ b/samples/kfifo/bytestream-example.c
152535 @@ -122,8 +122,10 @@ static ssize_t fifo_write(struct file *file, const char __user *buf,
152536         ret = kfifo_from_user(&test, buf, count, &copied);
152538         mutex_unlock(&write_lock);
152539 +       if (ret)
152540 +               return ret;
152542 -       return ret ? ret : copied;
152543 +       return copied;
152546  static ssize_t fifo_read(struct file *file, char __user *buf,
152547 @@ -138,8 +140,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf,
152548         ret = kfifo_to_user(&test, buf, count, &copied);
152550         mutex_unlock(&read_lock);
152551 +       if (ret)
152552 +               return ret;
152554 -       return ret ? ret : copied;
152555 +       return copied;
152558  static const struct proc_ops fifo_proc_ops = {
152559 diff --git a/samples/kfifo/inttype-example.c b/samples/kfifo/inttype-example.c
152560 index 78977fc4a23f..e5403d8c971a 100644
152561 --- a/samples/kfifo/inttype-example.c
152562 +++ b/samples/kfifo/inttype-example.c
152563 @@ -115,8 +115,10 @@ static ssize_t fifo_write(struct file *file, const char __user *buf,
152564         ret = kfifo_from_user(&test, buf, count, &copied);
152566         mutex_unlock(&write_lock);
152567 +       if (ret)
152568 +               return ret;
152570 -       return ret ? ret : copied;
152571 +       return copied;
152574  static ssize_t fifo_read(struct file *file, char __user *buf,
152575 @@ -131,8 +133,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf,
152576         ret = kfifo_to_user(&test, buf, count, &copied);
152578         mutex_unlock(&read_lock);
152579 +       if (ret)
152580 +               return ret;
152582 -       return ret ? ret : copied;
152583 +       return copied;
152586  static const struct proc_ops fifo_proc_ops = {
152587 diff --git a/samples/kfifo/record-example.c b/samples/kfifo/record-example.c
152588 index c507998a2617..f64f3d62d6c2 100644
152589 --- a/samples/kfifo/record-example.c
152590 +++ b/samples/kfifo/record-example.c
152591 @@ -129,8 +129,10 @@ static ssize_t fifo_write(struct file *file, const char __user *buf,
152592         ret = kfifo_from_user(&test, buf, count, &copied);
152594         mutex_unlock(&write_lock);
152595 +       if (ret)
152596 +               return ret;
152598 -       return ret ? ret : copied;
152599 +       return copied;
152602  static ssize_t fifo_read(struct file *file, char __user *buf,
152603 @@ -145,8 +147,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf,
152604         ret = kfifo_to_user(&test, buf, count, &copied);
152606         mutex_unlock(&read_lock);
152607 +       if (ret)
152608 +               return ret;
152610 -       return ret ? ret : copied;
152611 +       return copied;
152614  static const struct proc_ops fifo_proc_ops = {
152615 diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
152616 index 066beffca09a..4ca5579af4e4 100644
152617 --- a/scripts/Makefile.modpost
152618 +++ b/scripts/Makefile.modpost
152619 @@ -68,7 +68,20 @@ else
152620  ifeq ($(KBUILD_EXTMOD),)
152622  input-symdump := vmlinux.symvers
152623 -output-symdump := Module.symvers
152624 +output-symdump := modules-only.symvers
152626 +quiet_cmd_cat = GEN     $@
152627 +      cmd_cat = cat $(real-prereqs) > $@
152629 +ifneq ($(wildcard vmlinux.symvers),)
152631 +__modpost: Module.symvers
152632 +Module.symvers: vmlinux.symvers modules-only.symvers FORCE
152633 +       $(call if_changed,cat)
152635 +targets += Module.symvers
152637 +endif
152639  else
152641 diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
152642 index e0f965529166..af814b39b876 100644
152643 --- a/scripts/kconfig/nconf.c
152644 +++ b/scripts/kconfig/nconf.c
152645 @@ -504,8 +504,8 @@ static int get_mext_match(const char *match_str, match_f flag)
152646         else if (flag == FIND_NEXT_MATCH_UP)
152647                 --match_start;
152649 +       match_start = (match_start + items_num) % items_num;
152650         index = match_start;
152651 -       index = (index + items_num) % items_num;
152652         while (true) {
152653                 char *str = k_menu_items[index].str;
152654                 if (strcasestr(str, match_str) != NULL)
152655 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
152656 index 24725e50c7b4..10c3fba26f03 100644
152657 --- a/scripts/mod/modpost.c
152658 +++ b/scripts/mod/modpost.c
152659 @@ -2423,19 +2423,6 @@ static void read_dump(const char *fname)
152660         fatal("parse error in symbol dump file\n");
152663 -/* For normal builds always dump all symbols.
152664 - * For external modules only dump symbols
152665 - * that are not read from kernel Module.symvers.
152666 - **/
152667 -static int dump_sym(struct symbol *sym)
152669 -       if (!external_module)
152670 -               return 1;
152671 -       if (sym->module->from_dump)
152672 -               return 0;
152673 -       return 1;
152676  static void write_dump(const char *fname)
152678         struct buffer buf = { };
152679 @@ -2446,7 +2433,7 @@ static void write_dump(const char *fname)
152680         for (n = 0; n < SYMBOL_HASH_SIZE ; n++) {
152681                 symbol = symbolhash[n];
152682                 while (symbol) {
152683 -                       if (dump_sym(symbol)) {
152684 +                       if (!symbol->module->from_dump) {
152685                                 namespace = symbol->namespace;
152686                                 buf_printf(&buf, "0x%08x\t%s\t%s\t%s\t%s\n",
152687                                            symbol->crc, symbol->name,
152688 diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
152689 index 867860ea57da..7b83a1aaec98 100755
152690 --- a/scripts/recordmcount.pl
152691 +++ b/scripts/recordmcount.pl
152692 @@ -392,7 +392,7 @@ if ($arch eq "x86_64") {
152693      $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$";
152694  } elsif ($arch eq "riscv") {
152695      $function_regex = "^([0-9a-fA-F]+)\\s+<([^.0-9][0-9a-zA-Z_\\.]+)>:";
152696 -    $mcount_regex = "^\\s*([0-9a-fA-F]+):\\sR_RISCV_CALL\\s_mcount\$";
152697 +    $mcount_regex = "^\\s*([0-9a-fA-F]+):\\sR_RISCV_CALL(_PLT)?\\s_?mcount\$";
152698      $type = ".quad";
152699      $alignment = 2;
152700  } elsif ($arch eq "nds32") {
152701 diff --git a/scripts/setlocalversion b/scripts/setlocalversion
152702 index bb709eda96cd..cf323fa660b6 100755
152703 --- a/scripts/setlocalversion
152704 +++ b/scripts/setlocalversion
152705 @@ -54,7 +54,7 @@ scm_version()
152706                         # If only the short version is requested, don't bother
152707                         # running further git commands
152708                         if $short; then
152709 -                               echo "+"
152710 +                       #       echo "+"
152711                                 return
152712                         fi
152713                         # If we are past a tagged commit (like
152714 diff --git a/security/commoncap.c b/security/commoncap.c
152715 index 1c519c875217..5cdeb73ca8fa 100644
152716 --- a/security/commoncap.c
152717 +++ b/security/commoncap.c
152718 @@ -400,7 +400,7 @@ int cap_inode_getsecurity(struct user_namespace *mnt_userns,
152719                                       &tmpbuf, size, GFP_NOFS);
152720         dput(dentry);
152722 -       if (ret < 0)
152723 +       if (ret < 0 || !tmpbuf)
152724                 return ret;
152726         fs_ns = inode->i_sb->s_user_ns;
152727 diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
152728 index e22e510ae92d..4e081e650047 100644
152729 --- a/security/integrity/ima/ima_template.c
152730 +++ b/security/integrity/ima/ima_template.c
152731 @@ -494,8 +494,8 @@ int ima_restore_measurement_list(loff_t size, void *buf)
152732                         }
152733                 }
152735 -               entry->pcr = !ima_canonical_fmt ? *(hdr[HDR_PCR].data) :
152736 -                            le32_to_cpu(*(hdr[HDR_PCR].data));
152737 +               entry->pcr = !ima_canonical_fmt ? *(u32 *)(hdr[HDR_PCR].data) :
152738 +                            le32_to_cpu(*(u32 *)(hdr[HDR_PCR].data));
152739                 ret = ima_restore_measurement_entry(entry);
152740                 if (ret < 0)
152741                         break;
152742 diff --git a/security/keys/trusted-keys/trusted_tpm1.c b/security/keys/trusted-keys/trusted_tpm1.c
152743 index 493eb91ed017..56c9b48460d9 100644
152744 --- a/security/keys/trusted-keys/trusted_tpm1.c
152745 +++ b/security/keys/trusted-keys/trusted_tpm1.c
152746 @@ -500,10 +500,12 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
152748         ret = tpm_get_random(chip, td->nonceodd, TPM_NONCE_SIZE);
152749         if (ret < 0)
152750 -               return ret;
152751 +               goto out;
152753 -       if (ret != TPM_NONCE_SIZE)
152754 -               return -EIO;
152755 +       if (ret != TPM_NONCE_SIZE) {
152756 +               ret = -EIO;
152757 +               goto out;
152758 +       }
152760         ordinal = htonl(TPM_ORD_SEAL);
152761         datsize = htonl(datalen);
152762 @@ -791,13 +793,33 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
152763                                 return -EINVAL;
152764                         break;
152765                 case Opt_blobauth:
152766 -                       if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE)
152767 -                               return -EINVAL;
152768 -                       res = hex2bin(opt->blobauth, args[0].from,
152769 -                                     SHA1_DIGEST_SIZE);
152770 -                       if (res < 0)
152771 -                               return -EINVAL;
152772 +                       /*
152773 +                        * TPM 1.2 authorizations are sha1 hashes passed in as
152774 +                        * hex strings.  TPM 2.0 authorizations are simple
152775 +                        * passwords (although it can take a hash as well)
152776 +                        */
152777 +                       opt->blobauth_len = strlen(args[0].from);
152779 +                       if (opt->blobauth_len == 2 * TPM_DIGEST_SIZE) {
152780 +                               res = hex2bin(opt->blobauth, args[0].from,
152781 +                                             TPM_DIGEST_SIZE);
152782 +                               if (res < 0)
152783 +                                       return -EINVAL;
152785 +                               opt->blobauth_len = TPM_DIGEST_SIZE;
152786 +                               break;
152787 +                       }
152789 +                       if (tpm2 && opt->blobauth_len <= sizeof(opt->blobauth)) {
152790 +                               memcpy(opt->blobauth, args[0].from,
152791 +                                      opt->blobauth_len);
152792 +                               break;
152793 +                       }
152795 +                       return -EINVAL;
152797                         break;
152799                 case Opt_migratable:
152800                         if (*args[0].from == '0')
152801                                 pay->migratable = 0;
152802 diff --git a/security/keys/trusted-keys/trusted_tpm2.c b/security/keys/trusted-keys/trusted_tpm2.c
152803 index c87c4df8703d..4c19d3abddbe 100644
152804 --- a/security/keys/trusted-keys/trusted_tpm2.c
152805 +++ b/security/keys/trusted-keys/trusted_tpm2.c
152806 @@ -97,10 +97,12 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
152807                              TPM_DIGEST_SIZE);
152809         /* sensitive */
152810 -       tpm_buf_append_u16(&buf, 4 + TPM_DIGEST_SIZE + payload->key_len + 1);
152811 +       tpm_buf_append_u16(&buf, 4 + options->blobauth_len + payload->key_len + 1);
152813 +       tpm_buf_append_u16(&buf, options->blobauth_len);
152814 +       if (options->blobauth_len)
152815 +               tpm_buf_append(&buf, options->blobauth, options->blobauth_len);
152817 -       tpm_buf_append_u16(&buf, TPM_DIGEST_SIZE);
152818 -       tpm_buf_append(&buf, options->blobauth, TPM_DIGEST_SIZE);
152819         tpm_buf_append_u16(&buf, payload->key_len + 1);
152820         tpm_buf_append(&buf, payload->key, payload->key_len);
152821         tpm_buf_append_u8(&buf, payload->migratable);
152822 @@ -265,7 +267,7 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
152823                              NULL /* nonce */, 0,
152824                              TPM2_SA_CONTINUE_SESSION,
152825                              options->blobauth /* hmac */,
152826 -                            TPM_DIGEST_SIZE);
152827 +                            options->blobauth_len);
152829         rc = tpm_transmit_cmd(chip, &buf, 6, "unsealing");
152830         if (rc > 0)
152831 diff --git a/security/security.c b/security/security.c
152832 index 5ac96b16f8fa..8ef0ce0faba7 100644
152833 --- a/security/security.c
152834 +++ b/security/security.c
152835 @@ -727,24 +727,28 @@ int security_binder_set_context_mgr(struct task_struct *mgr)
152837         return call_int_hook(binder_set_context_mgr, 0, mgr);
152839 +EXPORT_SYMBOL(security_binder_set_context_mgr);
152841  int security_binder_transaction(struct task_struct *from,
152842                                 struct task_struct *to)
152844         return call_int_hook(binder_transaction, 0, from, to);
152846 +EXPORT_SYMBOL(security_binder_transaction);
152848  int security_binder_transfer_binder(struct task_struct *from,
152849                                     struct task_struct *to)
152851         return call_int_hook(binder_transfer_binder, 0, from, to);
152853 +EXPORT_SYMBOL(security_binder_transfer_binder);
152855  int security_binder_transfer_file(struct task_struct *from,
152856                                   struct task_struct *to, struct file *file)
152858         return call_int_hook(binder_transfer_file, 0, from, to, file);
152860 +EXPORT_SYMBOL(security_binder_transfer_file);
152862  int security_ptrace_access_check(struct task_struct *child, unsigned int mode)
152864 diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
152865 index ba2e01a6955c..62d19bccf3de 100644
152866 --- a/security/selinux/include/classmap.h
152867 +++ b/security/selinux/include/classmap.h
152868 @@ -242,11 +242,12 @@ struct security_class_mapping secclass_map[] = {
152869         { "infiniband_endport",
152870           { "manage_subnet", NULL } },
152871         { "bpf",
152872 -         {"map_create", "map_read", "map_write", "prog_load", "prog_run"} },
152873 +         { "map_create", "map_read", "map_write", "prog_load", "prog_run",
152874 +           NULL } },
152875         { "xdp_socket",
152876           { COMMON_SOCK_PERMS, NULL } },
152877         { "perf_event",
152878 -         {"open", "cpu", "kernel", "tracepoint", "read", "write"} },
152879 +         { "open", "cpu", "kernel", "tracepoint", "read", "write", NULL } },
152880         { "lockdown",
152881           { "integrity", "confidentiality", NULL } },
152882         { "anon_inode",
152883 diff --git a/sound/core/init.c b/sound/core/init.c
152884 index 45f4b01de23f..ef41f5b3a240 100644
152885 --- a/sound/core/init.c
152886 +++ b/sound/core/init.c
152887 @@ -398,10 +398,8 @@ int snd_card_disconnect(struct snd_card *card)
152888                 return 0;
152889         }
152890         card->shutdown = 1;
152891 -       spin_unlock(&card->files_lock);
152893         /* replace file->f_op with special dummy operations */
152894 -       spin_lock(&card->files_lock);
152895         list_for_each_entry(mfile, &card->files_list, list) {
152896                 /* it's critical part, use endless loop */
152897                 /* we have no room to fail */
152898 diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c
152899 index bbae04793c50..c18017e0a3d9 100644
152900 --- a/sound/firewire/bebob/bebob_stream.c
152901 +++ b/sound/firewire/bebob/bebob_stream.c
152902 @@ -517,20 +517,22 @@ int snd_bebob_stream_init_duplex(struct snd_bebob *bebob)
152903  static int keep_resources(struct snd_bebob *bebob, struct amdtp_stream *stream,
152904                           unsigned int rate, unsigned int index)
152906 -       struct snd_bebob_stream_formation *formation;
152907 +       unsigned int pcm_channels;
152908 +       unsigned int midi_ports;
152909         struct cmp_connection *conn;
152910         int err;
152912         if (stream == &bebob->tx_stream) {
152913 -               formation = bebob->tx_stream_formations + index;
152914 +               pcm_channels = bebob->tx_stream_formations[index].pcm;
152915 +               midi_ports = bebob->midi_input_ports;
152916                 conn = &bebob->out_conn;
152917         } else {
152918 -               formation = bebob->rx_stream_formations + index;
152919 +               pcm_channels = bebob->rx_stream_formations[index].pcm;
152920 +               midi_ports = bebob->midi_output_ports;
152921                 conn = &bebob->in_conn;
152922         }
152924 -       err = amdtp_am824_set_parameters(stream, rate, formation->pcm,
152925 -                                        formation->midi, false);
152926 +       err = amdtp_am824_set_parameters(stream, rate, pcm_channels, midi_ports, false);
152927         if (err < 0)
152928                 return err;
152930 diff --git a/sound/isa/sb/emu8000.c b/sound/isa/sb/emu8000.c
152931 index 0aa545ac6e60..1c90421a88dc 100644
152932 --- a/sound/isa/sb/emu8000.c
152933 +++ b/sound/isa/sb/emu8000.c
152934 @@ -1029,8 +1029,10 @@ snd_emu8000_create_mixer(struct snd_card *card, struct snd_emu8000 *emu)
152936         memset(emu->controls, 0, sizeof(emu->controls));
152937         for (i = 0; i < EMU8000_NUM_CONTROLS; i++) {
152938 -               if ((err = snd_ctl_add(card, emu->controls[i] = snd_ctl_new1(mixer_defs[i], emu))) < 0)
152939 +               if ((err = snd_ctl_add(card, emu->controls[i] = snd_ctl_new1(mixer_defs[i], emu))) < 0) {
152940 +                       emu->controls[i] = NULL;
152941                         goto __error;
152942 +               }
152943         }
152944         return 0;
152946 diff --git a/sound/isa/sb/sb16_csp.c b/sound/isa/sb/sb16_csp.c
152947 index 8635a2b6b36b..4789345a8fdd 100644
152948 --- a/sound/isa/sb/sb16_csp.c
152949 +++ b/sound/isa/sb/sb16_csp.c
152950 @@ -1045,10 +1045,14 @@ static int snd_sb_qsound_build(struct snd_sb_csp * p)
152952         spin_lock_init(&p->q_lock);
152954 -       if ((err = snd_ctl_add(card, p->qsound_switch = snd_ctl_new1(&snd_sb_qsound_switch, p))) < 0)
152955 +       if ((err = snd_ctl_add(card, p->qsound_switch = snd_ctl_new1(&snd_sb_qsound_switch, p))) < 0) {
152956 +               p->qsound_switch = NULL;
152957                 goto __error;
152958 -       if ((err = snd_ctl_add(card, p->qsound_space = snd_ctl_new1(&snd_sb_qsound_space, p))) < 0)
152959 +       }
152960 +       if ((err = snd_ctl_add(card, p->qsound_space = snd_ctl_new1(&snd_sb_qsound_space, p))) < 0) {
152961 +               p->qsound_space = NULL;
152962                 goto __error;
152963 +       }
152965         return 0;
152967 diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
152968 index f5cba7afd1c6..ff0fb2d16d82 100644
152969 --- a/sound/pci/hda/hda_generic.c
152970 +++ b/sound/pci/hda/hda_generic.c
152971 @@ -1202,11 +1202,17 @@ static const char *get_line_out_pfx(struct hda_codec *codec, int ch,
152972                 *index = ch;
152973                 return "Headphone";
152974         case AUTO_PIN_LINE_OUT:
152975 -               /* This deals with the case where we have two DACs and
152976 -                * one LO, one HP and one Speaker */
152977 -               if (!ch && cfg->speaker_outs && cfg->hp_outs) {
152978 -                       bool hp_lo_shared = !path_has_mixer(codec, spec->hp_paths[0], ctl_type);
152979 -                       bool spk_lo_shared = !path_has_mixer(codec, spec->speaker_paths[0], ctl_type);
152980 +               /* This deals with the case where one HP or one Speaker or
152981 +                * one HP + one Speaker need to share the DAC with LO
152982 +                */
152983 +               if (!ch) {
152984 +                       bool hp_lo_shared = false, spk_lo_shared = false;
152986 +                       if (cfg->speaker_outs)
152987 +                               spk_lo_shared = !path_has_mixer(codec,
152988 +                                                               spec->speaker_paths[0], ctl_type);
152989 +                       if (cfg->hp_outs)
152990 +                               hp_lo_shared = !path_has_mixer(codec, spec->hp_paths[0], ctl_type);
152991                         if (hp_lo_shared && spk_lo_shared)
152992                                 return spec->vmaster_mute.hook ? "PCM" : "Master";
152993                         if (hp_lo_shared)
152994 diff --git a/sound/pci/hda/ideapad_s740_helper.c b/sound/pci/hda/ideapad_s740_helper.c
152995 new file mode 100644
152996 index 000000000000..564b9086e52d
152997 --- /dev/null
152998 +++ b/sound/pci/hda/ideapad_s740_helper.c
152999 @@ -0,0 +1,492 @@
153000 +// SPDX-License-Identifier: GPL-2.0
153001 +/* Fixes for Lenovo Ideapad S740, to be included from codec driver */
153003 +static const struct hda_verb alc285_ideapad_s740_coefs[] = {
153004 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x10 },
153005 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0320 },
153006 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
153007 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0041 },
153008 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
153009 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0041 },
153010 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153011 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153012 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153013 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153014 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153015 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153016 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153017 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153018 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153019 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153020 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153021 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153022 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153023 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153024 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153025 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x007f },
153026 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153027 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153028 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153029 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153030 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x007f },
153031 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153032 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153033 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153034 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153035 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153036 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153037 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
153038 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153039 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
153040 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153041 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153042 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
153043 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153044 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
153045 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153046 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153047 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153048 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153049 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x003c },
153050 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153051 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0011 },
153052 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153053 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153054 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x003c },
153055 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153056 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0011 },
153057 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153058 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153059 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153060 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153061 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x000c },
153062 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153063 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
153064 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153065 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153066 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x000c },
153067 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153068 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
153069 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153070 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153071 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153072 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153073 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x000f },
153074 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153075 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0042 },
153076 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153077 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153078 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x000f },
153079 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153080 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0042 },
153081 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153082 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153083 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153084 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153085 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
153086 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153087 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
153088 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153089 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153090 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
153091 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153092 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
153093 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153094 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153095 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153096 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153097 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0003 },
153098 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153099 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0009 },
153100 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153101 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153102 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0003 },
153103 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153104 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0009 },
153105 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153106 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153107 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153108 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153109 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001c },
153110 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153111 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x004c },
153112 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153113 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153114 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001c },
153115 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153116 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x004c },
153117 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153118 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153119 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153120 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153121 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001d },
153122 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153123 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x004e },
153124 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153125 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153126 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001d },
153127 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153128 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x004e },
153129 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153130 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153131 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153132 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153133 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001b },
153134 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153135 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
153136 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153137 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153138 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001b },
153139 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153140 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
153141 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153142 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153143 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153144 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153145 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0019 },
153146 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153147 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0025 },
153148 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153149 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153150 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0019 },
153151 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153152 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0025 },
153153 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153154 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153155 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153156 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153157 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0018 },
153158 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153159 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0037 },
153160 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153161 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153162 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0018 },
153163 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153164 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0037 },
153165 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153166 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153167 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153168 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153169 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
153170 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153171 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
153172 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153173 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153174 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
153175 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153176 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
153177 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153178 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153179 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153180 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153181 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0016 },
153182 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153183 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0076 },
153184 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153185 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153186 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0016 },
153187 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153188 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0076 },
153189 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153190 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153191 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153192 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153193 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0017 },
153194 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153195 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
153196 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153197 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153198 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0017 },
153199 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153200 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
153201 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153202 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153203 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153204 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153205 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
153206 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153207 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
153208 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153209 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153210 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
153211 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153212 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
153213 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153214 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153215 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153216 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153217 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0007 },
153218 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153219 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0086 },
153220 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153221 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153222 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0007 },
153223 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153224 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0086 },
153225 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153226 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153227 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153228 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153229 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
153230 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153231 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
153232 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153233 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153234 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
153235 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153236 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
153237 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153238 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153239 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153240 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153241 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
153242 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153243 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153244 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153245 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153246 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
153247 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153248 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153249 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153250 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
153251 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0042 },
153252 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
153253 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0042 },
153254 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153255 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153256 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153257 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153258 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153259 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153260 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153261 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153262 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153263 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153264 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153265 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153266 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153267 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153268 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153269 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x007f },
153270 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153271 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153272 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153273 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153274 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x007f },
153275 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153276 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153277 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153278 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153279 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153280 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153281 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
153282 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153283 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
153284 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153285 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153286 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
153287 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153288 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
153289 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153290 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153291 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153292 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153293 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x003c },
153294 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153295 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0011 },
153296 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153297 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153298 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x003c },
153299 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153300 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0011 },
153301 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153302 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153303 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153304 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153305 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x000c },
153306 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153307 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x002a },
153308 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153309 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153310 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x000c },
153311 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153312 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x002a },
153313 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153314 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153315 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153316 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153317 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x000f },
153318 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153319 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0046 },
153320 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153321 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153322 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x000f },
153323 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153324 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0046 },
153325 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153326 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153327 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153328 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153329 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
153330 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153331 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0044 },
153332 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153333 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153334 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
153335 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153336 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0044 },
153337 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153338 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153339 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153340 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153341 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0003 },
153342 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153343 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0009 },
153344 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153345 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153346 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0003 },
153347 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153348 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0009 },
153349 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153350 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153351 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153352 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153353 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001c },
153354 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153355 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x004c },
153356 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153357 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153358 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001c },
153359 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153360 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x004c },
153361 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153362 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153363 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153364 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153365 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153366 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153367 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001b },
153368 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153369 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
153370 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153371 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153372 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001b },
153373 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153374 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
153375 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153376 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153377 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153378 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153379 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0019 },
153380 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153381 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0025 },
153382 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153383 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153384 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0019 },
153385 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153386 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0025 },
153387 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153388 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153389 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153390 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153391 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0018 },
153392 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153393 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0037 },
153394 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153395 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153396 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0018 },
153397 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153398 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0037 },
153399 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153400 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153401 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153402 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153403 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
153404 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153405 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
153406 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153407 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153408 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x001a },
153409 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153410 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0040 },
153411 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153412 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153413 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153414 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153415 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0016 },
153416 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153417 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0076 },
153418 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153419 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153420 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0016 },
153421 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153422 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0076 },
153423 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153424 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153425 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153426 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153427 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0017 },
153428 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153429 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
153430 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153431 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153432 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0017 },
153433 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153434 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0010 },
153435 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153436 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153437 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153438 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153439 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
153440 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153441 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
153442 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153443 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153444 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
153445 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153446 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0015 },
153447 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153448 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153449 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153450 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153451 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0007 },
153452 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153453 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0086 },
153454 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153455 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153456 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0007 },
153457 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153458 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0086 },
153459 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153460 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153461 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153462 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153463 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
153464 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153465 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
153466 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153467 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153468 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
153469 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153470 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0001 },
153471 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153472 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153473 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x29 },
153474 +{ 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
153475 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0002 },
153476 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153477 +{ 0x20, AC_VERB_SET_PROC_COEF, 0x0000 },
153478 +{ 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
153482 +static void alc285_fixup_ideapad_s740_coef(struct hda_codec *codec,
153483 +                                          const struct hda_fixup *fix,
153484 +                                          int action)
153486 +       switch (action) {
153487 +       case HDA_FIXUP_ACT_PRE_PROBE:
153488 +               snd_hda_add_verbs(codec, alc285_ideapad_s740_coefs);
153489 +               break;
153490 +       }
153492 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
153493 index dfef9c17e140..d111258c6f45 100644
153494 --- a/sound/pci/hda/patch_conexant.c
153495 +++ b/sound/pci/hda/patch_conexant.c
153496 @@ -930,18 +930,18 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
153497         SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
153498         SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
153499         SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
153500 -       SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
153501 -       SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
153502 -       SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
153503 -       SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
153504 -       SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
153505         SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
153506         SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
153507 +       SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
153508         SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
153509 -       SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
153510 -       SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),
153511 +       SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
153512         SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
153513         SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
153514 +       SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
153515 +       SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),
153516 +       SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
153517 +       SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
153518 +       SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
153519         SND_PCI_QUIRK(0x103c, 0x8402, "HP ProBook 645 G4", CXT_FIXUP_MUTE_LED_GPIO),
153520         SND_PCI_QUIRK(0x103c, 0x8427, "HP ZBook Studio G5", CXT_FIXUP_HP_ZBOOK_MUTE_LED),
153521         SND_PCI_QUIRK(0x103c, 0x844f, "HP ZBook Studio G5", CXT_FIXUP_HP_ZBOOK_MUTE_LED),
153522 diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
153523 index 45ae845e82df..4b2cc8cb55c4 100644
153524 --- a/sound/pci/hda/patch_hdmi.c
153525 +++ b/sound/pci/hda/patch_hdmi.c
153526 @@ -1848,16 +1848,12 @@ static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
153527          */
153528         if (spec->intel_hsw_fixup) {
153529                 /*
153530 -                * On Intel platforms, device entries number is
153531 -                * changed dynamically. If there is a DP MST
153532 -                * hub connected, the device entries number is 3.
153533 -                * Otherwise, it is 1.
153534 -                * Here we manually set dev_num to 3, so that
153535 -                * we can initialize all the device entries when
153536 -                * bootup statically.
153537 +                * On Intel platforms, device entries count returned
153538 +                * by AC_PAR_DEVLIST_LEN is dynamic, and depends on
153539 +                * the type of receiver that is connected. Allocate pin
153540 +                * structures based on worst case.
153541                  */
153542 -               dev_num = 3;
153543 -               spec->dev_num = 3;
153544 +               dev_num = spec->dev_num;
153545         } else if (spec->dyn_pcm_assign && codec->dp_mst) {
153546                 dev_num = snd_hda_get_num_devices(codec, pin_nid) + 1;
153547                 /*
153548 @@ -2658,7 +2654,7 @@ static void generic_acomp_pin_eld_notify(void *audio_ptr, int port, int dev_id)
153549         /* skip notification during system suspend (but not in runtime PM);
153550          * the state will be updated at resume
153551          */
153552 -       if (snd_power_get_state(codec->card) != SNDRV_CTL_POWER_D0)
153553 +       if (codec->core.dev.power.power_state.event == PM_EVENT_SUSPEND)
153554                 return;
153555         /* ditto during suspend/resume process itself */
153556         if (snd_hdac_is_in_pm(&codec->core))
153557 @@ -2844,7 +2840,7 @@ static void intel_pin_eld_notify(void *audio_ptr, int port, int pipe)
153558         /* skip notification during system suspend (but not in runtime PM);
153559          * the state will be updated at resume
153560          */
153561 -       if (snd_power_get_state(codec->card) != SNDRV_CTL_POWER_D0)
153562 +       if (codec->core.dev.power.power_state.event == PM_EVENT_SUSPEND)
153563                 return;
153564         /* ditto during suspend/resume process itself */
153565         if (snd_hdac_is_in_pm(&codec->core))
153566 @@ -2942,7 +2938,7 @@ static int parse_intel_hdmi(struct hda_codec *codec)
153568  /* Intel Haswell and onwards; audio component with eld notifier */
153569  static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid,
153570 -                                const int *port_map, int port_num)
153571 +                                const int *port_map, int port_num, int dev_num)
153573         struct hdmi_spec *spec;
153574         int err;
153575 @@ -2957,6 +2953,7 @@ static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid,
153576         spec->port_map = port_map;
153577         spec->port_num = port_num;
153578         spec->intel_hsw_fixup = true;
153579 +       spec->dev_num = dev_num;
153581         intel_haswell_enable_all_pins(codec, true);
153582         intel_haswell_fixup_enable_dp12(codec);
153583 @@ -2982,12 +2979,12 @@ static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid,
153585  static int patch_i915_hsw_hdmi(struct hda_codec *codec)
153587 -       return intel_hsw_common_init(codec, 0x08, NULL, 0);
153588 +       return intel_hsw_common_init(codec, 0x08, NULL, 0, 3);
153591  static int patch_i915_glk_hdmi(struct hda_codec *codec)
153593 -       return intel_hsw_common_init(codec, 0x0b, NULL, 0);
153594 +       return intel_hsw_common_init(codec, 0x0b, NULL, 0, 3);
153597  static int patch_i915_icl_hdmi(struct hda_codec *codec)
153598 @@ -2998,7 +2995,7 @@ static int patch_i915_icl_hdmi(struct hda_codec *codec)
153599          */
153600         static const int map[] = {0x0, 0x4, 0x6, 0x8, 0xa, 0xb};
153602 -       return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map));
153603 +       return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map), 3);
153606  static int patch_i915_tgl_hdmi(struct hda_codec *codec)
153607 @@ -3010,7 +3007,7 @@ static int patch_i915_tgl_hdmi(struct hda_codec *codec)
153608         static const int map[] = {0x4, 0x6, 0x8, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf};
153609         int ret;
153611 -       ret = intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map));
153612 +       ret = intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map), 4);
153613         if (!ret) {
153614                 struct hdmi_spec *spec = codec->spec;
153616 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
153617 index a7544b77d3f7..1fe70f2fe4fe 100644
153618 --- a/sound/pci/hda/patch_realtek.c
153619 +++ b/sound/pci/hda/patch_realtek.c
153620 @@ -2470,13 +2470,13 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
153621                       ALC882_FIXUP_ACER_ASPIRE_8930G),
153622         SND_PCI_QUIRK(0x1025, 0x0146, "Acer Aspire 6935G",
153623                       ALC882_FIXUP_ACER_ASPIRE_8930G),
153624 +       SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G",
153625 +                     ALC882_FIXUP_ACER_ASPIRE_4930G),
153626 +       SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210),
153627         SND_PCI_QUIRK(0x1025, 0x015e, "Acer Aspire 6930G",
153628                       ALC882_FIXUP_ACER_ASPIRE_4930G),
153629         SND_PCI_QUIRK(0x1025, 0x0166, "Acer Aspire 6530G",
153630                       ALC882_FIXUP_ACER_ASPIRE_4930G),
153631 -       SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G",
153632 -                     ALC882_FIXUP_ACER_ASPIRE_4930G),
153633 -       SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210),
153634         SND_PCI_QUIRK(0x1025, 0x021e, "Acer Aspire 5739G",
153635                       ALC882_FIXUP_ACER_ASPIRE_4930G),
153636         SND_PCI_QUIRK(0x1025, 0x0259, "Acer Aspire 5935", ALC889_FIXUP_DAC_ROUTE),
153637 @@ -2489,11 +2489,11 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
153638         SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
153639         SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS),
153640         SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
153641 +       SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
153642 +       SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
153643         SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
153644         SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
153645         SND_PCI_QUIRK(0x104d, 0x9060, "Sony Vaio VPCL14M1R", ALC882_FIXUP_NO_PRIMARY_HP),
153646 -       SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
153647 -       SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
153649         /* All Apple entries are in codec SSIDs */
153650         SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
153651 @@ -2536,9 +2536,19 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
153652         SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
153653         SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
153654         SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
153655 +       SND_PCI_QUIRK(0x1558, 0x50d3, "Clevo PC50[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
153656 +       SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
153657 +       SND_PCI_QUIRK(0x1558, 0x65d2, "Clevo PB51R[CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
153658 +       SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
153659 +       SND_PCI_QUIRK(0x1558, 0x65e5, "Clevo PC50D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
153660 +       SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
153661 +       SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
153662 +       SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
153663 +       SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
153664 +       SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
153665         SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
153666         SND_PCI_QUIRK(0x1558, 0x9506, "Clevo P955HQ", ALC1220_FIXUP_CLEVO_P950),
153667 -       SND_PCI_QUIRK(0x1558, 0x950A, "Clevo P955H[PR]", ALC1220_FIXUP_CLEVO_P950),
153668 +       SND_PCI_QUIRK(0x1558, 0x950a, "Clevo P955H[PR]", ALC1220_FIXUP_CLEVO_P950),
153669         SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
153670         SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
153671         SND_PCI_QUIRK(0x1558, 0x95e3, "Clevo P955[ER]T", ALC1220_FIXUP_CLEVO_P950),
153672 @@ -2548,14 +2558,6 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
153673         SND_PCI_QUIRK(0x1558, 0x96e1, "Clevo P960[ER][CDFN]-K", ALC1220_FIXUP_CLEVO_P950),
153674         SND_PCI_QUIRK(0x1558, 0x97e1, "Clevo P970[ER][CDFN]", ALC1220_FIXUP_CLEVO_P950),
153675         SND_PCI_QUIRK(0x1558, 0x97e2, "Clevo P970RC-M", ALC1220_FIXUP_CLEVO_P950),
153676 -       SND_PCI_QUIRK(0x1558, 0x50d3, "Clevo PC50[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
153677 -       SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
153678 -       SND_PCI_QUIRK(0x1558, 0x65d2, "Clevo PB51R[CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
153679 -       SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
153680 -       SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
153681 -       SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
153682 -       SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
153683 -       SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
153684         SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
153685         SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
153686         SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
153687 @@ -4329,6 +4331,35 @@ static void alc245_fixup_hp_x360_amp(struct hda_codec *codec,
153688         }
153691 +/* toggle GPIO2 at each time stream is started; we use PREPARE state instead */
153692 +static void alc274_hp_envy_pcm_hook(struct hda_pcm_stream *hinfo,
153693 +                                   struct hda_codec *codec,
153694 +                                   struct snd_pcm_substream *substream,
153695 +                                   int action)
153697 +       switch (action) {
153698 +       case HDA_GEN_PCM_ACT_PREPARE:
153699 +               alc_update_gpio_data(codec, 0x04, true);
153700 +               break;
153701 +       case HDA_GEN_PCM_ACT_CLEANUP:
153702 +               alc_update_gpio_data(codec, 0x04, false);
153703 +               break;
153704 +       }
153707 +static void alc274_fixup_hp_envy_gpio(struct hda_codec *codec,
153708 +                                     const struct hda_fixup *fix,
153709 +                                     int action)
153711 +       struct alc_spec *spec = codec->spec;
153713 +       if (action == HDA_FIXUP_ACT_PROBE) {
153714 +               spec->gpio_mask |= 0x04;
153715 +               spec->gpio_dir |= 0x04;
153716 +               spec->gen.pcm_playback_hook = alc274_hp_envy_pcm_hook;
153717 +       }
153720  static void alc_update_coef_led(struct hda_codec *codec,
153721                                 struct alc_coef_led *led,
153722                                 bool polarity, bool on)
153723 @@ -4438,6 +4469,25 @@ static void alc236_fixup_hp_mute_led(struct hda_codec *codec,
153724         alc236_fixup_hp_coef_micmute_led(codec, fix, action);
153727 +static void alc236_fixup_hp_micmute_led_vref(struct hda_codec *codec,
153728 +                               const struct hda_fixup *fix, int action)
153730 +       struct alc_spec *spec = codec->spec;
153732 +       if (action == HDA_FIXUP_ACT_PRE_PROBE) {
153733 +               spec->cap_mute_led_nid = 0x1a;
153734 +               snd_hda_gen_add_micmute_led_cdev(codec, vref_micmute_led_set);
153735 +               codec->power_filter = led_power_filter;
153736 +       }
153739 +static void alc236_fixup_hp_mute_led_micmute_vref(struct hda_codec *codec,
153740 +                               const struct hda_fixup *fix, int action)
153742 +       alc236_fixup_hp_mute_led_coefbit(codec, fix, action);
153743 +       alc236_fixup_hp_micmute_led_vref(codec, fix, action);
153746  #if IS_REACHABLE(CONFIG_INPUT)
153747  static void gpio2_mic_hotkey_event(struct hda_codec *codec,
153748                                    struct hda_jack_callback *event)
153749 @@ -6232,6 +6282,9 @@ static void alc_fixup_thinkpad_acpi(struct hda_codec *codec,
153750  /* for alc295_fixup_hp_top_speakers */
153751  #include "hp_x360_helper.c"
153753 +/* for alc285_fixup_ideapad_s740_coef() */
153754 +#include "ideapad_s740_helper.c"
153756  enum {
153757         ALC269_FIXUP_GPIO2,
153758         ALC269_FIXUP_SONY_VAIO,
153759 @@ -6400,6 +6453,7 @@ enum {
153760         ALC285_FIXUP_HP_MUTE_LED,
153761         ALC236_FIXUP_HP_GPIO_LED,
153762         ALC236_FIXUP_HP_MUTE_LED,
153763 +       ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
153764         ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
153765         ALC295_FIXUP_ASUS_MIC_NO_PRESENCE,
153766         ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS,
153767 @@ -6415,10 +6469,13 @@ enum {
153768         ALC269_FIXUP_LEMOTE_A1802,
153769         ALC269_FIXUP_LEMOTE_A190X,
153770         ALC256_FIXUP_INTEL_NUC8_RUGGED,
153771 +       ALC233_FIXUP_INTEL_NUC8_DMIC,
153772 +       ALC233_FIXUP_INTEL_NUC8_BOOST,
153773         ALC256_FIXUP_INTEL_NUC10,
153774         ALC255_FIXUP_XIAOMI_HEADSET_MIC,
153775         ALC274_FIXUP_HP_MIC,
153776         ALC274_FIXUP_HP_HEADSET_MIC,
153777 +       ALC274_FIXUP_HP_ENVY_GPIO,
153778         ALC256_FIXUP_ASUS_HPE,
153779         ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
153780         ALC287_FIXUP_HP_GPIO_LED,
153781 @@ -6427,6 +6484,7 @@ enum {
153782         ALC282_FIXUP_ACER_DISABLE_LINEOUT,
153783         ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST,
153784         ALC256_FIXUP_ACER_HEADSET_MIC,
153785 +       ALC285_FIXUP_IDEAPAD_S740_COEF,
153788  static const struct hda_fixup alc269_fixups[] = {
153789 @@ -7136,6 +7194,16 @@ static const struct hda_fixup alc269_fixups[] = {
153790                 .type = HDA_FIXUP_FUNC,
153791                 .v.func = alc233_fixup_lenovo_line2_mic_hotkey,
153792         },
153793 +       [ALC233_FIXUP_INTEL_NUC8_DMIC] = {
153794 +               .type = HDA_FIXUP_FUNC,
153795 +               .v.func = alc_fixup_inv_dmic,
153796 +               .chained = true,
153797 +               .chain_id = ALC233_FIXUP_INTEL_NUC8_BOOST,
153798 +       },
153799 +       [ALC233_FIXUP_INTEL_NUC8_BOOST] = {
153800 +               .type = HDA_FIXUP_FUNC,
153801 +               .v.func = alc269_fixup_limit_int_mic_boost
153802 +       },
153803         [ALC255_FIXUP_DELL_SPK_NOISE] = {
153804                 .type = HDA_FIXUP_FUNC,
153805                 .v.func = alc_fixup_disable_aamix,
153806 @@ -7646,6 +7714,10 @@ static const struct hda_fixup alc269_fixups[] = {
153807                 .type = HDA_FIXUP_FUNC,
153808                 .v.func = alc236_fixup_hp_mute_led,
153809         },
153810 +       [ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF] = {
153811 +               .type = HDA_FIXUP_FUNC,
153812 +               .v.func = alc236_fixup_hp_mute_led_micmute_vref,
153813 +       },
153814         [ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET] = {
153815                 .type = HDA_FIXUP_VERBS,
153816                 .v.verbs = (const struct hda_verb[]) {
153817 @@ -7844,6 +7916,10 @@ static const struct hda_fixup alc269_fixups[] = {
153818                 .chained = true,
153819                 .chain_id = ALC274_FIXUP_HP_MIC
153820         },
153821 +       [ALC274_FIXUP_HP_ENVY_GPIO] = {
153822 +               .type = HDA_FIXUP_FUNC,
153823 +               .v.func = alc274_fixup_hp_envy_gpio,
153824 +       },
153825         [ALC256_FIXUP_ASUS_HPE] = {
153826                 .type = HDA_FIXUP_VERBS,
153827                 .v.verbs = (const struct hda_verb[]) {
153828 @@ -7901,6 +7977,12 @@ static const struct hda_fixup alc269_fixups[] = {
153829                 .chained = true,
153830                 .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
153831         },
153832 +       [ALC285_FIXUP_IDEAPAD_S740_COEF] = {
153833 +               .type = HDA_FIXUP_FUNC,
153834 +               .v.func = alc285_fixup_ideapad_s740_coef,
153835 +               .chained = true,
153836 +               .chain_id = ALC269_FIXUP_THINKPAD_ACPI,
153837 +       },
153840  static const struct snd_pci_quirk alc269_fixup_tbl[] = {
153841 @@ -7909,12 +7991,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
153842         SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC),
153843         SND_PCI_QUIRK(0x1025, 0x047c, "Acer AC700", ALC269_FIXUP_ACER_AC700),
153844         SND_PCI_QUIRK(0x1025, 0x072d, "Acer Aspire V5-571G", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
153845 -       SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
153846         SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
153847         SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
153848         SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
153849         SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
153850         SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
153851 +       SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
153852         SND_PCI_QUIRK(0x1025, 0x0840, "Acer Aspire E1", ALC269VB_FIXUP_ASPIRE_E1_COEF),
153853         SND_PCI_QUIRK(0x1025, 0x101c, "Acer Veriton N2510G", ALC269_FIXUP_LIFEBOOK),
153854         SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
153855 @@ -7970,8 +8052,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
153856         SND_PCI_QUIRK(0x1028, 0x0738, "Dell Precision 5820", ALC269_FIXUP_NO_SHUTUP),
153857         SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
153858         SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
153859 -       SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
153860         SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
153861 +       SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
153862         SND_PCI_QUIRK(0x1028, 0x080c, "Dell WYSE", ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE),
153863         SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
153864         SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
153865 @@ -7981,8 +8063,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
153866         SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE),
153867         SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE),
153868         SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
153869 -       SND_PCI_QUIRK(0x1028, 0x097e, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
153870         SND_PCI_QUIRK(0x1028, 0x097d, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
153871 +       SND_PCI_QUIRK(0x1028, 0x097e, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
153872         SND_PCI_QUIRK(0x1028, 0x098d, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
153873         SND_PCI_QUIRK(0x1028, 0x09bf, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
153874         SND_PCI_QUIRK(0x1028, 0x0a2e, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
153875 @@ -7993,35 +8075,18 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
153876         SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
153877         SND_PCI_QUIRK(0x103c, 0x18e6, "HP", ALC269_FIXUP_HP_GPIO_LED),
153878         SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED),
153879 -       SND_PCI_QUIRK(0x103c, 0x225f, "HP", ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY),
153880 -       /* ALC282 */
153881         SND_PCI_QUIRK(0x103c, 0x21f9, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153882         SND_PCI_QUIRK(0x103c, 0x2210, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153883         SND_PCI_QUIRK(0x103c, 0x2214, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153884 +       SND_PCI_QUIRK(0x103c, 0x221b, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
153885 +       SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
153886 +       SND_PCI_QUIRK(0x103c, 0x2221, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
153887 +       SND_PCI_QUIRK(0x103c, 0x2225, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
153888         SND_PCI_QUIRK(0x103c, 0x2236, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
153889         SND_PCI_QUIRK(0x103c, 0x2237, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
153890         SND_PCI_QUIRK(0x103c, 0x2238, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
153891         SND_PCI_QUIRK(0x103c, 0x2239, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
153892         SND_PCI_QUIRK(0x103c, 0x224b, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
153893 -       SND_PCI_QUIRK(0x103c, 0x2268, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153894 -       SND_PCI_QUIRK(0x103c, 0x226a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153895 -       SND_PCI_QUIRK(0x103c, 0x226b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153896 -       SND_PCI_QUIRK(0x103c, 0x226e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153897 -       SND_PCI_QUIRK(0x103c, 0x2271, "HP", ALC286_FIXUP_HP_GPIO_LED),
153898 -       SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC280_FIXUP_HP_DOCK_PINS),
153899 -       SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC280_FIXUP_HP_DOCK_PINS),
153900 -       SND_PCI_QUIRK(0x103c, 0x229e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153901 -       SND_PCI_QUIRK(0x103c, 0x22b2, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153902 -       SND_PCI_QUIRK(0x103c, 0x22b7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153903 -       SND_PCI_QUIRK(0x103c, 0x22bf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153904 -       SND_PCI_QUIRK(0x103c, 0x22cf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153905 -       SND_PCI_QUIRK(0x103c, 0x22db, "HP", ALC280_FIXUP_HP_9480M),
153906 -       SND_PCI_QUIRK(0x103c, 0x22dc, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
153907 -       SND_PCI_QUIRK(0x103c, 0x22fb, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
153908 -       /* ALC290 */
153909 -       SND_PCI_QUIRK(0x103c, 0x221b, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
153910 -       SND_PCI_QUIRK(0x103c, 0x2221, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
153911 -       SND_PCI_QUIRK(0x103c, 0x2225, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
153912         SND_PCI_QUIRK(0x103c, 0x2253, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
153913         SND_PCI_QUIRK(0x103c, 0x2254, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
153914         SND_PCI_QUIRK(0x103c, 0x2255, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
153915 @@ -8029,28 +8094,45 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
153916         SND_PCI_QUIRK(0x103c, 0x2257, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
153917         SND_PCI_QUIRK(0x103c, 0x2259, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
153918         SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED),
153919 +       SND_PCI_QUIRK(0x103c, 0x225f, "HP", ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY),
153920         SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153921         SND_PCI_QUIRK(0x103c, 0x2263, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153922         SND_PCI_QUIRK(0x103c, 0x2264, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153923         SND_PCI_QUIRK(0x103c, 0x2265, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153924 +       SND_PCI_QUIRK(0x103c, 0x2268, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153925 +       SND_PCI_QUIRK(0x103c, 0x226a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153926 +       SND_PCI_QUIRK(0x103c, 0x226b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153927 +       SND_PCI_QUIRK(0x103c, 0x226e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153928 +       SND_PCI_QUIRK(0x103c, 0x2271, "HP", ALC286_FIXUP_HP_GPIO_LED),
153929         SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
153930 +       SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC280_FIXUP_HP_DOCK_PINS),
153931         SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
153932 +       SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC280_FIXUP_HP_DOCK_PINS),
153933         SND_PCI_QUIRK(0x103c, 0x2278, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
153934         SND_PCI_QUIRK(0x103c, 0x227f, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153935         SND_PCI_QUIRK(0x103c, 0x2282, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153936         SND_PCI_QUIRK(0x103c, 0x228b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153937         SND_PCI_QUIRK(0x103c, 0x228e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153938 +       SND_PCI_QUIRK(0x103c, 0x229e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153939 +       SND_PCI_QUIRK(0x103c, 0x22b2, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153940 +       SND_PCI_QUIRK(0x103c, 0x22b7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153941 +       SND_PCI_QUIRK(0x103c, 0x22bf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153942 +       SND_PCI_QUIRK(0x103c, 0x22c4, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153943         SND_PCI_QUIRK(0x103c, 0x22c5, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153944         SND_PCI_QUIRK(0x103c, 0x22c7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153945         SND_PCI_QUIRK(0x103c, 0x22c8, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153946 -       SND_PCI_QUIRK(0x103c, 0x22c4, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153947 +       SND_PCI_QUIRK(0x103c, 0x22cf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153948 +       SND_PCI_QUIRK(0x103c, 0x22db, "HP", ALC280_FIXUP_HP_9480M),
153949 +       SND_PCI_QUIRK(0x103c, 0x22dc, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
153950 +       SND_PCI_QUIRK(0x103c, 0x22fb, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
153951         SND_PCI_QUIRK(0x103c, 0x2334, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153952         SND_PCI_QUIRK(0x103c, 0x2335, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153953         SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153954         SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
153955 -       SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
153956         SND_PCI_QUIRK(0x103c, 0x802e, "HP Z240 SFF", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
153957         SND_PCI_QUIRK(0x103c, 0x802f, "HP Z240", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
153958 +       SND_PCI_QUIRK(0x103c, 0x8077, "HP", ALC256_FIXUP_HP_HEADSET_MIC),
153959 +       SND_PCI_QUIRK(0x103c, 0x8158, "HP", ALC256_FIXUP_HP_HEADSET_MIC),
153960         SND_PCI_QUIRK(0x103c, 0x820d, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
153961         SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
153962         SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360),
153963 @@ -8061,8 +8143,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
153964         SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
153965         SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
153966         SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
153967 +       SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
153968         SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED),
153969         SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
153970 +       SND_PCI_QUIRK(0x103c, 0x8730, "HP ProBook 445 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
153971         SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT),
153972         SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED),
153973         SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
153974 @@ -8087,16 +8171,18 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
153975         SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
153976         SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
153977         SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
153978 +       SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
153979         SND_PCI_QUIRK(0x1043, 0x1271, "ASUS X430UN", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
153980         SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
153981         SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
153982 -       SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
153983         SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
153984 +       SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
153985         SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
153986         SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
153987         SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
153988         SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
153989         SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
153990 +       SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
153991         SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
153992         SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
153993         SND_PCI_QUIRK(0x1043, 0x194e, "ASUS UX563FD", ALC294_FIXUP_ASUS_HPE),
153994 @@ -8109,31 +8195,31 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
153995         SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
153996         SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
153997         SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
153998 -       SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
153999         SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
154000         SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
154001         SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
154002 +       SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
154003         SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
154004 -       SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
154005         SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
154006         SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
154007         SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
154008         SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
154009         SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
154010         SND_PCI_QUIRK(0x1043, 0x8516, "ASUS X101CH", ALC269_FIXUP_ASUS_X101),
154011 -       SND_PCI_QUIRK(0x104d, 0x90b5, "Sony VAIO Pro 11", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
154012 -       SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
154013         SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2),
154014         SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
154015         SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
154016         SND_PCI_QUIRK(0x104d, 0x9099, "Sony VAIO S13", ALC275_FIXUP_SONY_DISABLE_AAMIX),
154017 +       SND_PCI_QUIRK(0x104d, 0x90b5, "Sony VAIO Pro 11", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
154018 +       SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
154019         SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
154020         SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT),
154021         SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
154022 -       SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
154023         SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC),
154024 +       SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
154025         SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
154026         SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
154027 +       SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
154028         SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
154029         SND_PCI_QUIRK(0x10ec, 0x1252, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
154030         SND_PCI_QUIRK(0x10ec, 0x1254, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
154031 @@ -8143,9 +8229,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
154032         SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
154033         SND_PCI_QUIRK(0x144d, 0xc189, "Samsung Galaxy Flex Book (NT950QCG-X716)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
154034         SND_PCI_QUIRK(0x144d, 0xc18a, "Samsung Galaxy Book Ion (NP930XCJ-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
154035 -       SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
154036         SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
154037         SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
154038 +       SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
154039         SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
154040         SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
154041         SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
154042 @@ -8201,9 +8287,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
154043         SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
154044         SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE),
154045         SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
154046 +       SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK),
154047         SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST),
154048         SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
154049 -       SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK),
154050         SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
154051         SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
154052         SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK),
154053 @@ -8244,9 +8330,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
154054         SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
154055         SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
154056         SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
154057 +       SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
154058         SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
154059         SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
154060         SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
154061 +       SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
154062         SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
154063         SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
154064         SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
154065 @@ -8265,20 +8353,19 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
154066         SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
154067         SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
154068         SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
154069 -       SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
154070         SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
154071         SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
154072         SND_PCI_QUIRK(0x1b35, 0x1235, "CZC B20", ALC269_FIXUP_CZC_B20),
154073         SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI),
154074         SND_PCI_QUIRK(0x1b35, 0x1237, "CZC L101", ALC269_FIXUP_CZC_L101),
154075         SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
154076 +       SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
154077 +       SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
154078         SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
154079         SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE),
154080         SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
154081         SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
154082 -       SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
154083 -       SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
154084 -       SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
154085 +       SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
154086         SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
154087         SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
154089 @@ -8733,12 +8820,17 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
154090                 {0x12, 0x90a60130},
154091                 {0x19, 0x03a11020},
154092                 {0x21, 0x0321101f}),
154093 -       SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
154094 +       SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
154095 +               {0x12, 0x90a60130},
154096                 {0x14, 0x90170110},
154097                 {0x19, 0x04a11040},
154098                 {0x21, 0x04211020}),
154099         SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
154100 -               {0x12, 0x90a60130},
154101 +               {0x14, 0x90170110},
154102 +               {0x19, 0x04a11040},
154103 +               {0x1d, 0x40600001},
154104 +               {0x21, 0x04211020}),
154105 +       SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
154106                 {0x14, 0x90170110},
154107                 {0x19, 0x04a11040},
154108                 {0x21, 0x04211020}),
154109 @@ -9224,8 +9316,7 @@ static const struct snd_pci_quirk alc861_fixup_tbl[] = {
154110         SND_PCI_QUIRK(0x1043, 0x1393, "ASUS A6Rp", ALC861_FIXUP_ASUS_A6RP),
154111         SND_PCI_QUIRK_VENDOR(0x1043, "ASUS laptop", ALC861_FIXUP_AMP_VREF_0F),
154112         SND_PCI_QUIRK(0x1462, 0x7254, "HP DX2200", ALC861_FIXUP_NO_JACK_DETECT),
154113 -       SND_PCI_QUIRK(0x1584, 0x2b01, "Haier W18", ALC861_FIXUP_AMP_VREF_0F),
154114 -       SND_PCI_QUIRK(0x1584, 0x0000, "Uniwill ECS M31EI", ALC861_FIXUP_AMP_VREF_0F),
154115 +       SND_PCI_QUIRK_VENDOR(0x1584, "Haier/Uniwill", ALC861_FIXUP_AMP_VREF_0F),
154116         SND_PCI_QUIRK(0x1734, 0x10c7, "FSC Amilo Pi1505", ALC861_FIXUP_FSC_AMILO_PI1505),
154117         {}
154119 @@ -10020,6 +10111,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
154120         SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC),
154121         SND_PCI_QUIRK(0x1025, 0x034a, "Gateway LT27", ALC662_FIXUP_INV_DMIC),
154122         SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
154123 +       SND_PCI_QUIRK(0x1025, 0x0566, "Acer Aspire Ethos 8951G", ALC669_FIXUP_ACER_ASPIRE_ETHOS),
154124         SND_PCI_QUIRK(0x1025, 0x123c, "Acer Nitro N50-600", ALC662_FIXUP_ACER_NITRO_HEADSET_MODE),
154125         SND_PCI_QUIRK(0x1025, 0x124e, "Acer 2660G", ALC662_FIXUP_ACER_X2660G_HEADSET_MODE),
154126         SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
154127 @@ -10036,9 +10128,9 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
154128         SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2),
154129         SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
154130         SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
154131 -       SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
154132         SND_PCI_QUIRK(0x1043, 0x129d, "Asus N750", ALC662_FIXUP_ASUS_Nx50),
154133         SND_PCI_QUIRK(0x1043, 0x12ff, "ASUS G751", ALC668_FIXUP_ASUS_G751),
154134 +       SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
154135         SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
154136         SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
154137         SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51),
154138 @@ -10058,7 +10150,6 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
154139         SND_PCI_QUIRK(0x1b0a, 0x01b8, "ACER Veriton", ALC662_FIXUP_ACER_VERITON),
154140         SND_PCI_QUIRK(0x1b35, 0x1234, "CZC ET26", ALC662_FIXUP_CZC_ET26),
154141         SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T),
154142 -       SND_PCI_QUIRK(0x1025, 0x0566, "Acer Aspire Ethos 8951G", ALC669_FIXUP_ACER_ASPIRE_ETHOS),
154144  #if 0
154145         /* Below is a quirk table taken from the old code.
154146 diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c
154147 index cdc4b6106252..159c40ec680d 100644
154148 --- a/sound/pci/maestro3.c
154149 +++ b/sound/pci/maestro3.c
154150 @@ -1990,7 +1990,7 @@ static void snd_m3_ac97_reset(struct snd_m3 *chip)
154151                 outw(0, io + GPIO_DATA);
154152                 outw(dir | GPO_PRIMARY_AC97, io + GPIO_DIRECTION);
154154 -               schedule_timeout_uninterruptible(msecs_to_jiffies(delay1));
154155 +               schedule_msec_hrtimeout_uninterruptible((delay1));
154157                 outw(GPO_PRIMARY_AC97, io + GPIO_DATA);
154158                 udelay(5);
154159 @@ -1998,7 +1998,7 @@ static void snd_m3_ac97_reset(struct snd_m3 *chip)
154160                 outw(IO_SRAM_ENABLE | SERIAL_AC_LINK_ENABLE, io + RING_BUS_CTRL_A);
154161                 outw(~0, io + GPIO_MASK);
154163 -               schedule_timeout_uninterruptible(msecs_to_jiffies(delay2));
154164 +               schedule_msec_hrtimeout_uninterruptible((delay2));
154166                 if (! snd_m3_try_read_vendor(chip))
154167                         break;
154168 diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
154169 index 4cf879c42dc4..720297cbdf87 100644
154170 --- a/sound/pci/rme9652/hdsp.c
154171 +++ b/sound/pci/rme9652/hdsp.c
154172 @@ -5390,7 +5390,8 @@ static int snd_hdsp_free(struct hdsp *hdsp)
154173         if (hdsp->port)
154174                 pci_release_regions(hdsp->pci);
154176 -       pci_disable_device(hdsp->pci);
154177 +       if (pci_is_enabled(hdsp->pci))
154178 +               pci_disable_device(hdsp->pci);
154179         return 0;
154182 diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
154183 index 8d900c132f0f..97a0bff96b28 100644
154184 --- a/sound/pci/rme9652/hdspm.c
154185 +++ b/sound/pci/rme9652/hdspm.c
154186 @@ -6883,7 +6883,8 @@ static int snd_hdspm_free(struct hdspm * hdspm)
154187         if (hdspm->port)
154188                 pci_release_regions(hdspm->pci);
154190 -       pci_disable_device(hdspm->pci);
154191 +       if (pci_is_enabled(hdspm->pci))
154192 +               pci_disable_device(hdspm->pci);
154193         return 0;
154196 diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
154197 index 4df992e846f2..7a4d395abcee 100644
154198 --- a/sound/pci/rme9652/rme9652.c
154199 +++ b/sound/pci/rme9652/rme9652.c
154200 @@ -1731,7 +1731,8 @@ static int snd_rme9652_free(struct snd_rme9652 *rme9652)
154201         if (rme9652->port)
154202                 pci_release_regions(rme9652->pci);
154204 -       pci_disable_device(rme9652->pci);
154205 +       if (pci_is_enabled(rme9652->pci))
154206 +               pci_disable_device(rme9652->pci);
154207         return 0;
154210 diff --git a/sound/soc/codecs/ak5558.c b/sound/soc/codecs/ak5558.c
154211 index 85bdd0534180..80b3b162ca5b 100644
154212 --- a/sound/soc/codecs/ak5558.c
154213 +++ b/sound/soc/codecs/ak5558.c
154214 @@ -272,7 +272,7 @@ static void ak5558_power_off(struct ak5558_priv *ak5558)
154215         if (!ak5558->reset_gpiod)
154216                 return;
154218 -       gpiod_set_value_cansleep(ak5558->reset_gpiod, 0);
154219 +       gpiod_set_value_cansleep(ak5558->reset_gpiod, 1);
154220         usleep_range(1000, 2000);
154223 @@ -281,7 +281,7 @@ static void ak5558_power_on(struct ak5558_priv *ak5558)
154224         if (!ak5558->reset_gpiod)
154225                 return;
154227 -       gpiod_set_value_cansleep(ak5558->reset_gpiod, 1);
154228 +       gpiod_set_value_cansleep(ak5558->reset_gpiod, 0);
154229         usleep_range(1000, 2000);
154232 diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
154233 index 8abe232ca4a4..ff23a7d4d2ac 100644
154234 --- a/sound/soc/codecs/rt286.c
154235 +++ b/sound/soc/codecs/rt286.c
154236 @@ -171,6 +171,9 @@ static bool rt286_readable_register(struct device *dev, unsigned int reg)
154237         case RT286_PROC_COEF:
154238         case RT286_SET_AMP_GAIN_ADC_IN1:
154239         case RT286_SET_AMP_GAIN_ADC_IN2:
154240 +       case RT286_SET_GPIO_MASK:
154241 +       case RT286_SET_GPIO_DIRECTION:
154242 +       case RT286_SET_GPIO_DATA:
154243         case RT286_SET_POWER(RT286_DAC_OUT1):
154244         case RT286_SET_POWER(RT286_DAC_OUT2):
154245         case RT286_SET_POWER(RT286_ADC_IN1):
154246 @@ -1117,12 +1120,11 @@ static const struct dmi_system_id force_combo_jack_table[] = {
154247         { }
154250 -static const struct dmi_system_id dmi_dell_dino[] = {
154251 +static const struct dmi_system_id dmi_dell[] = {
154252         {
154253 -               .ident = "Dell Dino",
154254 +               .ident = "Dell",
154255                 .matches = {
154256                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
154257 -                       DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343")
154258                 }
154259         },
154260         { }
154261 @@ -1133,7 +1135,7 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
154263         struct rt286_platform_data *pdata = dev_get_platdata(&i2c->dev);
154264         struct rt286_priv *rt286;
154265 -       int i, ret, val;
154266 +       int i, ret, vendor_id;
154268         rt286 = devm_kzalloc(&i2c->dev, sizeof(*rt286),
154269                                 GFP_KERNEL);
154270 @@ -1149,14 +1151,15 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
154271         }
154273         ret = regmap_read(rt286->regmap,
154274 -               RT286_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &val);
154275 +               RT286_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &vendor_id);
154276         if (ret != 0) {
154277                 dev_err(&i2c->dev, "I2C error %d\n", ret);
154278                 return ret;
154279         }
154280 -       if (val != RT286_VENDOR_ID && val != RT288_VENDOR_ID) {
154281 +       if (vendor_id != RT286_VENDOR_ID && vendor_id != RT288_VENDOR_ID) {
154282                 dev_err(&i2c->dev,
154283 -                       "Device with ID register %#x is not rt286\n", val);
154284 +                       "Device with ID register %#x is not rt286\n",
154285 +                       vendor_id);
154286                 return -ENODEV;
154287         }
154289 @@ -1180,8 +1183,8 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
154290         if (pdata)
154291                 rt286->pdata = *pdata;
154293 -       if (dmi_check_system(force_combo_jack_table) ||
154294 -               dmi_check_system(dmi_dell_dino))
154295 +       if ((vendor_id == RT288_VENDOR_ID && dmi_check_system(dmi_dell)) ||
154296 +               dmi_check_system(force_combo_jack_table))
154297                 rt286->pdata.cbj_en = true;
154299         regmap_write(rt286->regmap, RT286_SET_AUDIO_POWER, AC_PWRST_D3);
154300 @@ -1220,7 +1223,7 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
154301         regmap_update_bits(rt286->regmap, RT286_DEPOP_CTRL3, 0xf777, 0x4737);
154302         regmap_update_bits(rt286->regmap, RT286_DEPOP_CTRL4, 0x00ff, 0x003f);
154304 -       if (dmi_check_system(dmi_dell_dino)) {
154305 +       if (vendor_id == RT288_VENDOR_ID && dmi_check_system(dmi_dell)) {
154306                 regmap_update_bits(rt286->regmap,
154307                         RT286_SET_GPIO_MASK, 0x40, 0x40);
154308                 regmap_update_bits(rt286->regmap,
154309 diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c
154310 index 653da3eaf355..d77d12902594 100644
154311 --- a/sound/soc/codecs/rt5631.c
154312 +++ b/sound/soc/codecs/rt5631.c
154313 @@ -417,7 +417,7 @@ static void onebit_depop_mute_stage(struct snd_soc_component *component, int ena
154314         hp_zc = snd_soc_component_read(component, RT5631_INT_ST_IRQ_CTRL_2);
154315         snd_soc_component_write(component, RT5631_INT_ST_IRQ_CTRL_2, hp_zc & 0xf7ff);
154316         if (enable) {
154317 -               schedule_timeout_uninterruptible(msecs_to_jiffies(10));
154318 +               schedule_msec_hrtimeout_uninterruptible((10));
154319                 /* config one-bit depop parameter */
154320                 rt5631_write_index(component, RT5631_SPK_INTL_CTRL, 0x307f);
154321                 snd_soc_component_update_bits(component, RT5631_HP_OUT_VOL,
154322 @@ -529,7 +529,7 @@ static void depop_seq_mute_stage(struct snd_soc_component *component, int enable
154323         hp_zc = snd_soc_component_read(component, RT5631_INT_ST_IRQ_CTRL_2);
154324         snd_soc_component_write(component, RT5631_INT_ST_IRQ_CTRL_2, hp_zc & 0xf7ff);
154325         if (enable) {
154326 -               schedule_timeout_uninterruptible(msecs_to_jiffies(10));
154327 +               schedule_msec_hrtimeout_uninterruptible((10));
154329                 /* config depop sequence parameter */
154330                 rt5631_write_index(component, RT5631_SPK_INTL_CTRL, 0x302f);
154331 diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
154332 index 4063aac2a443..dd69d874bad2 100644
154333 --- a/sound/soc/codecs/rt5670.c
154334 +++ b/sound/soc/codecs/rt5670.c
154335 @@ -2980,6 +2980,18 @@ static const struct dmi_system_id dmi_platform_intel_quirks[] = {
154336                                                  RT5670_GPIO1_IS_IRQ |
154337                                                  RT5670_JD_MODE3),
154338         },
154339 +       {
154340 +               .callback = rt5670_quirk_cb,
154341 +               .ident = "Dell Venue 10 Pro 5055",
154342 +               .matches = {
154343 +                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
154344 +                       DMI_MATCH(DMI_PRODUCT_NAME, "Venue 10 Pro 5055"),
154345 +               },
154346 +               .driver_data = (unsigned long *)(RT5670_DMIC_EN |
154347 +                                                RT5670_DMIC2_INR |
154348 +                                                RT5670_GPIO1_IS_IRQ |
154349 +                                                RT5670_JD_MODE1),
154350 +       },
154351         {
154352                 .callback = rt5670_quirk_cb,
154353                 .ident = "Aegex 10 tablet (RU2)",
154354 diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
154355 index f04f88c8d425..b689f26fc4be 100644
154356 --- a/sound/soc/codecs/tlv320aic32x4.c
154357 +++ b/sound/soc/codecs/tlv320aic32x4.c
154358 @@ -577,12 +577,12 @@ static const struct regmap_range_cfg aic32x4_regmap_pages[] = {
154359                 .window_start = 0,
154360                 .window_len = 128,
154361                 .range_min = 0,
154362 -               .range_max = AIC32X4_RMICPGAVOL,
154363 +               .range_max = AIC32X4_REFPOWERUP,
154364         },
154367  const struct regmap_config aic32x4_regmap_config = {
154368 -       .max_register = AIC32X4_RMICPGAVOL,
154369 +       .max_register = AIC32X4_REFPOWERUP,
154370         .ranges = aic32x4_regmap_pages,
154371         .num_ranges = ARRAY_SIZE(aic32x4_regmap_pages),
154373 @@ -1243,6 +1243,10 @@ int aic32x4_probe(struct device *dev, struct regmap *regmap)
154374         if (ret)
154375                 goto err_disable_regulators;
154377 +       ret = aic32x4_register_clocks(dev, aic32x4->mclk_name);
154378 +       if (ret)
154379 +               goto err_disable_regulators;
154381         ret = devm_snd_soc_register_component(dev,
154382                         &soc_component_dev_aic32x4, &aic32x4_dai, 1);
154383         if (ret) {
154384 @@ -1250,10 +1254,6 @@ int aic32x4_probe(struct device *dev, struct regmap *regmap)
154385                 goto err_disable_regulators;
154386         }
154388 -       ret = aic32x4_register_clocks(dev, aic32x4->mclk_name);
154389 -       if (ret)
154390 -               goto err_disable_regulators;
154392         return 0;
154394  err_disable_regulators:
154395 diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
154396 index 15d42ce3b21d..897fced9589b 100644
154397 --- a/sound/soc/codecs/wm8350.c
154398 +++ b/sound/soc/codecs/wm8350.c
154399 @@ -234,10 +234,10 @@ static void wm8350_pga_work(struct work_struct *work)
154400                     out2->ramp == WM8350_RAMP_UP) {
154401                         /* delay is longer over 0dB as increases are larger */
154402                         if (i >= WM8350_OUTn_0dB)
154403 -                               schedule_timeout_interruptible(msecs_to_jiffies
154404 +                               schedule_msec_hrtimeout_interruptible(
154405                                                                (2));
154406                         else
154407 -                               schedule_timeout_interruptible(msecs_to_jiffies
154408 +                               schedule_msec_hrtimeout_interruptible(
154409                                                                (1));
154410                 } else
154411                         udelay(50);     /* doesn't matter if we delay longer */
154412 @@ -1121,7 +1121,7 @@ static int wm8350_set_bias_level(struct snd_soc_component *component,
154413                                          (platform->dis_out4 << 6));
154415                         /* wait for discharge */
154416 -                       schedule_timeout_interruptible(msecs_to_jiffies
154417 +                       schedule_msec_hrtimeout_interruptible(
154418                                                        (platform->
154419                                                         cap_discharge_msecs));
154421 @@ -1137,7 +1137,7 @@ static int wm8350_set_bias_level(struct snd_soc_component *component,
154422                                          WM8350_VBUFEN);
154424                         /* wait for vmid */
154425 -                       schedule_timeout_interruptible(msecs_to_jiffies
154426 +                       schedule_msec_hrtimeout_interruptible(
154427                                                        (platform->
154428                                                         vmid_charge_msecs));
154430 @@ -1188,7 +1188,7 @@ static int wm8350_set_bias_level(struct snd_soc_component *component,
154431                 wm8350_reg_write(wm8350, WM8350_POWER_MGMT_1, pm1);
154433                 /* wait */
154434 -               schedule_timeout_interruptible(msecs_to_jiffies
154435 +               schedule_msec_hrtimeout_interruptible(
154436                                                (platform->
154437                                                 vmid_discharge_msecs));
154439 @@ -1206,7 +1206,7 @@ static int wm8350_set_bias_level(struct snd_soc_component *component,
154440                                  pm1 | WM8350_OUTPUT_DRAIN_EN);
154442                 /* wait */
154443 -               schedule_timeout_interruptible(msecs_to_jiffies
154444 +               schedule_msec_hrtimeout_interruptible(
154445                                                (platform->drain_msecs));
154447                 pm1 &= ~WM8350_BIASEN;
154448 diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
154449 index a9a6d766a176..45bf31de6282 100644
154450 --- a/sound/soc/codecs/wm8900.c
154451 +++ b/sound/soc/codecs/wm8900.c
154452 @@ -1104,7 +1104,7 @@ static int wm8900_set_bias_level(struct snd_soc_component *component,
154453                 /* Need to let things settle before stopping the clock
154454                  * to ensure that restart works, see "Stopping the
154455                  * master clock" in the datasheet. */
154456 -               schedule_timeout_interruptible(msecs_to_jiffies(1));
154457 +               schedule_msec_hrtimeout_interruptible(1);
154458                 snd_soc_component_write(component, WM8900_REG_POWER2,
154459                              WM8900_REG_POWER2_SYSCLK_ENA);
154460                 break;
154461 diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
154462 index cda9cd935d4f..9e621a254392 100644
154463 --- a/sound/soc/codecs/wm8960.c
154464 +++ b/sound/soc/codecs/wm8960.c
154465 @@ -608,10 +608,6 @@ static const int bclk_divs[] = {
154466   *             - lrclk      = sysclk / dac_divs
154467   *             - 10 * bclk  = sysclk / bclk_divs
154468   *
154469 - *     If we cannot find an exact match for (sysclk, lrclk, bclk)
154470 - *     triplet, we relax the bclk such that bclk is chosen as the
154471 - *     closest available frequency greater than expected bclk.
154473   * @wm8960: codec private data
154474   * @mclk: MCLK used to derive sysclk
154475   * @sysclk_idx: sysclk_divs index for found sysclk
154476 @@ -629,7 +625,7 @@ int wm8960_configure_sysclk(struct wm8960_priv *wm8960, int mclk,
154478         int sysclk, bclk, lrclk;
154479         int i, j, k;
154480 -       int diff, closest = mclk;
154481 +       int diff;
154483         /* marker for no match */
154484         *bclk_idx = -1;
154485 @@ -653,12 +649,6 @@ int wm8960_configure_sysclk(struct wm8960_priv *wm8960, int mclk,
154486                                         *bclk_idx = k;
154487                                         break;
154488                                 }
154489 -                               if (diff > 0 && closest > diff) {
154490 -                                       *sysclk_idx = i;
154491 -                                       *dac_idx = j;
154492 -                                       *bclk_idx = k;
154493 -                                       closest = diff;
154494 -                               }
154495                         }
154496                         if (k != ARRAY_SIZE(bclk_divs))
154497                                 break;
154498 diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
154499 index e0ce32dd4a81..eb91c0282aad 100644
154500 --- a/sound/soc/codecs/wm9713.c
154501 +++ b/sound/soc/codecs/wm9713.c
154502 @@ -199,7 +199,7 @@ static int wm9713_voice_shutdown(struct snd_soc_dapm_widget *w,
154504         /* Gracefully shut down the voice interface. */
154505         snd_soc_component_update_bits(component, AC97_HANDSET_RATE, 0x0f00, 0x0200);
154506 -       schedule_timeout_interruptible(msecs_to_jiffies(1));
154507 +       schedule_msec_hrtimeout_interruptible(1);
154508         snd_soc_component_update_bits(component, AC97_HANDSET_RATE, 0x0f00, 0x0f00);
154509         snd_soc_component_update_bits(component, AC97_EXTENDED_MID, 0x1000, 0x1000);
154511 @@ -868,7 +868,7 @@ static int wm9713_set_pll(struct snd_soc_component *component,
154512         wm9713->pll_in = freq_in;
154514         /* wait 10ms AC97 link frames for the link to stabilise */
154515 -       schedule_timeout_interruptible(msecs_to_jiffies(10));
154516 +       schedule_msec_hrtimeout_interruptible((10));
154517         return 0;
154520 diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c
154521 index 8c5cdcdc8713..e81b5cf0d37a 100644
154522 --- a/sound/soc/generic/audio-graph-card.c
154523 +++ b/sound/soc/generic/audio-graph-card.c
154524 @@ -380,7 +380,7 @@ static int graph_dai_link_of(struct asoc_simple_priv *priv,
154525         struct device_node *top = dev->of_node;
154526         struct asoc_simple_dai *cpu_dai;
154527         struct asoc_simple_dai *codec_dai;
154528 -       int ret, single_cpu;
154529 +       int ret, single_cpu = 0;
154531         /* Do it only CPU turn */
154532         if (!li->cpu)
154533 diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
154534 index 75365c7bb393..d916ec69c24f 100644
154535 --- a/sound/soc/generic/simple-card.c
154536 +++ b/sound/soc/generic/simple-card.c
154537 @@ -258,7 +258,7 @@ static int simple_dai_link_of(struct asoc_simple_priv *priv,
154538         struct device_node *plat = NULL;
154539         char prop[128];
154540         char *prefix = "";
154541 -       int ret, single_cpu;
154542 +       int ret, single_cpu = 0;
154544         /*
154545          *       |CPU   |Codec   : turn
154546 diff --git a/sound/soc/intel/Makefile b/sound/soc/intel/Makefile
154547 index 4e0248d2accc..7c5038803be7 100644
154548 --- a/sound/soc/intel/Makefile
154549 +++ b/sound/soc/intel/Makefile
154550 @@ -5,7 +5,7 @@ obj-$(CONFIG_SND_SOC) += common/
154551  # Platform Support
154552  obj-$(CONFIG_SND_SST_ATOM_HIFI2_PLATFORM) += atom/
154553  obj-$(CONFIG_SND_SOC_INTEL_CATPT) += catpt/
154554 -obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += skylake/
154555 +obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE_COMMON) += skylake/
154556  obj-$(CONFIG_SND_SOC_INTEL_KEEMBAY) += keembay/
154558  # Machine support
154559 diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
154560 index 5d48cc359c3d..22912cab5e63 100644
154561 --- a/sound/soc/intel/boards/bytcr_rt5640.c
154562 +++ b/sound/soc/intel/boards/bytcr_rt5640.c
154563 @@ -482,6 +482,9 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
154564                         DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TAF"),
154565                 },
154566                 .driver_data = (void *)(BYT_RT5640_IN1_MAP |
154567 +                                       BYT_RT5640_JD_SRC_JD2_IN4N |
154568 +                                       BYT_RT5640_OVCD_TH_2000UA |
154569 +                                       BYT_RT5640_OVCD_SF_0P75 |
154570                                         BYT_RT5640_MONO_SPEAKER |
154571                                         BYT_RT5640_DIFF_MIC |
154572                                         BYT_RT5640_SSP0_AIF2 |
154573 @@ -515,6 +518,23 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
154574                                         BYT_RT5640_SSP0_AIF1 |
154575                                         BYT_RT5640_MCLK_EN),
154576         },
154577 +       {
154578 +               /* Chuwi Hi8 (CWI509) */
154579 +               .matches = {
154580 +                       DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
154581 +                       DMI_MATCH(DMI_BOARD_NAME, "BYT-PA03C"),
154582 +                       DMI_MATCH(DMI_SYS_VENDOR, "ilife"),
154583 +                       DMI_MATCH(DMI_PRODUCT_NAME, "S806"),
154584 +               },
154585 +               .driver_data = (void *)(BYT_RT5640_IN1_MAP |
154586 +                                       BYT_RT5640_JD_SRC_JD2_IN4N |
154587 +                                       BYT_RT5640_OVCD_TH_2000UA |
154588 +                                       BYT_RT5640_OVCD_SF_0P75 |
154589 +                                       BYT_RT5640_MONO_SPEAKER |
154590 +                                       BYT_RT5640_DIFF_MIC |
154591 +                                       BYT_RT5640_SSP0_AIF1 |
154592 +                                       BYT_RT5640_MCLK_EN),
154593 +       },
154594         {
154595                 .matches = {
154596                         DMI_MATCH(DMI_SYS_VENDOR, "Circuitco"),
154597 diff --git a/sound/soc/intel/boards/kbl_da7219_max98927.c b/sound/soc/intel/boards/kbl_da7219_max98927.c
154598 index cc9a2509ace2..e0149cf6127d 100644
154599 --- a/sound/soc/intel/boards/kbl_da7219_max98927.c
154600 +++ b/sound/soc/intel/boards/kbl_da7219_max98927.c
154601 @@ -282,11 +282,33 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
154602         struct snd_interval *chan = hw_param_interval(params,
154603                         SNDRV_PCM_HW_PARAM_CHANNELS);
154604         struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
154605 -       struct snd_soc_dpcm *dpcm = container_of(
154606 -                       params, struct snd_soc_dpcm, hw_params);
154607 -       struct snd_soc_dai_link *fe_dai_link = dpcm->fe->dai_link;
154608 -       struct snd_soc_dai_link *be_dai_link = dpcm->be->dai_link;
154609 +       struct snd_soc_dpcm *dpcm, *rtd_dpcm = NULL;
154611 +       /*
154612 +        * The following loop will be called only for playback stream
154613 +        * In this platform, there is only one playback device on every SSP
154614 +        */
154615 +       for_each_dpcm_fe(rtd, SNDRV_PCM_STREAM_PLAYBACK, dpcm) {
154616 +               rtd_dpcm = dpcm;
154617 +               break;
154618 +       }
154620 +       /*
154621 +        * This following loop will be called only for capture stream
154622 +        * In this platform, there is only one capture device on every SSP
154623 +        */
154624 +       for_each_dpcm_fe(rtd, SNDRV_PCM_STREAM_CAPTURE, dpcm) {
154625 +               rtd_dpcm = dpcm;
154626 +               break;
154627 +       }
154629 +       if (!rtd_dpcm)
154630 +               return -EINVAL;
154632 +       /*
154633 +        * The above 2 loops are mutually exclusive based on the stream direction,
154634 +        * thus rtd_dpcm variable will never be overwritten
154635 +        */
154636         /*
154637          * Topology for kblda7219m98373 & kblmax98373 supports only S24_LE,
154638          * where as kblda7219m98927 & kblmax98927 supports S16_LE by default.
154639 @@ -309,9 +331,9 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
154640         /*
154641          * The ADSP will convert the FE rate to 48k, stereo, 24 bit
154642          */
154643 -       if (!strcmp(fe_dai_link->name, "Kbl Audio Port") ||
154644 -           !strcmp(fe_dai_link->name, "Kbl Audio Headset Playback") ||
154645 -           !strcmp(fe_dai_link->name, "Kbl Audio Capture Port")) {
154646 +       if (!strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Port") ||
154647 +           !strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Headset Playback") ||
154648 +           !strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Capture Port")) {
154649                 rate->min = rate->max = 48000;
154650                 chan->min = chan->max = 2;
154651                 snd_mask_none(fmt);
154652 @@ -322,7 +344,7 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
154653          * The speaker on the SSP0 supports S16_LE and not S24_LE.
154654          * thus changing the mask here
154655          */
154656 -       if (!strcmp(be_dai_link->name, "SSP0-Codec"))
154657 +       if (!strcmp(rtd_dpcm->be->dai_link->name, "SSP0-Codec"))
154658                 snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE);
154660         return 0;
154661 diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
154662 index 8adce6417b02..ecd3f90f4bbe 100644
154663 --- a/sound/soc/intel/boards/sof_sdw.c
154664 +++ b/sound/soc/intel/boards/sof_sdw.c
154665 @@ -187,6 +187,17 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
154666                                         SOF_RT715_DAI_ID_FIX |
154667                                         SOF_SDW_FOUR_SPK),
154668         },
154669 +       /* AlderLake devices */
154670 +       {
154671 +               .callback = sof_sdw_quirk_cb,
154672 +               .matches = {
154673 +                       DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
154674 +                       DMI_MATCH(DMI_PRODUCT_NAME, "Alder Lake Client Platform"),
154675 +               },
154676 +               .driver_data = (void *)(SOF_RT711_JD_SRC_JD1 |
154677 +                                       SOF_SDW_TGL_HDMI |
154678 +                                       SOF_SDW_PCH_DMIC),
154679 +       },
154680         {}
154683 diff --git a/sound/soc/intel/boards/sof_wm8804.c b/sound/soc/intel/boards/sof_wm8804.c
154684 index a46ba13e8eb0..6a181e45143d 100644
154685 --- a/sound/soc/intel/boards/sof_wm8804.c
154686 +++ b/sound/soc/intel/boards/sof_wm8804.c
154687 @@ -124,7 +124,11 @@ static int sof_wm8804_hw_params(struct snd_pcm_substream *substream,
154688         }
154690         snd_soc_dai_set_clkdiv(codec_dai, WM8804_MCLK_DIV, mclk_div);
154691 -       snd_soc_dai_set_pll(codec_dai, 0, 0, sysclk, mclk_freq);
154692 +       ret = snd_soc_dai_set_pll(codec_dai, 0, 0, sysclk, mclk_freq);
154693 +       if (ret < 0) {
154694 +               dev_err(rtd->card->dev, "Failed to set WM8804 PLL\n");
154695 +               return ret;
154696 +       }
154698         ret = snd_soc_dai_set_sysclk(codec_dai, WM8804_TX_CLKSRC_PLL,
154699                                      sysclk, SND_SOC_CLOCK_OUT);
154700 diff --git a/sound/soc/intel/skylake/Makefile b/sound/soc/intel/skylake/Makefile
154701 index dd39149b89b1..1c4649bccec5 100644
154702 --- a/sound/soc/intel/skylake/Makefile
154703 +++ b/sound/soc/intel/skylake/Makefile
154704 @@ -7,7 +7,7 @@ ifdef CONFIG_DEBUG_FS
154705    snd-soc-skl-objs += skl-debug.o
154706  endif
154708 -obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += snd-soc-skl.o
154709 +obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE_COMMON) += snd-soc-skl.o
154711  #Skylake Clock device support
154712  snd-soc-skl-ssp-clk-objs := skl-ssp-clk.o
154713 diff --git a/sound/soc/qcom/qdsp6/q6afe-clocks.c b/sound/soc/qcom/qdsp6/q6afe-clocks.c
154714 index f0362f061652..9431656283cd 100644
154715 --- a/sound/soc/qcom/qdsp6/q6afe-clocks.c
154716 +++ b/sound/soc/qcom/qdsp6/q6afe-clocks.c
154717 @@ -11,33 +11,29 @@
154718  #include <linux/slab.h>
154719  #include "q6afe.h"
154721 -#define Q6AFE_CLK(id) &(struct q6afe_clk) {            \
154722 +#define Q6AFE_CLK(id) {                                        \
154723                 .clk_id = id,                           \
154724                 .afe_clk_id     = Q6AFE_##id,           \
154725                 .name = #id,                            \
154726 -               .attributes = LPASS_CLK_ATTRIBUTE_COUPLE_NO, \
154727                 .rate = 19200000,                       \
154728 -               .hw.init = &(struct clk_init_data) {    \
154729 -                       .ops = &clk_q6afe_ops,          \
154730 -                       .name = #id,                    \
154731 -               },                                      \
154732         }
154734 -#define Q6AFE_VOTE_CLK(id, blkid, n) &(struct q6afe_clk) { \
154735 +#define Q6AFE_VOTE_CLK(id, blkid, n) {                 \
154736                 .clk_id = id,                           \
154737                 .afe_clk_id = blkid,                    \
154738 -               .name = #n,                             \
154739 -               .hw.init = &(struct clk_init_data) {    \
154740 -                       .ops = &clk_vote_q6afe_ops,     \
154741 -                       .name = #id,                    \
154742 -               },                                      \
154743 +               .name = n,                              \
154744         }
154746 -struct q6afe_clk {
154747 -       struct device *dev;
154748 +struct q6afe_clk_init {
154749         int clk_id;
154750         int afe_clk_id;
154751         char *name;
154752 +       int rate;
154755 +struct q6afe_clk {
154756 +       struct device *dev;
154757 +       int afe_clk_id;
154758         int attributes;
154759         int rate;
154760         uint32_t handle;
154761 @@ -48,8 +44,7 @@ struct q6afe_clk {
154763  struct q6afe_cc {
154764         struct device *dev;
154765 -       struct q6afe_clk **clks;
154766 -       int num_clks;
154767 +       struct q6afe_clk *clks[Q6AFE_MAX_CLK_ID];
154770  static int clk_q6afe_prepare(struct clk_hw *hw)
154771 @@ -105,7 +100,7 @@ static int clk_vote_q6afe_block(struct clk_hw *hw)
154772         struct q6afe_clk *clk = to_q6afe_clk(hw);
154774         return q6afe_vote_lpass_core_hw(clk->dev, clk->afe_clk_id,
154775 -                                       clk->name, &clk->handle);
154776 +                                       clk_hw_get_name(&clk->hw), &clk->handle);
154779  static void clk_unvote_q6afe_block(struct clk_hw *hw)
154780 @@ -120,84 +115,76 @@ static const struct clk_ops clk_vote_q6afe_ops = {
154781         .unprepare      = clk_unvote_q6afe_block,
154784 -static struct q6afe_clk *q6afe_clks[Q6AFE_MAX_CLK_ID] = {
154785 -       [LPASS_CLK_ID_PRI_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_MI2S_IBIT),
154786 -       [LPASS_CLK_ID_PRI_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_MI2S_EBIT),
154787 -       [LPASS_CLK_ID_SEC_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_MI2S_IBIT),
154788 -       [LPASS_CLK_ID_SEC_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_MI2S_EBIT),
154789 -       [LPASS_CLK_ID_TER_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_MI2S_IBIT),
154790 -       [LPASS_CLK_ID_TER_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_MI2S_EBIT),
154791 -       [LPASS_CLK_ID_QUAD_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_MI2S_IBIT),
154792 -       [LPASS_CLK_ID_QUAD_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_MI2S_EBIT),
154793 -       [LPASS_CLK_ID_SPEAKER_I2S_IBIT] =
154794 -                               Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_IBIT),
154795 -       [LPASS_CLK_ID_SPEAKER_I2S_EBIT] =
154796 -                               Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_EBIT),
154797 -       [LPASS_CLK_ID_SPEAKER_I2S_OSR] =
154798 -                               Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_OSR),
154799 -       [LPASS_CLK_ID_QUI_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_IBIT),
154800 -       [LPASS_CLK_ID_QUI_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_EBIT),
154801 -       [LPASS_CLK_ID_SEN_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEN_MI2S_IBIT),
154802 -       [LPASS_CLK_ID_SEN_MI2S_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEN_MI2S_EBIT),
154803 -       [LPASS_CLK_ID_INT0_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT0_MI2S_IBIT),
154804 -       [LPASS_CLK_ID_INT1_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT1_MI2S_IBIT),
154805 -       [LPASS_CLK_ID_INT2_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT2_MI2S_IBIT),
154806 -       [LPASS_CLK_ID_INT3_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT3_MI2S_IBIT),
154807 -       [LPASS_CLK_ID_INT4_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT4_MI2S_IBIT),
154808 -       [LPASS_CLK_ID_INT5_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT5_MI2S_IBIT),
154809 -       [LPASS_CLK_ID_INT6_MI2S_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_INT6_MI2S_IBIT),
154810 -       [LPASS_CLK_ID_QUI_MI2S_OSR] = Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_OSR),
154811 -       [LPASS_CLK_ID_PRI_PCM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_PCM_IBIT),
154812 -       [LPASS_CLK_ID_PRI_PCM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_PCM_EBIT),
154813 -       [LPASS_CLK_ID_SEC_PCM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_PCM_IBIT),
154814 -       [LPASS_CLK_ID_SEC_PCM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_PCM_EBIT),
154815 -       [LPASS_CLK_ID_TER_PCM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_PCM_IBIT),
154816 -       [LPASS_CLK_ID_TER_PCM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_PCM_EBIT),
154817 -       [LPASS_CLK_ID_QUAD_PCM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_PCM_IBIT),
154818 -       [LPASS_CLK_ID_QUAD_PCM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_PCM_EBIT),
154819 -       [LPASS_CLK_ID_QUIN_PCM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUIN_PCM_IBIT),
154820 -       [LPASS_CLK_ID_QUIN_PCM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUIN_PCM_EBIT),
154821 -       [LPASS_CLK_ID_QUI_PCM_OSR] = Q6AFE_CLK(LPASS_CLK_ID_QUI_PCM_OSR),
154822 -       [LPASS_CLK_ID_PRI_TDM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_TDM_IBIT),
154823 -       [LPASS_CLK_ID_PRI_TDM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_PRI_TDM_EBIT),
154824 -       [LPASS_CLK_ID_SEC_TDM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_TDM_IBIT),
154825 -       [LPASS_CLK_ID_SEC_TDM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_SEC_TDM_EBIT),
154826 -       [LPASS_CLK_ID_TER_TDM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_TDM_IBIT),
154827 -       [LPASS_CLK_ID_TER_TDM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_TER_TDM_EBIT),
154828 -       [LPASS_CLK_ID_QUAD_TDM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_TDM_IBIT),
154829 -       [LPASS_CLK_ID_QUAD_TDM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUAD_TDM_EBIT),
154830 -       [LPASS_CLK_ID_QUIN_TDM_IBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_IBIT),
154831 -       [LPASS_CLK_ID_QUIN_TDM_EBIT] = Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_EBIT),
154832 -       [LPASS_CLK_ID_QUIN_TDM_OSR] = Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_OSR),
154833 -       [LPASS_CLK_ID_MCLK_1] = Q6AFE_CLK(LPASS_CLK_ID_MCLK_1),
154834 -       [LPASS_CLK_ID_MCLK_2] = Q6AFE_CLK(LPASS_CLK_ID_MCLK_2),
154835 -       [LPASS_CLK_ID_MCLK_3] = Q6AFE_CLK(LPASS_CLK_ID_MCLK_3),
154836 -       [LPASS_CLK_ID_MCLK_4] = Q6AFE_CLK(LPASS_CLK_ID_MCLK_4),
154837 -       [LPASS_CLK_ID_INTERNAL_DIGITAL_CODEC_CORE] =
154838 -               Q6AFE_CLK(LPASS_CLK_ID_INTERNAL_DIGITAL_CODEC_CORE),
154839 -       [LPASS_CLK_ID_INT_MCLK_0] = Q6AFE_CLK(LPASS_CLK_ID_INT_MCLK_0),
154840 -       [LPASS_CLK_ID_INT_MCLK_1] = Q6AFE_CLK(LPASS_CLK_ID_INT_MCLK_1),
154841 -       [LPASS_CLK_ID_WSA_CORE_MCLK] = Q6AFE_CLK(LPASS_CLK_ID_WSA_CORE_MCLK),
154842 -       [LPASS_CLK_ID_WSA_CORE_NPL_MCLK] =
154843 -                               Q6AFE_CLK(LPASS_CLK_ID_WSA_CORE_NPL_MCLK),
154844 -       [LPASS_CLK_ID_VA_CORE_MCLK] = Q6AFE_CLK(LPASS_CLK_ID_VA_CORE_MCLK),
154845 -       [LPASS_CLK_ID_TX_CORE_MCLK] = Q6AFE_CLK(LPASS_CLK_ID_TX_CORE_MCLK),
154846 -       [LPASS_CLK_ID_TX_CORE_NPL_MCLK] =
154847 -                       Q6AFE_CLK(LPASS_CLK_ID_TX_CORE_NPL_MCLK),
154848 -       [LPASS_CLK_ID_RX_CORE_MCLK] = Q6AFE_CLK(LPASS_CLK_ID_RX_CORE_MCLK),
154849 -       [LPASS_CLK_ID_RX_CORE_NPL_MCLK] =
154850 -                               Q6AFE_CLK(LPASS_CLK_ID_RX_CORE_NPL_MCLK),
154851 -       [LPASS_CLK_ID_VA_CORE_2X_MCLK] =
154852 -                               Q6AFE_CLK(LPASS_CLK_ID_VA_CORE_2X_MCLK),
154853 -       [LPASS_HW_AVTIMER_VOTE] = Q6AFE_VOTE_CLK(LPASS_HW_AVTIMER_VOTE,
154854 -                                                Q6AFE_LPASS_CORE_AVTIMER_BLOCK,
154855 -                                                "LPASS_AVTIMER_MACRO"),
154856 -       [LPASS_HW_MACRO_VOTE] = Q6AFE_VOTE_CLK(LPASS_HW_MACRO_VOTE,
154857 -                                               Q6AFE_LPASS_CORE_HW_MACRO_BLOCK,
154858 -                                               "LPASS_HW_MACRO"),
154859 -       [LPASS_HW_DCODEC_VOTE] = Q6AFE_VOTE_CLK(LPASS_HW_DCODEC_VOTE,
154860 -                                       Q6AFE_LPASS_CORE_HW_DCODEC_BLOCK,
154861 -                                       "LPASS_HW_DCODEC"),
154862 +static const struct q6afe_clk_init q6afe_clks[] = {
154863 +       Q6AFE_CLK(LPASS_CLK_ID_PRI_MI2S_IBIT),
154864 +       Q6AFE_CLK(LPASS_CLK_ID_PRI_MI2S_EBIT),
154865 +       Q6AFE_CLK(LPASS_CLK_ID_SEC_MI2S_IBIT),
154866 +       Q6AFE_CLK(LPASS_CLK_ID_SEC_MI2S_EBIT),
154867 +       Q6AFE_CLK(LPASS_CLK_ID_TER_MI2S_IBIT),
154868 +       Q6AFE_CLK(LPASS_CLK_ID_TER_MI2S_EBIT),
154869 +       Q6AFE_CLK(LPASS_CLK_ID_QUAD_MI2S_IBIT),
154870 +       Q6AFE_CLK(LPASS_CLK_ID_QUAD_MI2S_EBIT),
154871 +       Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_IBIT),
154872 +       Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_EBIT),
154873 +       Q6AFE_CLK(LPASS_CLK_ID_SPEAKER_I2S_OSR),
154874 +       Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_IBIT),
154875 +       Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_EBIT),
154876 +       Q6AFE_CLK(LPASS_CLK_ID_SEN_MI2S_IBIT),
154877 +       Q6AFE_CLK(LPASS_CLK_ID_SEN_MI2S_EBIT),
154878 +       Q6AFE_CLK(LPASS_CLK_ID_INT0_MI2S_IBIT),
154879 +       Q6AFE_CLK(LPASS_CLK_ID_INT1_MI2S_IBIT),
154880 +       Q6AFE_CLK(LPASS_CLK_ID_INT2_MI2S_IBIT),
154881 +       Q6AFE_CLK(LPASS_CLK_ID_INT3_MI2S_IBIT),
154882 +       Q6AFE_CLK(LPASS_CLK_ID_INT4_MI2S_IBIT),
154883 +       Q6AFE_CLK(LPASS_CLK_ID_INT5_MI2S_IBIT),
154884 +       Q6AFE_CLK(LPASS_CLK_ID_INT6_MI2S_IBIT),
154885 +       Q6AFE_CLK(LPASS_CLK_ID_QUI_MI2S_OSR),
154886 +       Q6AFE_CLK(LPASS_CLK_ID_PRI_PCM_IBIT),
154887 +       Q6AFE_CLK(LPASS_CLK_ID_PRI_PCM_EBIT),
154888 +       Q6AFE_CLK(LPASS_CLK_ID_SEC_PCM_IBIT),
154889 +       Q6AFE_CLK(LPASS_CLK_ID_SEC_PCM_EBIT),
154890 +       Q6AFE_CLK(LPASS_CLK_ID_TER_PCM_IBIT),
154891 +       Q6AFE_CLK(LPASS_CLK_ID_TER_PCM_EBIT),
154892 +       Q6AFE_CLK(LPASS_CLK_ID_QUAD_PCM_IBIT),
154893 +       Q6AFE_CLK(LPASS_CLK_ID_QUAD_PCM_EBIT),
154894 +       Q6AFE_CLK(LPASS_CLK_ID_QUIN_PCM_IBIT),
154895 +       Q6AFE_CLK(LPASS_CLK_ID_QUIN_PCM_EBIT),
154896 +       Q6AFE_CLK(LPASS_CLK_ID_QUI_PCM_OSR),
154897 +       Q6AFE_CLK(LPASS_CLK_ID_PRI_TDM_IBIT),
154898 +       Q6AFE_CLK(LPASS_CLK_ID_PRI_TDM_EBIT),
154899 +       Q6AFE_CLK(LPASS_CLK_ID_SEC_TDM_IBIT),
154900 +       Q6AFE_CLK(LPASS_CLK_ID_SEC_TDM_EBIT),
154901 +       Q6AFE_CLK(LPASS_CLK_ID_TER_TDM_IBIT),
154902 +       Q6AFE_CLK(LPASS_CLK_ID_TER_TDM_EBIT),
154903 +       Q6AFE_CLK(LPASS_CLK_ID_QUAD_TDM_IBIT),
154904 +       Q6AFE_CLK(LPASS_CLK_ID_QUAD_TDM_EBIT),
154905 +       Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_IBIT),
154906 +       Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_EBIT),
154907 +       Q6AFE_CLK(LPASS_CLK_ID_QUIN_TDM_OSR),
154908 +       Q6AFE_CLK(LPASS_CLK_ID_MCLK_1),
154909 +       Q6AFE_CLK(LPASS_CLK_ID_MCLK_2),
154910 +       Q6AFE_CLK(LPASS_CLK_ID_MCLK_3),
154911 +       Q6AFE_CLK(LPASS_CLK_ID_MCLK_4),
154912 +       Q6AFE_CLK(LPASS_CLK_ID_INTERNAL_DIGITAL_CODEC_CORE),
154913 +       Q6AFE_CLK(LPASS_CLK_ID_INT_MCLK_0),
154914 +       Q6AFE_CLK(LPASS_CLK_ID_INT_MCLK_1),
154915 +       Q6AFE_CLK(LPASS_CLK_ID_WSA_CORE_MCLK),
154916 +       Q6AFE_CLK(LPASS_CLK_ID_WSA_CORE_NPL_MCLK),
154917 +       Q6AFE_CLK(LPASS_CLK_ID_VA_CORE_MCLK),
154918 +       Q6AFE_CLK(LPASS_CLK_ID_TX_CORE_MCLK),
154919 +       Q6AFE_CLK(LPASS_CLK_ID_TX_CORE_NPL_MCLK),
154920 +       Q6AFE_CLK(LPASS_CLK_ID_RX_CORE_MCLK),
154921 +       Q6AFE_CLK(LPASS_CLK_ID_RX_CORE_NPL_MCLK),
154922 +       Q6AFE_CLK(LPASS_CLK_ID_VA_CORE_2X_MCLK),
154923 +       Q6AFE_VOTE_CLK(LPASS_HW_AVTIMER_VOTE,
154924 +                      Q6AFE_LPASS_CORE_AVTIMER_BLOCK,
154925 +                      "LPASS_AVTIMER_MACRO"),
154926 +       Q6AFE_VOTE_CLK(LPASS_HW_MACRO_VOTE,
154927 +                      Q6AFE_LPASS_CORE_HW_MACRO_BLOCK,
154928 +                      "LPASS_HW_MACRO"),
154929 +       Q6AFE_VOTE_CLK(LPASS_HW_DCODEC_VOTE,
154930 +                      Q6AFE_LPASS_CORE_HW_DCODEC_BLOCK,
154931 +                      "LPASS_HW_DCODEC"),
154934  static struct clk_hw *q6afe_of_clk_hw_get(struct of_phandle_args *clkspec,
154935 @@ -207,7 +194,7 @@ static struct clk_hw *q6afe_of_clk_hw_get(struct of_phandle_args *clkspec,
154936         unsigned int idx = clkspec->args[0];
154937         unsigned int attr = clkspec->args[1];
154939 -       if (idx >= cc->num_clks || attr > LPASS_CLK_ATTRIBUTE_COUPLE_DIVISOR) {
154940 +       if (idx >= Q6AFE_MAX_CLK_ID || attr > LPASS_CLK_ATTRIBUTE_COUPLE_DIVISOR) {
154941                 dev_err(cc->dev, "Invalid clk specifier (%d, %d)\n", idx, attr);
154942                 return ERR_PTR(-EINVAL);
154943         }
154944 @@ -230,20 +217,36 @@ static int q6afe_clock_dev_probe(struct platform_device *pdev)
154945         if (!cc)
154946                 return -ENOMEM;
154948 -       cc->clks = &q6afe_clks[0];
154949 -       cc->num_clks = ARRAY_SIZE(q6afe_clks);
154950 +       cc->dev = dev;
154951         for (i = 0; i < ARRAY_SIZE(q6afe_clks); i++) {
154952 -               if (!q6afe_clks[i])
154953 -                       continue;
154954 +               unsigned int id = q6afe_clks[i].clk_id;
154955 +               struct clk_init_data init = {
154956 +                       .name =  q6afe_clks[i].name,
154957 +               };
154958 +               struct q6afe_clk *clk;
154960 +               clk = devm_kzalloc(dev, sizeof(*clk), GFP_KERNEL);
154961 +               if (!clk)
154962 +                       return -ENOMEM;
154964 +               clk->dev = dev;
154965 +               clk->afe_clk_id = q6afe_clks[i].afe_clk_id;
154966 +               clk->rate = q6afe_clks[i].rate;
154967 +               clk->hw.init = &init;
154969 +               if (clk->rate)
154970 +                       init.ops = &clk_q6afe_ops;
154971 +               else
154972 +                       init.ops = &clk_vote_q6afe_ops;
154974 -               q6afe_clks[i]->dev = dev;
154975 +               cc->clks[id] = clk;
154977 -               ret = devm_clk_hw_register(dev, &q6afe_clks[i]->hw);
154978 +               ret = devm_clk_hw_register(dev, &clk->hw);
154979                 if (ret)
154980                         return ret;
154981         }
154983 -       ret = of_clk_add_hw_provider(dev->of_node, q6afe_of_clk_hw_get, cc);
154984 +       ret = devm_of_clk_add_hw_provider(dev, q6afe_of_clk_hw_get, cc);
154985         if (ret)
154986                 return ret;
154988 diff --git a/sound/soc/qcom/qdsp6/q6afe.c b/sound/soc/qcom/qdsp6/q6afe.c
154989 index cad1cd1bfdf0..4327b72162ec 100644
154990 --- a/sound/soc/qcom/qdsp6/q6afe.c
154991 +++ b/sound/soc/qcom/qdsp6/q6afe.c
154992 @@ -1681,7 +1681,7 @@ int q6afe_unvote_lpass_core_hw(struct device *dev, uint32_t hw_block_id,
154993  EXPORT_SYMBOL(q6afe_unvote_lpass_core_hw);
154995  int q6afe_vote_lpass_core_hw(struct device *dev, uint32_t hw_block_id,
154996 -                            char *client_name, uint32_t *client_handle)
154997 +                            const char *client_name, uint32_t *client_handle)
154999         struct q6afe *afe = dev_get_drvdata(dev->parent);
155000         struct afe_cmd_remote_lpass_core_hw_vote_request *vote_cfg;
155001 diff --git a/sound/soc/qcom/qdsp6/q6afe.h b/sound/soc/qcom/qdsp6/q6afe.h
155002 index 22e10269aa10..3845b56c0ed3 100644
155003 --- a/sound/soc/qcom/qdsp6/q6afe.h
155004 +++ b/sound/soc/qcom/qdsp6/q6afe.h
155005 @@ -236,7 +236,7 @@ int q6afe_port_set_sysclk(struct q6afe_port *port, int clk_id,
155006  int q6afe_set_lpass_clock(struct device *dev, int clk_id, int clk_src,
155007                           int clk_root, unsigned int freq);
155008  int q6afe_vote_lpass_core_hw(struct device *dev, uint32_t hw_block_id,
155009 -                            char *client_name, uint32_t *client_handle);
155010 +                            const char *client_name, uint32_t *client_handle);
155011  int q6afe_unvote_lpass_core_hw(struct device *dev, uint32_t hw_block_id,
155012                                uint32_t client_handle);
155013  #endif /* __Q6AFE_H__ */
155014 diff --git a/sound/soc/samsung/tm2_wm5110.c b/sound/soc/samsung/tm2_wm5110.c
155015 index 9300fef9bf26..125e07f65d2b 100644
155016 --- a/sound/soc/samsung/tm2_wm5110.c
155017 +++ b/sound/soc/samsung/tm2_wm5110.c
155018 @@ -553,7 +553,7 @@ static int tm2_probe(struct platform_device *pdev)
155020                 ret = of_parse_phandle_with_args(dev->of_node, "i2s-controller",
155021                                                  cells_name, i, &args);
155022 -               if (!args.np) {
155023 +               if (ret) {
155024                         dev_err(dev, "i2s-controller property parse error: %d\n", i);
155025                         ret = -EINVAL;
155026                         goto dai_node_put;
155027 diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
155028 index 1029d8d9d800..d2b4632d9c2a 100644
155029 --- a/sound/soc/sh/rcar/core.c
155030 +++ b/sound/soc/sh/rcar/core.c
155031 @@ -1428,8 +1428,75 @@ static int rsnd_hw_params(struct snd_soc_component *component,
155032                 }
155033                 if (io->converted_chan)
155034                         dev_dbg(dev, "convert channels = %d\n", io->converted_chan);
155035 -               if (io->converted_rate)
155036 +               if (io->converted_rate) {
155037 +                       /*
155038 +                        * SRC supports convert rates from params_rate(hw_params)/k_down
155039 +                        * to params_rate(hw_params)*k_up, where k_up is always 6, and
155040 +                        * k_down depends on number of channels and SRC unit.
155041 +                        * So all SRC units can upsample audio up to 6 times regardless
155042 +                        * its number of channels. And all SRC units can downsample
155043 +                        * 2 channel audio up to 6 times too.
155044 +                        */
155045 +                       int k_up = 6;
155046 +                       int k_down = 6;
155047 +                       int channel;
155048 +                       struct rsnd_mod *src_mod = rsnd_io_to_mod_src(io);
155050                         dev_dbg(dev, "convert rate     = %d\n", io->converted_rate);
155052 +                       channel = io->converted_chan ? io->converted_chan :
155053 +                                 params_channels(hw_params);
155055 +                       switch (rsnd_mod_id(src_mod)) {
155056 +                       /*
155057 +                        * SRC0 can downsample 4, 6 and 8 channel audio up to 4 times.
155058 +                        * SRC1, SRC3 and SRC4 can downsample 4 channel audio
155059 +                        * up to 4 times.
155060 +                        * SRC1, SRC3 and SRC4 can downsample 6 and 8 channel audio
155061 +                        * no more than twice.
155062 +                        */
155063 +                       case 1:
155064 +                       case 3:
155065 +                       case 4:
155066 +                               if (channel > 4) {
155067 +                                       k_down = 2;
155068 +                                       break;
155069 +                               }
155070 +                               fallthrough;
155071 +                       case 0:
155072 +                               if (channel > 2)
155073 +                                       k_down = 4;
155074 +                               break;
155076 +                       /* Other SRC units do not support more than 2 channels */
155077 +                       default:
155078 +                               if (channel > 2)
155079 +                                       return -EINVAL;
155080 +                       }
155082 +                       if (params_rate(hw_params) > io->converted_rate * k_down) {
155083 +                               hw_param_interval(hw_params, SNDRV_PCM_HW_PARAM_RATE)->min =
155084 +                                       io->converted_rate * k_down;
155085 +                               hw_param_interval(hw_params, SNDRV_PCM_HW_PARAM_RATE)->max =
155086 +                                       io->converted_rate * k_down;
155087 +                               hw_params->cmask |= SNDRV_PCM_HW_PARAM_RATE;
155088 +                       } else if (params_rate(hw_params) * k_up < io->converted_rate) {
155089 +                               hw_param_interval(hw_params, SNDRV_PCM_HW_PARAM_RATE)->min =
155090 +                                       (io->converted_rate + k_up - 1) / k_up;
155091 +                               hw_param_interval(hw_params, SNDRV_PCM_HW_PARAM_RATE)->max =
155092 +                                       (io->converted_rate + k_up - 1) / k_up;
155093 +                               hw_params->cmask |= SNDRV_PCM_HW_PARAM_RATE;
155094 +                       }
155096 +                       /*
155097 +                        * TBD: Max SRC input and output rates also depend on number
155098 +                        * of channels and SRC unit:
155099 +                        * SRC1, SRC3 and SRC4 do not support more than 128kHz
155100 +                        * for 6 channel and 96kHz for 8 channel audio.
155101 +                        * Perhaps this function should return EINVAL if the input or
155102 +                        * the output rate exceeds the limitation.
155103 +                        */
155104 +               }
155105         }
155107         return rsnd_dai_call(hw_params, io, substream, hw_params);
155108 diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
155109 index d0ded427a836..042207c11651 100644
155110 --- a/sound/soc/sh/rcar/ssi.c
155111 +++ b/sound/soc/sh/rcar/ssi.c
155112 @@ -507,10 +507,15 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
155113                          struct rsnd_priv *priv)
155115         struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
155116 +       int ret;
155118         if (!rsnd_ssi_is_run_mods(mod, io))
155119                 return 0;
155121 +       ret = rsnd_ssi_master_clk_start(mod, io);
155122 +       if (ret < 0)
155123 +               return ret;
155125         ssi->usrcnt++;
155127         rsnd_mod_power_on(mod);
155128 @@ -792,7 +797,6 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
155129                                                        SSI_SYS_STATUS(i * 2),
155130                                                        0xf << (id * 4));
155131                                         stop = true;
155132 -                                       break;
155133                                 }
155134                         }
155135                         break;
155136 @@ -810,7 +814,6 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
155137                                                 SSI_SYS_STATUS((i * 2) + 1),
155138                                                 0xf << 4);
155139                                         stop = true;
155140 -                                       break;
155141                                 }
155142                         }
155143                         break;
155144 @@ -1060,13 +1063,6 @@ static int rsnd_ssi_pio_pointer(struct rsnd_mod *mod,
155145         return 0;
155148 -static int rsnd_ssi_prepare(struct rsnd_mod *mod,
155149 -                           struct rsnd_dai_stream *io,
155150 -                           struct rsnd_priv *priv)
155152 -       return rsnd_ssi_master_clk_start(mod, io);
155155  static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
155156         .name           = SSI_NAME,
155157         .probe          = rsnd_ssi_common_probe,
155158 @@ -1079,7 +1075,6 @@ static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
155159         .pointer        = rsnd_ssi_pio_pointer,
155160         .pcm_new        = rsnd_ssi_pcm_new,
155161         .hw_params      = rsnd_ssi_hw_params,
155162 -       .prepare        = rsnd_ssi_prepare,
155163         .get_status     = rsnd_ssi_get_status,
155166 @@ -1166,7 +1161,6 @@ static struct rsnd_mod_ops rsnd_ssi_dma_ops = {
155167         .pcm_new        = rsnd_ssi_pcm_new,
155168         .fallback       = rsnd_ssi_fallback,
155169         .hw_params      = rsnd_ssi_hw_params,
155170 -       .prepare        = rsnd_ssi_prepare,
155171         .get_status     = rsnd_ssi_get_status,
155174 diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
155175 index 246a5e32e22a..b4810266f5e5 100644
155176 --- a/sound/soc/soc-compress.c
155177 +++ b/sound/soc/soc-compress.c
155178 @@ -153,7 +153,9 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
155179         fe->dpcm[stream].state = SND_SOC_DPCM_STATE_OPEN;
155180         fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
155182 +       mutex_lock_nested(&fe->card->pcm_mutex, fe->card->pcm_subclass);
155183         snd_soc_runtime_activate(fe, stream);
155184 +       mutex_unlock(&fe->card->pcm_mutex);
155186         mutex_unlock(&fe->card->mutex);
155188 @@ -181,7 +183,9 @@ static int soc_compr_free_fe(struct snd_compr_stream *cstream)
155190         mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
155192 +       mutex_lock_nested(&fe->card->pcm_mutex, fe->card->pcm_subclass);
155193         snd_soc_runtime_deactivate(fe, stream);
155194 +       mutex_unlock(&fe->card->pcm_mutex);
155196         fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
155198 diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
155199 index b005f9eadd71..2f75a449c45c 100644
155200 --- a/sound/soc/soc-dapm.c
155201 +++ b/sound/soc/soc-dapm.c
155202 @@ -154,7 +154,7 @@ static void dapm_assert_locked(struct snd_soc_dapm_context *dapm)
155203  static void pop_wait(u32 pop_time)
155205         if (pop_time)
155206 -               schedule_timeout_uninterruptible(msecs_to_jiffies(pop_time));
155207 +               schedule_msec_hrtimeout_uninterruptible((pop_time));
155210  __printf(3, 4)
155211 diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
155212 index 6740df541508..3d22c1be6f3d 100644
155213 --- a/sound/soc/tegra/tegra30_i2s.c
155214 +++ b/sound/soc/tegra/tegra30_i2s.c
155215 @@ -58,8 +58,18 @@ static int tegra30_i2s_runtime_resume(struct device *dev)
155216         }
155218         regcache_cache_only(i2s->regmap, false);
155219 +       regcache_mark_dirty(i2s->regmap);
155221 +       ret = regcache_sync(i2s->regmap);
155222 +       if (ret)
155223 +               goto disable_clocks;
155225         return 0;
155227 +disable_clocks:
155228 +       clk_disable_unprepare(i2s->clk_i2s);
155230 +       return ret;
155233  static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai,
155234 @@ -551,37 +561,11 @@ static int tegra30_i2s_platform_remove(struct platform_device *pdev)
155235         return 0;
155238 -#ifdef CONFIG_PM_SLEEP
155239 -static int tegra30_i2s_suspend(struct device *dev)
155241 -       struct tegra30_i2s *i2s = dev_get_drvdata(dev);
155243 -       regcache_mark_dirty(i2s->regmap);
155245 -       return 0;
155248 -static int tegra30_i2s_resume(struct device *dev)
155250 -       struct tegra30_i2s *i2s = dev_get_drvdata(dev);
155251 -       int ret;
155253 -       ret = pm_runtime_get_sync(dev);
155254 -       if (ret < 0) {
155255 -               pm_runtime_put(dev);
155256 -               return ret;
155257 -       }
155258 -       ret = regcache_sync(i2s->regmap);
155259 -       pm_runtime_put(dev);
155261 -       return ret;
155263 -#endif
155265  static const struct dev_pm_ops tegra30_i2s_pm_ops = {
155266         SET_RUNTIME_PM_OPS(tegra30_i2s_runtime_suspend,
155267                            tegra30_i2s_runtime_resume, NULL)
155268 -       SET_SYSTEM_SLEEP_PM_OPS(tegra30_i2s_suspend, tegra30_i2s_resume)
155269 +       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
155270 +                               pm_runtime_force_resume)
155273  static struct platform_driver tegra30_i2s_driver = {
155274 diff --git a/sound/usb/card.c b/sound/usb/card.c
155275 index 0826a437f8fc..7b7526d3a56e 100644
155276 --- a/sound/usb/card.c
155277 +++ b/sound/usb/card.c
155278 @@ -181,9 +181,8 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int
155279                                 ctrlif, interface);
155280                         return -EINVAL;
155281                 }
155282 -               usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L);
155284 -               return 0;
155285 +               return usb_driver_claim_interface(&usb_audio_driver, iface,
155286 +                                                 USB_AUDIO_IFACE_UNUSED);
155287         }
155289         if ((altsd->bInterfaceClass != USB_CLASS_AUDIO &&
155290 @@ -203,7 +202,8 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int
155292         if (! snd_usb_parse_audio_interface(chip, interface)) {
155293                 usb_set_interface(dev, interface, 0); /* reset the current interface */
155294 -               usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L);
155295 +               return usb_driver_claim_interface(&usb_audio_driver, iface,
155296 +                                                 USB_AUDIO_IFACE_UNUSED);
155297         }
155299         return 0;
155300 @@ -862,7 +862,7 @@ static void usb_audio_disconnect(struct usb_interface *intf)
155301         struct snd_card *card;
155302         struct list_head *p;
155304 -       if (chip == (void *)-1L)
155305 +       if (chip == USB_AUDIO_IFACE_UNUSED)
155306                 return;
155308         card = chip->card;
155309 @@ -992,7 +992,7 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
155310         struct usb_mixer_interface *mixer;
155311         struct list_head *p;
155313 -       if (chip == (void *)-1L)
155314 +       if (chip == USB_AUDIO_IFACE_UNUSED)
155315                 return 0;
155317         if (!chip->num_suspended_intf++) {
155318 @@ -1022,7 +1022,7 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
155319         struct list_head *p;
155320         int err = 0;
155322 -       if (chip == (void *)-1L)
155323 +       if (chip == USB_AUDIO_IFACE_UNUSED)
155324                 return 0;
155326         atomic_inc(&chip->active); /* avoid autopm */
155327 diff --git a/sound/usb/clock.c b/sound/usb/clock.c
155328 index a746802d0ac3..17bbde73d4d1 100644
155329 --- a/sound/usb/clock.c
155330 +++ b/sound/usb/clock.c
155331 @@ -296,7 +296,7 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip,
155333         selector = snd_usb_find_clock_selector(chip->ctrl_intf, entity_id);
155334         if (selector) {
155335 -               int ret, i, cur;
155336 +               int ret, i, cur, err;
155338                 if (selector->bNrInPins == 1) {
155339                         ret = 1;
155340 @@ -324,13 +324,17 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip,
155341                 ret = __uac_clock_find_source(chip, fmt,
155342                                               selector->baCSourceID[ret - 1],
155343                                               visited, validate);
155344 +               if (ret > 0) {
155345 +                       err = uac_clock_selector_set_val(chip, entity_id, cur);
155346 +                       if (err < 0)
155347 +                               return err;
155348 +               }
155350                 if (!validate || ret > 0 || !chip->autoclock)
155351                         return ret;
155353                 /* The current clock source is invalid, try others. */
155354                 for (i = 1; i <= selector->bNrInPins; i++) {
155355 -                       int err;
155357                         if (i == cur)
155358                                 continue;
155360 @@ -396,7 +400,7 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip,
155362         selector = snd_usb_find_clock_selector_v3(chip->ctrl_intf, entity_id);
155363         if (selector) {
155364 -               int ret, i, cur;
155365 +               int ret, i, cur, err;
155367                 /* the entity ID we are looking for is a selector.
155368                  * find out what it currently selects */
155369 @@ -418,6 +422,12 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip,
155370                 ret = __uac3_clock_find_source(chip, fmt,
155371                                                selector->baCSourceID[ret - 1],
155372                                                visited, validate);
155373 +               if (ret > 0) {
155374 +                       err = uac_clock_selector_set_val(chip, entity_id, cur);
155375 +                       if (err < 0)
155376 +                               return err;
155377 +               }
155379                 if (!validate || ret > 0 || !chip->autoclock)
155380                         return ret;
155382 diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
155383 index 102d53515a76..933586a895e7 100644
155384 --- a/sound/usb/endpoint.c
155385 +++ b/sound/usb/endpoint.c
155386 @@ -1442,11 +1442,11 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep)
155387         if (snd_BUG_ON(!atomic_read(&ep->running)))
155388                 return;
155390 -       if (ep->sync_source)
155391 -               WRITE_ONCE(ep->sync_source->sync_sink, NULL);
155393 -       if (!atomic_dec_return(&ep->running))
155394 +       if (!atomic_dec_return(&ep->running)) {
155395 +               if (ep->sync_source)
155396 +                       WRITE_ONCE(ep->sync_source->sync_sink, NULL);
155397                 stop_urbs(ep, false);
155398 +       }
155401  /**
155402 diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
155403 index fdbdfb7bce92..fa8e8faf3eb3 100644
155404 --- a/sound/usb/line6/pcm.c
155405 +++ b/sound/usb/line6/pcm.c
155406 @@ -127,7 +127,7 @@ static void line6_wait_clear_audio_urbs(struct snd_line6_pcm *line6pcm,
155407                 if (!alive)
155408                         break;
155409                 set_current_state(TASK_UNINTERRUPTIBLE);
155410 -               schedule_timeout(1);
155411 +               schedule_min_hrtimeout();
155412         } while (--timeout > 0);
155413         if (alive)
155414                 dev_err(line6pcm->line6->ifcdev,
155415 diff --git a/sound/usb/midi.c b/sound/usb/midi.c
155416 index 0c23fa6d8525..cd46ca7cd28d 100644
155417 --- a/sound/usb/midi.c
155418 +++ b/sound/usb/midi.c
155419 @@ -1332,7 +1332,7 @@ static int snd_usbmidi_in_endpoint_create(struct snd_usb_midi *umidi,
155421   error:
155422         snd_usbmidi_in_endpoint_delete(ep);
155423 -       return -ENOMEM;
155424 +       return err;
155428 diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
155429 index 646deb6244b1..c5794e83fd80 100644
155430 --- a/sound/usb/mixer_maps.c
155431 +++ b/sound/usb/mixer_maps.c
155432 @@ -337,6 +337,13 @@ static const struct usbmix_name_map bose_companion5_map[] = {
155433         { 0 }   /* terminator */
155436 +/* Sennheiser Communications Headset [PC 8], the dB value is reported as -6 negative maximum  */
155437 +static const struct usbmix_dB_map sennheiser_pc8_dB = {-9500, 0};
155438 +static const struct usbmix_name_map sennheiser_pc8_map[] = {
155439 +       { 9, NULL, .dB = &sennheiser_pc8_dB },
155440 +       { 0 }   /* terminator */
155444   * Dell usb dock with ALC4020 codec had a firmware problem where it got
155445   * screwed up when zero volume is passed; just skip it as a workaround
155446 @@ -593,6 +600,11 @@ static const struct usbmix_ctl_map usbmix_ctl_maps[] = {
155447                 .id = USB_ID(0x17aa, 0x1046),
155448                 .map = lenovo_p620_rear_map,
155449         },
155450 +       {
155451 +               /* Sennheiser Communications Headset [PC 8] */
155452 +               .id = USB_ID(0x1395, 0x0025),
155453 +               .map = sennheiser_pc8_map,
155454 +       },
155455         { 0 } /* terminator */
155458 diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
155459 index 1165a5ac60f2..8a8fe2b980a1 100644
155460 --- a/sound/usb/quirks-table.h
155461 +++ b/sound/usb/quirks-table.h
155462 @@ -2376,6 +2376,16 @@ YAMAHA_DEVICE(0x7010, "UB99"),
155463         }
155467 +       USB_DEVICE_VENDOR_SPEC(0x0944, 0x0204),
155468 +       .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
155469 +               .vendor_name = "KORG, Inc.",
155470 +               /* .product_name = "ToneLab EX", */
155471 +               .ifnum = 3,
155472 +               .type = QUIRK_MIDI_STANDARD_INTERFACE,
155473 +       }
155476  /* AKAI devices */
155478         USB_DEVICE(0x09e8, 0x0062),
155479 @@ -3817,6 +3827,69 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
155480                 }
155481         }
155484 +       /*
155485 +        * Pioneer DJ DJM-850
155486 +        * 8 channels playback and 8 channels capture @ 44.1/48/96kHz S24LE
155487 +        * Playback on EP 0x05
155488 +        * Capture on EP 0x86
155489 +        */
155490 +       USB_DEVICE_VENDOR_SPEC(0x08e4, 0x0163),
155491 +       .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
155492 +               .ifnum = QUIRK_ANY_INTERFACE,
155493 +               .type = QUIRK_COMPOSITE,
155494 +               .data = (const struct snd_usb_audio_quirk[]) {
155495 +                       {
155496 +                               .ifnum = 0,
155497 +                               .type = QUIRK_AUDIO_FIXED_ENDPOINT,
155498 +                               .data = &(const struct audioformat) {
155499 +                                       .formats = SNDRV_PCM_FMTBIT_S24_3LE,
155500 +                                       .channels = 8,
155501 +                                       .iface = 0,
155502 +                                       .altsetting = 1,
155503 +                                       .altset_idx = 1,
155504 +                                       .endpoint = 0x05,
155505 +                                       .ep_attr = USB_ENDPOINT_XFER_ISOC|
155506 +                                           USB_ENDPOINT_SYNC_ASYNC|
155507 +                                               USB_ENDPOINT_USAGE_DATA,
155508 +                                       .rates = SNDRV_PCM_RATE_44100|
155509 +                                               SNDRV_PCM_RATE_48000|
155510 +                                               SNDRV_PCM_RATE_96000,
155511 +                                       .rate_min = 44100,
155512 +                                       .rate_max = 96000,
155513 +                                       .nr_rates = 3,
155514 +                                       .rate_table = (unsigned int[]) { 44100, 48000, 96000 }
155515 +                               }
155516 +                       },
155517 +                       {
155518 +                               .ifnum = 0,
155519 +                               .type = QUIRK_AUDIO_FIXED_ENDPOINT,
155520 +                               .data = &(const struct audioformat) {
155521 +                                       .formats = SNDRV_PCM_FMTBIT_S24_3LE,
155522 +                                       .channels = 8,
155523 +                                       .iface = 0,
155524 +                                       .altsetting = 1,
155525 +                                       .altset_idx = 1,
155526 +                                       .endpoint = 0x86,
155527 +                                       .ep_idx = 1,
155528 +                                       .ep_attr = USB_ENDPOINT_XFER_ISOC|
155529 +                                               USB_ENDPOINT_SYNC_ASYNC|
155530 +                                               USB_ENDPOINT_USAGE_DATA,
155531 +                                       .rates = SNDRV_PCM_RATE_44100|
155532 +                                               SNDRV_PCM_RATE_48000|
155533 +                                               SNDRV_PCM_RATE_96000,
155534 +                                       .rate_min = 44100,
155535 +                                       .rate_max = 96000,
155536 +                                       .nr_rates = 3,
155537 +                                       .rate_table = (unsigned int[]) { 44100, 48000, 96000 }
155538 +                               }
155539 +                       },
155540 +                       {
155541 +                               .ifnum = -1
155542 +                       }
155543 +               }
155544 +       }
155547         /*
155548          * Pioneer DJ DJM-450
155549 diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
155550 index 176437a441e6..7c6e83eee71d 100644
155551 --- a/sound/usb/quirks.c
155552 +++ b/sound/usb/quirks.c
155553 @@ -55,8 +55,12 @@ static int create_composite_quirk(struct snd_usb_audio *chip,
155554                 if (!iface)
155555                         continue;
155556                 if (quirk->ifnum != probed_ifnum &&
155557 -                   !usb_interface_claimed(iface))
155558 -                       usb_driver_claim_interface(driver, iface, (void *)-1L);
155559 +                   !usb_interface_claimed(iface)) {
155560 +                       err = usb_driver_claim_interface(driver, iface,
155561 +                                                        USB_AUDIO_IFACE_UNUSED);
155562 +                       if (err < 0)
155563 +                               return err;
155564 +               }
155565         }
155567         return 0;
155568 @@ -426,8 +430,12 @@ static int create_autodetect_quirks(struct snd_usb_audio *chip,
155569                         continue;
155571                 err = create_autodetect_quirk(chip, iface, driver);
155572 -               if (err >= 0)
155573 -                       usb_driver_claim_interface(driver, iface, (void *)-1L);
155574 +               if (err >= 0) {
155575 +                       err = usb_driver_claim_interface(driver, iface,
155576 +                                                        USB_AUDIO_IFACE_UNUSED);
155577 +                       if (err < 0)
155578 +                               return err;
155579 +               }
155580         }
155582         return 0;
155583 diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
155584 index 60b9dd7df6bb..8794c8658ab9 100644
155585 --- a/sound/usb/usbaudio.h
155586 +++ b/sound/usb/usbaudio.h
155587 @@ -61,6 +61,8 @@ struct snd_usb_audio {
155588         struct media_intf_devnode *ctl_intf_media_devnode;
155591 +#define USB_AUDIO_IFACE_UNUSED ((void *)-1L)
155593  #define usb_audio_err(chip, fmt, args...) \
155594         dev_err(&(chip)->dev->dev, fmt, ##args)
155595  #define usb_audio_warn(chip, fmt, args...) \
155596 diff --git a/tools/arch/x86/include/asm/unistd_64.h b/tools/arch/x86/include/asm/unistd_64.h
155597 index 4205ed4158bf..b65c51e8d675 100644
155598 --- a/tools/arch/x86/include/asm/unistd_64.h
155599 +++ b/tools/arch/x86/include/asm/unistd_64.h
155600 @@ -17,3 +17,15 @@
155601  #ifndef __NR_setns
155602  #define __NR_setns 308
155603  #endif
155605 +#ifndef __NR_futex_wait
155606 +# define __NR_futex_wait 443
155607 +#endif
155609 +#ifndef __NR_futex_wake
155610 +# define __NR_futex_wake 444
155611 +#endif
155613 +#ifndef __NR_futex_requeue
155614 +# define __NR_futex_requeue 446
155615 +#endif
155616 diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c
155617 index fe9e7b3a4b50..1326fff3629b 100644
155618 --- a/tools/bpf/bpftool/btf.c
155619 +++ b/tools/bpf/bpftool/btf.c
155620 @@ -538,6 +538,7 @@ static int do_dump(int argc, char **argv)
155621                         NEXT_ARG();
155622                         if (argc < 1) {
155623                                 p_err("expecting value for 'format' option\n");
155624 +                               err = -EINVAL;
155625                                 goto done;
155626                         }
155627                         if (strcmp(*argv, "c") == 0) {
155628 @@ -547,11 +548,13 @@ static int do_dump(int argc, char **argv)
155629                         } else {
155630                                 p_err("unrecognized format specifier: '%s', possible values: raw, c",
155631                                       *argv);
155632 +                               err = -EINVAL;
155633                                 goto done;
155634                         }
155635                         NEXT_ARG();
155636                 } else {
155637                         p_err("unrecognized option: '%s'", *argv);
155638 +                       err = -EINVAL;
155639                         goto done;
155640                 }
155641         }
155642 diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
155643 index b86f450e6fce..d9afb730136a 100644
155644 --- a/tools/bpf/bpftool/main.c
155645 +++ b/tools/bpf/bpftool/main.c
155646 @@ -276,7 +276,7 @@ static int do_batch(int argc, char **argv)
155647         int n_argc;
155648         FILE *fp;
155649         char *cp;
155650 -       int err;
155651 +       int err = 0;
155652         int i;
155654         if (argc < 2) {
155655 @@ -370,7 +370,6 @@ static int do_batch(int argc, char **argv)
155656         } else {
155657                 if (!json_output)
155658                         printf("processed %d commands\n", lines);
155659 -               err = 0;
155660         }
155661  err_close:
155662         if (fp != stdin)
155663 diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
155664 index b400364ee054..09ae0381205b 100644
155665 --- a/tools/bpf/bpftool/map.c
155666 +++ b/tools/bpf/bpftool/map.c
155667 @@ -100,7 +100,7 @@ static int do_dump_btf(const struct btf_dumper *d,
155668                        void *value)
155670         __u32 value_id;
155671 -       int ret;
155672 +       int ret = 0;
155674         /* start of key-value pair */
155675         jsonw_start_object(d->jw);
155676 diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
155677 index ce58cff99b66..2a6adca37fe9 100644
155678 --- a/tools/include/uapi/asm-generic/unistd.h
155679 +++ b/tools/include/uapi/asm-generic/unistd.h
155680 @@ -864,8 +864,17 @@ __SC_COMP(__NR_epoll_pwait2, sys_epoll_pwait2, compat_sys_epoll_pwait2)
155681  #define __NR_mount_setattr 442
155682  __SYSCALL(__NR_mount_setattr, sys_mount_setattr)
155684 +#define __NR_futex_wait 443
155685 +__SYSCALL(__NR_futex_wait, sys_futex_wait)
155687 +#define __NR_futex_wake 444
155688 +__SYSCALL(__NR_futex_wake, sys_futex_wake)
155690 +#define __NR_futex_waitv 445
155691 +__SC_COMP(__NR_futex_waitv, sys_futex_waitv, compat_sys_futex_waitv)
155693  #undef __NR_syscalls
155694 -#define __NR_syscalls 443
155695 +#define __NR_syscalls 446
155698   * 32 bit systems traditionally used different
155699 diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h
155700 index 53b3e199fb25..09ebe3db5f2f 100644
155701 --- a/tools/lib/bpf/bpf_core_read.h
155702 +++ b/tools/lib/bpf/bpf_core_read.h
155703 @@ -88,11 +88,19 @@ enum bpf_enum_value_kind {
155704         const void *p = (const void *)s + __CORE_RELO(s, field, BYTE_OFFSET); \
155705         unsigned long long val;                                               \
155706                                                                               \
155707 +       /* This is a so-called barrier_var() operation that makes specified   \
155708 +        * variable "a black box" for optimizing compiler.                    \
155709 +        * It forces compiler to perform BYTE_OFFSET relocation on p and use  \
155710 +        * its calculated value in the switch below, instead of applying      \
155711 +        * the same relocation 4 times for each individual memory load.       \
155712 +        */                                                                   \
155713 +       asm volatile("" : "=r"(p) : "0"(p));                                  \
155714 +                                                                             \
155715         switch (__CORE_RELO(s, field, BYTE_SIZE)) {                           \
155716 -       case 1: val = *(const unsigned char *)p;                              \
155717 -       case 2: val = *(const unsigned short *)p;                             \
155718 -       case 4: val = *(const unsigned int *)p;                               \
155719 -       case 8: val = *(const unsigned long long *)p;                         \
155720 +       case 1: val = *(const unsigned char *)p; break;                       \
155721 +       case 2: val = *(const unsigned short *)p; break;                      \
155722 +       case 4: val = *(const unsigned int *)p; break;                        \
155723 +       case 8: val = *(const unsigned long long *)p; break;                  \
155724         }                                                                     \
155725         val <<= __CORE_RELO(s, field, LSHIFT_U64);                            \
155726         if (__CORE_RELO(s, field, SIGNED))                                    \
155727 diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
155728 index f9ef37707888..1c2e91ee041d 100644
155729 --- a/tools/lib/bpf/bpf_tracing.h
155730 +++ b/tools/lib/bpf/bpf_tracing.h
155731 @@ -413,20 +413,38 @@ typeof(name(0)) name(struct pt_regs *ctx)                             \
155732  }                                                                          \
155733  static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
155735 +#define ___bpf_fill0(arr, p, x) do {} while (0)
155736 +#define ___bpf_fill1(arr, p, x) arr[p] = x
155737 +#define ___bpf_fill2(arr, p, x, args...) arr[p] = x; ___bpf_fill1(arr, p + 1, args)
155738 +#define ___bpf_fill3(arr, p, x, args...) arr[p] = x; ___bpf_fill2(arr, p + 1, args)
155739 +#define ___bpf_fill4(arr, p, x, args...) arr[p] = x; ___bpf_fill3(arr, p + 1, args)
155740 +#define ___bpf_fill5(arr, p, x, args...) arr[p] = x; ___bpf_fill4(arr, p + 1, args)
155741 +#define ___bpf_fill6(arr, p, x, args...) arr[p] = x; ___bpf_fill5(arr, p + 1, args)
155742 +#define ___bpf_fill7(arr, p, x, args...) arr[p] = x; ___bpf_fill6(arr, p + 1, args)
155743 +#define ___bpf_fill8(arr, p, x, args...) arr[p] = x; ___bpf_fill7(arr, p + 1, args)
155744 +#define ___bpf_fill9(arr, p, x, args...) arr[p] = x; ___bpf_fill8(arr, p + 1, args)
155745 +#define ___bpf_fill10(arr, p, x, args...) arr[p] = x; ___bpf_fill9(arr, p + 1, args)
155746 +#define ___bpf_fill11(arr, p, x, args...) arr[p] = x; ___bpf_fill10(arr, p + 1, args)
155747 +#define ___bpf_fill12(arr, p, x, args...) arr[p] = x; ___bpf_fill11(arr, p + 1, args)
155748 +#define ___bpf_fill(arr, args...) \
155749 +       ___bpf_apply(___bpf_fill, ___bpf_narg(args))(arr, 0, args)
155752   * BPF_SEQ_PRINTF to wrap bpf_seq_printf to-be-printed values
155753   * in a structure.
155754   */
155755 -#define BPF_SEQ_PRINTF(seq, fmt, args...)                                  \
155756 -       ({                                                                  \
155757 -               _Pragma("GCC diagnostic push")                              \
155758 -               _Pragma("GCC diagnostic ignored \"-Wint-conversion\"")      \
155759 -               static const char ___fmt[] = fmt;                           \
155760 -               unsigned long long ___param[] = { args };                   \
155761 -               _Pragma("GCC diagnostic pop")                               \
155762 -               int ___ret = bpf_seq_printf(seq, ___fmt, sizeof(___fmt),    \
155763 -                                           ___param, sizeof(___param));    \
155764 -               ___ret;                                                     \
155765 -       })
155766 +#define BPF_SEQ_PRINTF(seq, fmt, args...)                      \
155767 +({                                                             \
155768 +       static const char ___fmt[] = fmt;                       \
155769 +       unsigned long long ___param[___bpf_narg(args)];         \
155770 +                                                               \
155771 +       _Pragma("GCC diagnostic push")                          \
155772 +       _Pragma("GCC diagnostic ignored \"-Wint-conversion\"")  \
155773 +       ___bpf_fill(___param, args);                            \
155774 +       _Pragma("GCC diagnostic pop")                           \
155775 +                                                               \
155776 +       bpf_seq_printf(seq, ___fmt, sizeof(___fmt),             \
155777 +                      ___param, sizeof(___param));             \
155780  #endif
155781 diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
155782 index 1237bcd1dd17..5b8a6ea44b38 100644
155783 --- a/tools/lib/bpf/btf.h
155784 +++ b/tools/lib/bpf/btf.h
155785 @@ -173,6 +173,7 @@ struct btf_dump_emit_type_decl_opts {
155786         int indent_level;
155787         /* strip all the const/volatile/restrict mods */
155788         bool strip_mods;
155789 +       size_t :0;
155791  #define btf_dump_emit_type_decl_opts__last_field strip_mods
155793 diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
155794 index 3c35eb401931..3d690d4e785c 100644
155795 --- a/tools/lib/bpf/libbpf.h
155796 +++ b/tools/lib/bpf/libbpf.h
155797 @@ -507,6 +507,7 @@ struct xdp_link_info {
155798  struct bpf_xdp_set_link_opts {
155799         size_t sz;
155800         int old_fd;
155801 +       size_t :0;
155803  #define bpf_xdp_set_link_opts__last_field old_fd
155805 diff --git a/tools/lib/bpf/ringbuf.c b/tools/lib/bpf/ringbuf.c
155806 index e7a8d847161f..1d80ad4e0de8 100644
155807 --- a/tools/lib/bpf/ringbuf.c
155808 +++ b/tools/lib/bpf/ringbuf.c
155809 @@ -202,9 +202,11 @@ static inline int roundup_len(__u32 len)
155810         return (len + 7) / 8 * 8;
155813 -static int ringbuf_process_ring(struct ring* r)
155814 +static int64_t ringbuf_process_ring(struct ring* r)
155816 -       int *len_ptr, len, err, cnt = 0;
155817 +       int *len_ptr, len, err;
155818 +       /* 64-bit to avoid overflow in case of extreme application behavior */
155819 +       int64_t cnt = 0;
155820         unsigned long cons_pos, prod_pos;
155821         bool got_new_data;
155822         void *sample;
155823 @@ -244,12 +246,14 @@ static int ringbuf_process_ring(struct ring* r)
155826  /* Consume available ring buffer(s) data without event polling.
155827 - * Returns number of records consumed across all registered ring buffers, or
155828 - * negative number if any of the callbacks return error.
155829 + * Returns number of records consumed across all registered ring buffers (or
155830 + * INT_MAX, whichever is less), or negative number if any of the callbacks
155831 + * return error.
155832   */
155833  int ring_buffer__consume(struct ring_buffer *rb)
155835 -       int i, err, res = 0;
155836 +       int64_t err, res = 0;
155837 +       int i;
155839         for (i = 0; i < rb->ring_cnt; i++) {
155840                 struct ring *ring = &rb->rings[i];
155841 @@ -259,18 +263,24 @@ int ring_buffer__consume(struct ring_buffer *rb)
155842                         return err;
155843                 res += err;
155844         }
155845 +       if (res > INT_MAX)
155846 +               return INT_MAX;
155847         return res;
155850  /* Poll for available data and consume records, if any are available.
155851 - * Returns number of records consumed, or negative number, if any of the
155852 - * registered callbacks returned error.
155853 + * Returns number of records consumed (or INT_MAX, whichever is less), or
155854 + * negative number, if any of the registered callbacks returned error.
155855   */
155856  int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
155858 -       int i, cnt, err, res = 0;
155859 +       int i, cnt;
155860 +       int64_t err, res = 0;
155862         cnt = epoll_wait(rb->epoll_fd, rb->events, rb->ring_cnt, timeout_ms);
155863 +       if (cnt < 0)
155864 +               return -errno;
155866         for (i = 0; i < cnt; i++) {
155867                 __u32 ring_id = rb->events[i].data.fd;
155868                 struct ring *ring = &rb->rings[ring_id];
155869 @@ -280,7 +290,9 @@ int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
155870                         return err;
155871                 res += err;
155872         }
155873 -       return cnt < 0 ? -errno : res;
155874 +       if (res > INT_MAX)
155875 +               return INT_MAX;
155876 +       return res;
155879  /* Get an fd that can be used to sleep until data is available in the ring(s) */
155880 diff --git a/tools/lib/perf/include/perf/event.h b/tools/lib/perf/include/perf/event.h
155881 index d82054225fcc..4d0c02ba3f7d 100644
155882 --- a/tools/lib/perf/include/perf/event.h
155883 +++ b/tools/lib/perf/include/perf/event.h
155884 @@ -8,6 +8,8 @@
155885  #include <linux/bpf.h>
155886  #include <sys/types.h> /* pid_t */
155888 +#define event_contains(obj, mem) ((obj).header.size > offsetof(typeof(obj), mem))
155890  struct perf_record_mmap {
155891         struct perf_event_header header;
155892         __u32                    pid, tid;
155893 @@ -346,8 +348,9 @@ struct perf_record_time_conv {
155894         __u64                    time_zero;
155895         __u64                    time_cycles;
155896         __u64                    time_mask;
155897 -       bool                     cap_user_time_zero;
155898 -       bool                     cap_user_time_short;
155899 +       __u8                     cap_user_time_zero;
155900 +       __u8                     cap_user_time_short;
155901 +       __u8                     reserved[6];   /* For alignment */
155904  struct perf_record_header_feature {
155905 diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
155906 index d8e59d31399a..c955cd683e22 100644
155907 --- a/tools/perf/Makefile.config
155908 +++ b/tools/perf/Makefile.config
155909 @@ -530,6 +530,7 @@ ifndef NO_LIBELF
155910        ifdef LIBBPF_DYNAMIC
155911          ifeq ($(feature-libbpf), 1)
155912            EXTLIBS += -lbpf
155913 +          $(call detected,CONFIG_LIBBPF_DYNAMIC)
155914          else
155915            dummy := $(error Error: No libbpf devel library found, please install libbpf-devel);
155916          endif
155917 diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
155918 index 7bf01cbe582f..86d1b0fae558 100644
155919 --- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
155920 +++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
155921 @@ -364,6 +364,10 @@
155922  440    common  process_madvise         sys_process_madvise
155923  441    common  epoll_pwait2            sys_epoll_pwait2
155924  442    common  mount_setattr           sys_mount_setattr
155925 +443    common  futex_wait              sys_futex_wait
155926 +444    common  futex_wake              sys_futex_wake
155927 +445    common  futex_waitv             sys_futex_waitv
155928 +446    common  futex_requeue           sys_futex_requeue
155931  # Due to a historical design error, certain syscalls are numbered differently
155932 diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h
155933 index eac36afab2b3..12346844b354 100644
155934 --- a/tools/perf/bench/bench.h
155935 +++ b/tools/perf/bench/bench.h
155936 @@ -38,9 +38,13 @@ int bench_mem_memcpy(int argc, const char **argv);
155937  int bench_mem_memset(int argc, const char **argv);
155938  int bench_mem_find_bit(int argc, const char **argv);
155939  int bench_futex_hash(int argc, const char **argv);
155940 +int bench_futex2_hash(int argc, const char **argv);
155941  int bench_futex_wake(int argc, const char **argv);
155942 +int bench_futex2_wake(int argc, const char **argv);
155943  int bench_futex_wake_parallel(int argc, const char **argv);
155944 +int bench_futex2_wake_parallel(int argc, const char **argv);
155945  int bench_futex_requeue(int argc, const char **argv);
155946 +int bench_futex2_requeue(int argc, const char **argv);
155947  /* pi futexes */
155948  int bench_futex_lock_pi(int argc, const char **argv);
155949  int bench_epoll_wait(int argc, const char **argv);
155950 diff --git a/tools/perf/bench/futex-hash.c b/tools/perf/bench/futex-hash.c
155951 index b65373ce5c4f..1068749af40c 100644
155952 --- a/tools/perf/bench/futex-hash.c
155953 +++ b/tools/perf/bench/futex-hash.c
155954 @@ -33,7 +33,7 @@ static unsigned int nthreads = 0;
155955  static unsigned int nsecs    = 10;
155956  /* amount of futexes per thread */
155957  static unsigned int nfutexes = 1024;
155958 -static bool fshared = false, done = false, silent = false;
155959 +static bool fshared = false, done = false, silent = false, futex2 = false;
155960  static int futex_flag = 0;
155962  struct timeval bench__start, bench__end, bench__runtime;
155963 @@ -85,7 +85,10 @@ static void *workerfn(void *arg)
155964                          * such as internal waitqueue handling, thus enlarging
155965                          * the critical region protected by hb->lock.
155966                          */
155967 -                       ret = futex_wait(&w->futex[i], 1234, NULL, futex_flag);
155968 +                       if (!futex2)
155969 +                               ret = futex_wait(&w->futex[i], 1234, NULL, futex_flag);
155970 +                       else
155971 +                               ret = futex2_wait(&w->futex[i], 1234, futex_flag, NULL);
155972                         if (!silent &&
155973                             (!ret || errno != EAGAIN || errno != EWOULDBLOCK))
155974                                 warn("Non-expected futex return call");
155975 @@ -116,7 +119,7 @@ static void print_summary(void)
155976                (int)bench__runtime.tv_sec);
155979 -int bench_futex_hash(int argc, const char **argv)
155980 +static int __bench_futex_hash(int argc, const char **argv)
155982         int ret = 0;
155983         cpu_set_t cpuset;
155984 @@ -148,7 +151,9 @@ int bench_futex_hash(int argc, const char **argv)
155985         if (!worker)
155986                 goto errmem;
155988 -       if (!fshared)
155989 +       if (futex2)
155990 +               futex_flag = FUTEX_32 | (fshared * FUTEX_SHARED_FLAG);
155991 +       else if (!fshared)
155992                 futex_flag = FUTEX_PRIVATE_FLAG;
155994         printf("Run summary [PID %d]: %d threads, each operating on %d [%s] futexes for %d secs.\n\n",
155995 @@ -228,3 +233,14 @@ int bench_futex_hash(int argc, const char **argv)
155996  errmem:
155997         err(EXIT_FAILURE, "calloc");
156000 +int bench_futex_hash(int argc, const char **argv)
156002 +       return __bench_futex_hash(argc, argv);
156005 +int bench_futex2_hash(int argc, const char **argv)
156007 +       futex2 = true;
156008 +       return __bench_futex_hash(argc, argv);
156010 diff --git a/tools/perf/bench/futex-requeue.c b/tools/perf/bench/futex-requeue.c
156011 index 5fa23295ee5f..6cdd649b54f4 100644
156012 --- a/tools/perf/bench/futex-requeue.c
156013 +++ b/tools/perf/bench/futex-requeue.c
156014 @@ -2,8 +2,8 @@
156016   * Copyright (C) 2013  Davidlohr Bueso <davidlohr@hp.com>
156017   *
156018 - * futex-requeue: Block a bunch of threads on futex1 and requeue them
156019 - *                on futex2, N at a time.
156020 + * futex-requeue: Block a bunch of threads on addr1 and requeue them
156021 + *                on addr2, N at a time.
156022   *
156023   * This program is particularly useful to measure the latency of nthread
156024   * requeues without waking up any tasks -- thus mimicking a regular futex_wait.
156025 @@ -28,7 +28,10 @@
156026  #include <stdlib.h>
156027  #include <sys/time.h>
156029 -static u_int32_t futex1 = 0, futex2 = 0;
156030 +static u_int32_t addr1 = 0, addr2 = 0;
156032 +static struct futex_requeue rq1 = { .uaddr = &addr1, .flags = FUTEX_32 };
156033 +static struct futex_requeue rq2 = { .uaddr = &addr2, .flags = FUTEX_32 };
156036   * How many tasks to requeue at a time.
156037 @@ -37,7 +40,7 @@ static u_int32_t futex1 = 0, futex2 = 0;
156038  static unsigned int nrequeue = 1;
156040  static pthread_t *worker;
156041 -static bool done = false, silent = false, fshared = false;
156042 +static bool done = false, silent = false, fshared = false, futex2 = false;
156043  static pthread_mutex_t thread_lock;
156044  static pthread_cond_t thread_parent, thread_worker;
156045  static struct stats requeuetime_stats, requeued_stats;
156046 @@ -79,7 +82,11 @@ static void *workerfn(void *arg __maybe_unused)
156047         pthread_cond_wait(&thread_worker, &thread_lock);
156048         pthread_mutex_unlock(&thread_lock);
156050 -       futex_wait(&futex1, 0, NULL, futex_flag);
156051 +       if (!futex2)
156052 +               futex_wait(&addr1, 0, NULL, futex_flag);
156053 +       else
156054 +               futex2_wait(&addr1, 0, futex_flag, NULL);
156056         return NULL;
156059 @@ -111,7 +118,7 @@ static void toggle_done(int sig __maybe_unused,
156060         done = true;
156063 -int bench_futex_requeue(int argc, const char **argv)
156064 +static int __bench_futex_requeue(int argc, const char **argv)
156066         int ret = 0;
156067         unsigned int i, j;
156068 @@ -139,15 +146,20 @@ int bench_futex_requeue(int argc, const char **argv)
156069         if (!worker)
156070                 err(EXIT_FAILURE, "calloc");
156072 -       if (!fshared)
156073 +       if (futex2) {
156074 +               futex_flag = FUTEX_32 | (fshared * FUTEX_SHARED_FLAG);
156075 +               rq1.flags |= FUTEX_SHARED_FLAG * fshared;
156076 +               rq2.flags |= FUTEX_SHARED_FLAG * fshared;
156077 +       } else if (!fshared) {
156078                 futex_flag = FUTEX_PRIVATE_FLAG;
156079 +       }
156081         if (nrequeue > nthreads)
156082                 nrequeue = nthreads;
156084         printf("Run summary [PID %d]: Requeuing %d threads (from [%s] %p to %p), "
156085                "%d at a time.\n\n",  getpid(), nthreads,
156086 -              fshared ? "shared":"private", &futex1, &futex2, nrequeue);
156087 +              fshared ? "shared":"private", &addr1, &addr2, nrequeue);
156089         init_stats(&requeued_stats);
156090         init_stats(&requeuetime_stats);
156091 @@ -176,11 +188,15 @@ int bench_futex_requeue(int argc, const char **argv)
156092                 gettimeofday(&start, NULL);
156093                 while (nrequeued < nthreads) {
156094                         /*
156095 -                        * Do not wakeup any tasks blocked on futex1, allowing
156096 +                        * Do not wakeup any tasks blocked on addr1, allowing
156097                          * us to really measure futex_wait functionality.
156098                          */
156099 -                       nrequeued += futex_cmp_requeue(&futex1, 0, &futex2, 0,
156100 -                                                      nrequeue, futex_flag);
156101 +                       if (!futex2)
156102 +                               nrequeued += futex_cmp_requeue(&addr1, 0, &addr2,
156103 +                                                       0, nrequeue, futex_flag);
156104 +                       else
156105 +                               nrequeued += futex2_requeue(&rq1, &rq2,
156106 +                                                       0, nrequeue, 0, 0);
156107                 }
156109                 gettimeofday(&end, NULL);
156110 @@ -194,8 +210,12 @@ int bench_futex_requeue(int argc, const char **argv)
156111                                j + 1, nrequeued, nthreads, runtime.tv_usec / (double)USEC_PER_MSEC);
156112                 }
156114 -               /* everybody should be blocked on futex2, wake'em up */
156115 -               nrequeued = futex_wake(&futex2, nrequeued, futex_flag);
156116 +               /* everybody should be blocked on addr2, wake'em up */
156117 +               if (!futex2)
156118 +                       nrequeued = futex_wake(&addr2, nrequeued, futex_flag);
156119 +               else
156120 +                       nrequeued = futex2_wake(&addr2, nrequeued, futex_flag);
156122                 if (nthreads != nrequeued)
156123                         warnx("couldn't wakeup all tasks (%d/%d)", nrequeued, nthreads);
156125 @@ -220,3 +240,14 @@ int bench_futex_requeue(int argc, const char **argv)
156126         usage_with_options(bench_futex_requeue_usage, options);
156127         exit(EXIT_FAILURE);
156130 +int bench_futex_requeue(int argc, const char **argv)
156132 +       return __bench_futex_requeue(argc, argv);
156135 +int bench_futex2_requeue(int argc, const char **argv)
156137 +       futex2 = true;
156138 +       return __bench_futex_requeue(argc, argv);
156140 diff --git a/tools/perf/bench/futex-wake-parallel.c b/tools/perf/bench/futex-wake-parallel.c
156141 index 6e6f5247e1fe..cac90fc0bfb3 100644
156142 --- a/tools/perf/bench/futex-wake-parallel.c
156143 +++ b/tools/perf/bench/futex-wake-parallel.c
156144 @@ -17,6 +17,12 @@ int bench_futex_wake_parallel(int argc __maybe_unused, const char **argv __maybe
156145         pr_err("%s: pthread_barrier_t unavailable, disabling this test...\n", __func__);
156146         return 0;
156149 +int bench_futex2_wake_parallel(int argc __maybe_unused, const char **argv __maybe_unused)
156151 +       pr_err("%s: pthread_barrier_t unavailable, disabling this test...\n", __func__);
156152 +       return 0;
156154  #else /* HAVE_PTHREAD_BARRIER */
156155  /* For the CLR_() macros */
156156  #include <string.h>
156157 @@ -47,7 +53,7 @@ static unsigned int nwakes = 1;
156158  static u_int32_t futex = 0;
156160  static pthread_t *blocked_worker;
156161 -static bool done = false, silent = false, fshared = false;
156162 +static bool done = false, silent = false, fshared = false, futex2 = false;
156163  static unsigned int nblocked_threads = 0, nwaking_threads = 0;
156164  static pthread_mutex_t thread_lock;
156165  static pthread_cond_t thread_parent, thread_worker;
156166 @@ -78,7 +84,11 @@ static void *waking_workerfn(void *arg)
156168         gettimeofday(&start, NULL);
156170 -       waker->nwoken = futex_wake(&futex, nwakes, futex_flag);
156171 +       if (!futex2)
156172 +               waker->nwoken = futex_wake(&futex, nwakes, futex_flag);
156173 +       else
156174 +               waker->nwoken = futex2_wake(&futex, nwakes, futex_flag);
156176         if (waker->nwoken != nwakes)
156177                 warnx("couldn't wakeup all tasks (%d/%d)",
156178                       waker->nwoken, nwakes);
156179 @@ -129,8 +139,13 @@ static void *blocked_workerfn(void *arg __maybe_unused)
156180         pthread_mutex_unlock(&thread_lock);
156182         while (1) { /* handle spurious wakeups */
156183 -               if (futex_wait(&futex, 0, NULL, futex_flag) != EINTR)
156184 -                       break;
156185 +               if (!futex2) {
156186 +                       if (futex_wait(&futex, 0, NULL, futex_flag) != EINTR)
156187 +                               break;
156188 +               } else {
156189 +                       if (futex2_wait(&futex, 0, futex_flag, NULL) != EINTR)
156190 +                               break;
156191 +               }
156192         }
156194         pthread_exit(NULL);
156195 @@ -217,7 +232,7 @@ static void toggle_done(int sig __maybe_unused,
156196         done = true;
156199 -int bench_futex_wake_parallel(int argc, const char **argv)
156200 +static int __bench_futex_wake_parallel(int argc, const char **argv)
156202         int ret = 0;
156203         unsigned int i, j;
156204 @@ -261,7 +276,9 @@ int bench_futex_wake_parallel(int argc, const char **argv)
156205         if (!blocked_worker)
156206                 err(EXIT_FAILURE, "calloc");
156208 -       if (!fshared)
156209 +       if (futex2)
156210 +               futex_flag = FUTEX_32 | (fshared * FUTEX_SHARED_FLAG);
156211 +       else if (!fshared)
156212                 futex_flag = FUTEX_PRIVATE_FLAG;
156214         printf("Run summary [PID %d]: blocking on %d threads (at [%s] "
156215 @@ -321,4 +338,16 @@ int bench_futex_wake_parallel(int argc, const char **argv)
156216         free(blocked_worker);
156217         return ret;
156220 +int bench_futex_wake_parallel(int argc, const char **argv)
156222 +       return __bench_futex_wake_parallel(argc, argv);
156225 +int bench_futex2_wake_parallel(int argc, const char **argv)
156227 +       futex2 = true;
156228 +       return __bench_futex_wake_parallel(argc, argv);
156231  #endif /* HAVE_PTHREAD_BARRIER */
156232 diff --git a/tools/perf/bench/futex-wake.c b/tools/perf/bench/futex-wake.c
156233 index 6d217868f53c..546d2818eed8 100644
156234 --- a/tools/perf/bench/futex-wake.c
156235 +++ b/tools/perf/bench/futex-wake.c
156236 @@ -38,7 +38,7 @@ static u_int32_t futex1 = 0;
156237  static unsigned int nwakes = 1;
156239  pthread_t *worker;
156240 -static bool done = false, silent = false, fshared = false;
156241 +static bool done = false, silent = false, fshared = false, futex2 = false;
156242  static pthread_mutex_t thread_lock;
156243  static pthread_cond_t thread_parent, thread_worker;
156244  static struct stats waketime_stats, wakeup_stats;
156245 @@ -68,8 +68,13 @@ static void *workerfn(void *arg __maybe_unused)
156246         pthread_mutex_unlock(&thread_lock);
156248         while (1) {
156249 -               if (futex_wait(&futex1, 0, NULL, futex_flag) != EINTR)
156250 -                       break;
156251 +               if (!futex2) {
156252 +                       if (futex_wait(&futex1, 0, NULL, futex_flag) != EINTR)
156253 +                               break;
156254 +               } else {
156255 +                       if (futex2_wait(&futex1, 0, futex_flag, NULL) != EINTR)
156256 +                               break;
156257 +               }
156258         }
156260         pthread_exit(NULL);
156261 @@ -117,7 +122,7 @@ static void toggle_done(int sig __maybe_unused,
156262         done = true;
156265 -int bench_futex_wake(int argc, const char **argv)
156266 +static int __bench_futex_wake(int argc, const char **argv)
156268         int ret = 0;
156269         unsigned int i, j;
156270 @@ -147,7 +152,9 @@ int bench_futex_wake(int argc, const char **argv)
156271         if (!worker)
156272                 err(EXIT_FAILURE, "calloc");
156274 -       if (!fshared)
156275 +       if (futex2)
156276 +               futex_flag = FUTEX_32 | (fshared * FUTEX_SHARED_FLAG);
156277 +       else if (!fshared)
156278                 futex_flag = FUTEX_PRIVATE_FLAG;
156280         printf("Run summary [PID %d]: blocking on %d threads (at [%s] futex %p), "
156281 @@ -179,9 +186,14 @@ int bench_futex_wake(int argc, const char **argv)
156283                 /* Ok, all threads are patiently blocked, start waking folks up */
156284                 gettimeofday(&start, NULL);
156285 -               while (nwoken != nthreads)
156286 -                       nwoken += futex_wake(&futex1, nwakes, futex_flag);
156287 +               while (nwoken != nthreads) {
156288 +                       if (!futex2)
156289 +                               nwoken += futex_wake(&futex1, nwakes, futex_flag);
156290 +                       else
156291 +                               nwoken += futex2_wake(&futex1, nwakes, futex_flag);
156292 +               }
156293                 gettimeofday(&end, NULL);
156295                 timersub(&end, &start, &runtime);
156297                 update_stats(&wakeup_stats, nwoken);
156298 @@ -211,3 +223,14 @@ int bench_futex_wake(int argc, const char **argv)
156299         free(worker);
156300         return ret;
156303 +int bench_futex_wake(int argc, const char **argv)
156305 +       return __bench_futex_wake(argc, argv);
156308 +int bench_futex2_wake(int argc, const char **argv)
156310 +       futex2 = true;
156311 +       return __bench_futex_wake(argc, argv);
156313 diff --git a/tools/perf/bench/futex.h b/tools/perf/bench/futex.h
156314 index 31b53cc7d5bc..6b2213cf3f64 100644
156315 --- a/tools/perf/bench/futex.h
156316 +++ b/tools/perf/bench/futex.h
156317 @@ -86,4 +86,51 @@ futex_cmp_requeue(u_int32_t *uaddr, u_int32_t val, u_int32_t *uaddr2, int nr_wak
156318         return futex(uaddr, FUTEX_CMP_REQUEUE, nr_wake, nr_requeue, uaddr2,
156319                  val, opflags);
156323 + * futex2_wait - Wait at uaddr if *uaddr == val, until timo.
156324 + * @uaddr: User address to wait for
156325 + * @val:   Expected value at uaddr
156326 + * @flags: Operation options
156327 + * @timo:  Optional timeout
156329 + * Return: 0 on success, error code otherwise
156330 + */
156331 +static inline int futex2_wait(volatile void *uaddr, unsigned long val,
156332 +                             unsigned long flags, struct timespec *timo)
156334 +       return syscall(__NR_futex_wait, uaddr, val, flags, timo);
156338 + * futex2_wake - Wake a number of waiters waiting at uaddr
156339 + * @uaddr: Address to wake
156340 + * @nr:    Number of waiters to wake
156341 + * @flags: Operation options
156343 + * Return: number of waked futexes
156344 + */
156345 +static inline int futex2_wake(volatile void *uaddr, unsigned int nr, unsigned long flags)
156347 +       return syscall(__NR_futex_wake, uaddr, nr, flags);
156351 + * futex2_requeue - Requeue waiters from an address to another one
156352 + * @uaddr1:     Address where waiters are currently waiting on
156353 + * @uaddr2:     New address to wait
156354 + * @nr_wake:    Number of waiters at uaddr1 to be wake
156355 + * @nr_requeue: After waking nr_wake, number of waiters to be requeued
156356 + * @cmpval:     Expected value at uaddr1
156357 + * @flags: Operation options
156359 + * Return: waked futexes + requeued futexes at uaddr1
156360 + */
156361 +static inline int futex2_requeue(volatile struct futex_requeue *uaddr1,
156362 +                                volatile struct futex_requeue *uaddr2,
156363 +                                unsigned int nr_wake, unsigned int nr_requeue,
156364 +                                unsigned int cmpval, unsigned long flags)
156366 +       return syscall(__NR_futex_requeue, uaddr1, uaddr2, nr_wake, nr_requeue, cmpval, flags);
156368  #endif /* _FUTEX_H */
156369 diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c
156370 index 62a7b7420a44..e41a95ad2db6 100644
156371 --- a/tools/perf/builtin-bench.c
156372 +++ b/tools/perf/builtin-bench.c
156373 @@ -12,10 +12,11 @@
156374   *
156375   *  sched ... scheduler and IPC performance
156376   *  syscall ... System call performance
156377 - *  mem   ... memory access performance
156378 - *  numa  ... NUMA scheduling and MM performance
156379 - *  futex ... Futex performance
156380 - *  epoll ... Event poll performance
156381 + *  mem    ... memory access performance
156382 + *  numa   ... NUMA scheduling and MM performance
156383 + *  futex  ... Futex performance
156384 + *  futex2 ... Futex2 performance
156385 + *  epoll  ... Event poll performance
156386   */
156387  #include <subcmd/parse-options.h>
156388  #include "builtin.h"
156389 @@ -75,6 +76,14 @@ static struct bench futex_benchmarks[] = {
156390         { NULL,         NULL,                                           NULL                    }
156393 +static struct bench futex2_benchmarks[] = {
156394 +       { "hash",          "Benchmark for futex2 hash table",            bench_futex2_hash      },
156395 +       { "wake",          "Benchmark for futex2 wake calls",            bench_futex2_wake      },
156396 +       { "wake-parallel", "Benchmark for parallel futex2 wake calls",   bench_futex2_wake_parallel },
156397 +       { "requeue",       "Benchmark for futex2 requeue calls",         bench_futex2_requeue   },
156398 +       { NULL,         NULL,                                           NULL                    }
156401  #ifdef HAVE_EVENTFD_SUPPORT
156402  static struct bench epoll_benchmarks[] = {
156403         { "wait",       "Benchmark epoll concurrent epoll_waits",       bench_epoll_wait        },
156404 @@ -105,6 +114,7 @@ static struct collection collections[] = {
156405         { "numa",       "NUMA scheduling and MM benchmarks",            numa_benchmarks         },
156406  #endif
156407         {"futex",       "Futex stressing benchmarks",                   futex_benchmarks        },
156408 +       {"futex2",      "Futex2 stressing benchmarks",                  futex2_benchmarks        },
156409  #ifdef HAVE_EVENTFD_SUPPORT
156410         {"epoll",       "Epoll stressing benchmarks",                   epoll_benchmarks        },
156411  #endif
156412 diff --git a/tools/perf/pmu-events/arch/x86/amdzen1/cache.json b/tools/perf/pmu-events/arch/x86/amdzen1/cache.json
156413 index 4ea7ec4f496e..008f1683e540 100644
156414 --- a/tools/perf/pmu-events/arch/x86/amdzen1/cache.json
156415 +++ b/tools/perf/pmu-events/arch/x86/amdzen1/cache.json
156416 @@ -275,7 +275,7 @@
156417    {
156418      "EventName": "l2_pf_hit_l2",
156419      "EventCode": "0x70",
156420 -    "BriefDescription": "L2 prefetch hit in L2.",
156421 +    "BriefDescription": "L2 prefetch hit in L2. Use l2_cache_hits_from_l2_hwpf instead.",
156422      "UMask": "0xff"
156423    },
156424    {
156425 diff --git a/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json b/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json
156426 index 2cfe2d2f3bfd..3c954543d1ae 100644
156427 --- a/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json
156428 +++ b/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json
156429 @@ -79,10 +79,10 @@
156430      "UMask": "0x70"
156431    },
156432    {
156433 -    "MetricName": "l2_cache_hits_from_l2_hwpf",
156434 +    "EventName": "l2_cache_hits_from_l2_hwpf",
156435 +    "EventCode": "0x70",
156436      "BriefDescription": "L2 Cache Hits from L2 HWPF",
156437 -    "MetricExpr": "l2_pf_hit_l2 + l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
156438 -    "MetricGroup": "l2_cache"
156439 +    "UMask": "0xff"
156440    },
156441    {
156442      "EventName": "l3_accesses",
156443 diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/cache.json b/tools/perf/pmu-events/arch/x86/amdzen2/cache.json
156444 index f61b982f83ca..8ba84a48188d 100644
156445 --- a/tools/perf/pmu-events/arch/x86/amdzen2/cache.json
156446 +++ b/tools/perf/pmu-events/arch/x86/amdzen2/cache.json
156447 @@ -205,7 +205,7 @@
156448    {
156449      "EventName": "l2_pf_hit_l2",
156450      "EventCode": "0x70",
156451 -    "BriefDescription": "L2 prefetch hit in L2.",
156452 +    "BriefDescription": "L2 prefetch hit in L2. Use l2_cache_hits_from_l2_hwpf instead.",
156453      "UMask": "0xff"
156454    },
156455    {
156456 diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json b/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json
156457 index 2ef91e25e661..1c624cee9ef4 100644
156458 --- a/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json
156459 +++ b/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json
156460 @@ -79,10 +79,10 @@
156461      "UMask": "0x70"
156462    },
156463    {
156464 -    "MetricName": "l2_cache_hits_from_l2_hwpf",
156465 +    "EventName": "l2_cache_hits_from_l2_hwpf",
156466 +    "EventCode": "0x70",
156467      "BriefDescription": "L2 Cache Hits from L2 HWPF",
156468 -    "MetricExpr": "l2_pf_hit_l2 + l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
156469 -    "MetricGroup": "l2_cache"
156470 +    "UMask": "0xff"
156471    },
156472    {
156473      "EventName": "l3_accesses",
156474 diff --git a/tools/perf/trace/beauty/fsconfig.sh b/tools/perf/trace/beauty/fsconfig.sh
156475 index 83fb24df05c9..bc6ef7bb7a5f 100755
156476 --- a/tools/perf/trace/beauty/fsconfig.sh
156477 +++ b/tools/perf/trace/beauty/fsconfig.sh
156478 @@ -10,8 +10,7 @@ fi
156479  linux_mount=${linux_header_dir}/mount.h
156481  printf "static const char *fsconfig_cmds[] = {\n"
156482 -regex='^[[:space:]]*+FSCONFIG_([[:alnum:]_]+)[[:space:]]*=[[:space:]]*([[:digit:]]+)[[:space:]]*,[[:space:]]*.*'
156483 -egrep $regex ${linux_mount} | \
156484 -       sed -r "s/$regex/\2 \1/g"       | \
156485 -       xargs printf "\t[%s] = \"%s\",\n"
156486 +ms='[[:space:]]*'
156487 +sed -nr "s/^${ms}FSCONFIG_([[:alnum:]_]+)${ms}=${ms}([[:digit:]]+)${ms},.*/\t[\2] = \"\1\",/p" \
156488 +       ${linux_mount}
156489  printf "};\n"
156490 diff --git a/tools/perf/util/Build b/tools/perf/util/Build
156491 index e3e12f9d4733..5a296ac69415 100644
156492 --- a/tools/perf/util/Build
156493 +++ b/tools/perf/util/Build
156494 @@ -141,7 +141,14 @@ perf-$(CONFIG_LIBELF) += symbol-elf.o
156495  perf-$(CONFIG_LIBELF) += probe-file.o
156496  perf-$(CONFIG_LIBELF) += probe-event.o
156498 +ifdef CONFIG_LIBBPF_DYNAMIC
156499 +  hashmap := 1
156500 +endif
156501  ifndef CONFIG_LIBBPF
156502 +  hashmap := 1
156503 +endif
156505 +ifdef hashmap
156506  perf-y += hashmap.o
156507  endif
156509 diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
156510 index 9760d8e7b386..917a9c707371 100644
156511 --- a/tools/perf/util/jitdump.c
156512 +++ b/tools/perf/util/jitdump.c
156513 @@ -396,21 +396,31 @@ static pid_t jr_entry_tid(struct jit_buf_desc *jd, union jr_entry *jr)
156515  static uint64_t convert_timestamp(struct jit_buf_desc *jd, uint64_t timestamp)
156517 -       struct perf_tsc_conversion tc;
156518 +       struct perf_tsc_conversion tc = { .time_shift = 0, };
156519 +       struct perf_record_time_conv *time_conv = &jd->session->time_conv;
156521         if (!jd->use_arch_timestamp)
156522                 return timestamp;
156524 -       tc.time_shift          = jd->session->time_conv.time_shift;
156525 -       tc.time_mult           = jd->session->time_conv.time_mult;
156526 -       tc.time_zero           = jd->session->time_conv.time_zero;
156527 -       tc.time_cycles         = jd->session->time_conv.time_cycles;
156528 -       tc.time_mask           = jd->session->time_conv.time_mask;
156529 -       tc.cap_user_time_zero  = jd->session->time_conv.cap_user_time_zero;
156530 -       tc.cap_user_time_short = jd->session->time_conv.cap_user_time_short;
156531 +       tc.time_shift = time_conv->time_shift;
156532 +       tc.time_mult  = time_conv->time_mult;
156533 +       tc.time_zero  = time_conv->time_zero;
156535 -       if (!tc.cap_user_time_zero)
156536 -               return 0;
156537 +       /*
156538 +        * The event TIME_CONV was extended for the fields from "time_cycles"
156539 +        * when supported cap_user_time_short, for backward compatibility,
156540 +        * checks the event size and assigns these extended fields if these
156541 +        * fields are contained in the event.
156542 +        */
156543 +       if (event_contains(*time_conv, time_cycles)) {
156544 +               tc.time_cycles         = time_conv->time_cycles;
156545 +               tc.time_mask           = time_conv->time_mask;
156546 +               tc.cap_user_time_zero  = time_conv->cap_user_time_zero;
156547 +               tc.cap_user_time_short = time_conv->cap_user_time_short;
156549 +               if (!tc.cap_user_time_zero)
156550 +                       return 0;
156551 +       }
156553         return tsc_to_perf_time(timestamp, &tc);
156555 diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
156556 index 859832a82496..e9d4e6f4bdf3 100644
156557 --- a/tools/perf/util/session.c
156558 +++ b/tools/perf/util/session.c
156559 @@ -949,6 +949,19 @@ static void perf_event__stat_round_swap(union perf_event *event,
156560         event->stat_round.time = bswap_64(event->stat_round.time);
156563 +static void perf_event__time_conv_swap(union perf_event *event,
156564 +                                      bool sample_id_all __maybe_unused)
156566 +       event->time_conv.time_shift = bswap_64(event->time_conv.time_shift);
156567 +       event->time_conv.time_mult  = bswap_64(event->time_conv.time_mult);
156568 +       event->time_conv.time_zero  = bswap_64(event->time_conv.time_zero);
156570 +       if (event_contains(event->time_conv, time_cycles)) {
156571 +               event->time_conv.time_cycles = bswap_64(event->time_conv.time_cycles);
156572 +               event->time_conv.time_mask = bswap_64(event->time_conv.time_mask);
156573 +       }
156576  typedef void (*perf_event__swap_op)(union perf_event *event,
156577                                     bool sample_id_all);
156579 @@ -985,7 +998,7 @@ static perf_event__swap_op perf_event__swap_ops[] = {
156580         [PERF_RECORD_STAT]                = perf_event__stat_swap,
156581         [PERF_RECORD_STAT_ROUND]          = perf_event__stat_round_swap,
156582         [PERF_RECORD_EVENT_UPDATE]        = perf_event__event_update_swap,
156583 -       [PERF_RECORD_TIME_CONV]           = perf_event__all64_swap,
156584 +       [PERF_RECORD_TIME_CONV]           = perf_event__time_conv_swap,
156585         [PERF_RECORD_HEADER_MAX]          = NULL,
156588 diff --git a/tools/perf/util/symbol_fprintf.c b/tools/perf/util/symbol_fprintf.c
156589 index 35c936ce33ef..2664fb65e47a 100644
156590 --- a/tools/perf/util/symbol_fprintf.c
156591 +++ b/tools/perf/util/symbol_fprintf.c
156592 @@ -68,7 +68,7 @@ size_t dso__fprintf_symbols_by_name(struct dso *dso,
156594         for (nd = rb_first_cached(&dso->symbol_names); nd; nd = rb_next(nd)) {
156595                 pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
156596 -               fprintf(fp, "%s\n", pos->sym.name);
156597 +               ret += fprintf(fp, "%s\n", pos->sym.name);
156598         }
156600         return ret;
156601 diff --git a/tools/power/x86/intel-speed-select/isst-display.c b/tools/power/x86/intel-speed-select/isst-display.c
156602 index 8e54ce47648e..3bf1820c0da1 100644
156603 --- a/tools/power/x86/intel-speed-select/isst-display.c
156604 +++ b/tools/power/x86/intel-speed-select/isst-display.c
156605 @@ -25,10 +25,14 @@ static void printcpulist(int str_len, char *str, int mask_size,
156606                         index = snprintf(&str[curr_index],
156607                                          str_len - curr_index, ",");
156608                         curr_index += index;
156609 +                       if (curr_index >= str_len)
156610 +                               break;
156611                 }
156612                 index = snprintf(&str[curr_index], str_len - curr_index, "%d",
156613                                  i);
156614                 curr_index += index;
156615 +               if (curr_index >= str_len)
156616 +                       break;
156617                 first = 0;
156618         }
156620 @@ -64,10 +68,14 @@ static void printcpumask(int str_len, char *str, int mask_size,
156621                 index = snprintf(&str[curr_index], str_len - curr_index, "%08x",
156622                                  mask[i]);
156623                 curr_index += index;
156624 +               if (curr_index >= str_len)
156625 +                       break;
156626                 if (i) {
156627                         strncat(&str[curr_index], ",", str_len - curr_index);
156628                         curr_index++;
156629                 }
156630 +               if (curr_index >= str_len)
156631 +                       break;
156632         }
156634         free(mask);
156635 @@ -185,7 +193,7 @@ static void _isst_pbf_display_information(int cpu, FILE *outf, int level,
156636                                           int disp_level)
156638         char header[256];
156639 -       char value[256];
156640 +       char value[512];
156642         snprintf(header, sizeof(header), "speed-select-base-freq-properties");
156643         format_and_print(outf, disp_level, header, NULL);
156644 @@ -349,7 +357,7 @@ void isst_ctdp_display_information(int cpu, FILE *outf, int tdp_level,
156645                                    struct isst_pkg_ctdp *pkg_dev)
156647         char header[256];
156648 -       char value[256];
156649 +       char value[512];
156650         static int level;
156651         int i;
156653 diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
156654 index a7c4f0772e53..002697021474 100644
156655 --- a/tools/power/x86/turbostat/turbostat.c
156656 +++ b/tools/power/x86/turbostat/turbostat.c
156657 @@ -291,13 +291,16 @@ struct msr_sum_array {
156658  /* The percpu MSR sum array.*/
156659  struct msr_sum_array *per_cpu_msr_sum;
156661 -int idx_to_offset(int idx)
156662 +off_t idx_to_offset(int idx)
156664 -       int offset;
156665 +       off_t offset;
156667         switch (idx) {
156668         case IDX_PKG_ENERGY:
156669 -               offset = MSR_PKG_ENERGY_STATUS;
156670 +               if (do_rapl & RAPL_AMD_F17H)
156671 +                       offset = MSR_PKG_ENERGY_STAT;
156672 +               else
156673 +                       offset = MSR_PKG_ENERGY_STATUS;
156674                 break;
156675         case IDX_DRAM_ENERGY:
156676                 offset = MSR_DRAM_ENERGY_STATUS;
156677 @@ -320,12 +323,13 @@ int idx_to_offset(int idx)
156678         return offset;
156681 -int offset_to_idx(int offset)
156682 +int offset_to_idx(off_t offset)
156684         int idx;
156686         switch (offset) {
156687         case MSR_PKG_ENERGY_STATUS:
156688 +       case MSR_PKG_ENERGY_STAT:
156689                 idx = IDX_PKG_ENERGY;
156690                 break;
156691         case MSR_DRAM_ENERGY_STATUS:
156692 @@ -353,7 +357,7 @@ int idx_valid(int idx)
156694         switch (idx) {
156695         case IDX_PKG_ENERGY:
156696 -               return do_rapl & RAPL_PKG;
156697 +               return do_rapl & (RAPL_PKG | RAPL_AMD_F17H);
156698         case IDX_DRAM_ENERGY:
156699                 return do_rapl & RAPL_DRAM;
156700         case IDX_PP0_ENERGY:
156701 @@ -3272,7 +3276,7 @@ static int update_msr_sum(struct thread_data *t, struct core_data *c, struct pkg
156703         for (i = IDX_PKG_ENERGY; i < IDX_COUNT; i++) {
156704                 unsigned long long msr_cur, msr_last;
156705 -               int offset;
156706 +               off_t offset;
156708                 if (!idx_valid(i))
156709                         continue;
156710 @@ -3281,7 +3285,8 @@ static int update_msr_sum(struct thread_data *t, struct core_data *c, struct pkg
156711                         continue;
156712                 ret = get_msr(cpu, offset, &msr_cur);
156713                 if (ret) {
156714 -                       fprintf(outf, "Can not update msr(0x%x)\n", offset);
156715 +                       fprintf(outf, "Can not update msr(0x%llx)\n",
156716 +                               (unsigned long long)offset);
156717                         continue;
156718                 }
156720 @@ -4817,33 +4822,12 @@ double discover_bclk(unsigned int family, unsigned int model)
156721   * below this value, including the Digital Thermal Sensor (DTS),
156722   * Package Thermal Management Sensor (PTM), and thermal event thresholds.
156723   */
156724 -int read_tcc_activation_temp()
156725 +int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
156727         unsigned long long msr;
156728 -       unsigned int tcc, target_c, offset_c;
156730 -       /* Temperature Target MSR is Nehalem and newer only */
156731 -       if (!do_nhm_platform_info)
156732 -               return 0;
156734 -       if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr))
156735 -               return 0;
156737 -       target_c = (msr >> 16) & 0xFF;
156739 -       offset_c = (msr >> 24) & 0xF;
156741 -       tcc = target_c - offset_c;
156743 -       if (!quiet)
156744 -               fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C) (%d default - %d offset)\n",
156745 -                       base_cpu, msr, tcc, target_c, offset_c);
156747 -       return tcc;
156749 +       unsigned int target_c_local;
156750 +       int cpu;
156752 -int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
156754         /* tcc_activation_temp is used only for dts or ptm */
156755         if (!(do_dts || do_ptm))
156756                 return 0;
156757 @@ -4852,18 +4836,43 @@ int set_temperature_target(struct thread_data *t, struct core_data *c, struct pk
156758         if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
156759                 return 0;
156761 +       cpu = t->cpu_id;
156762 +       if (cpu_migrate(cpu)) {
156763 +               fprintf(outf, "Could not migrate to CPU %d\n", cpu);
156764 +               return -1;
156765 +       }
156767         if (tcc_activation_temp_override != 0) {
156768                 tcc_activation_temp = tcc_activation_temp_override;
156769 -               fprintf(outf, "Using cmdline TCC Target (%d C)\n", tcc_activation_temp);
156770 +               fprintf(outf, "cpu%d: Using cmdline TCC Target (%d C)\n",
156771 +                       cpu, tcc_activation_temp);
156772                 return 0;
156773         }
156775 -       tcc_activation_temp = read_tcc_activation_temp();
156776 -       if (tcc_activation_temp)
156777 -               return 0;
156778 +       /* Temperature Target MSR is Nehalem and newer only */
156779 +       if (!do_nhm_platform_info)
156780 +               goto guess;
156782 +       if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr))
156783 +               goto guess;
156785 +       target_c_local = (msr >> 16) & 0xFF;
156787 +       if (!quiet)
156788 +               fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n",
156789 +                       cpu, msr, target_c_local);
156791 +       if (!target_c_local)
156792 +               goto guess;
156794 +       tcc_activation_temp = target_c_local;
156796 +       return 0;
156798 +guess:
156799         tcc_activation_temp = TJMAX_DEFAULT;
156800 -       fprintf(outf, "Guessing tjMax %d C, Please use -T to specify\n", tcc_activation_temp);
156801 +       fprintf(outf, "cpu%d: Guessing tjMax %d C, Please use -T to specify\n",
156802 +               cpu, tcc_activation_temp);
156804         return 0;
156806 diff --git a/tools/spi/Makefile b/tools/spi/Makefile
156807 index ada881afb489..0aa6dbd31fb8 100644
156808 --- a/tools/spi/Makefile
156809 +++ b/tools/spi/Makefile
156810 @@ -25,11 +25,12 @@ include $(srctree)/tools/build/Makefile.include
156812  # We need the following to be outside of kernel tree
156814 -$(OUTPUT)include/linux/spi/spidev.h: ../../include/uapi/linux/spi/spidev.h
156815 +$(OUTPUT)include/linux/spi: ../../include/uapi/linux/spi
156816         mkdir -p $(OUTPUT)include/linux/spi 2>&1 || true
156817         ln -sf $(CURDIR)/../../include/uapi/linux/spi/spidev.h $@
156818 +       ln -sf $(CURDIR)/../../include/uapi/linux/spi/spi.h $@
156820 -prepare: $(OUTPUT)include/linux/spi/spidev.h
156821 +prepare: $(OUTPUT)include/linux/spi
156824  # spidev_test
156825 diff --git a/tools/testing/selftests/arm64/mte/Makefile b/tools/testing/selftests/arm64/mte/Makefile
156826 index 0b3af552632a..df15d44aeb8d 100644
156827 --- a/tools/testing/selftests/arm64/mte/Makefile
156828 +++ b/tools/testing/selftests/arm64/mte/Makefile
156829 @@ -6,9 +6,7 @@ SRCS := $(filter-out mte_common_util.c,$(wildcard *.c))
156830  PROGS := $(patsubst %.c,%,$(SRCS))
156832  #Add mte compiler option
156833 -ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep gcc),)
156834  CFLAGS += -march=armv8.5-a+memtag
156835 -endif
156837  #check if the compiler works well
156838  mte_cc_support := $(shell if ($(CC) $(CFLAGS) -E -x c /dev/null -o /dev/null 2>&1) then echo "1"; fi)
156839 diff --git a/tools/testing/selftests/arm64/mte/mte_common_util.c b/tools/testing/selftests/arm64/mte/mte_common_util.c
156840 index 39f8908988ea..70665ba88cbb 100644
156841 --- a/tools/testing/selftests/arm64/mte/mte_common_util.c
156842 +++ b/tools/testing/selftests/arm64/mte/mte_common_util.c
156843 @@ -278,22 +278,13 @@ int mte_switch_mode(int mte_option, unsigned long incl_mask)
156844         return 0;
156847 -#define ID_AA64PFR1_MTE_SHIFT          8
156848 -#define ID_AA64PFR1_MTE                        2
156850  int mte_default_setup(void)
156852 -       unsigned long hwcaps = getauxval(AT_HWCAP);
156853 +       unsigned long hwcaps2 = getauxval(AT_HWCAP2);
156854         unsigned long en = 0;
156855         int ret;
156857 -       if (!(hwcaps & HWCAP_CPUID)) {
156858 -               ksft_print_msg("FAIL: CPUID registers unavailable\n");
156859 -               return KSFT_FAIL;
156860 -       }
156861 -       /* Read ID_AA64PFR1_EL1 register */
156862 -       asm volatile("mrs %0, id_aa64pfr1_el1" : "=r"(hwcaps) : : "memory");
156863 -       if (((hwcaps >> ID_AA64PFR1_MTE_SHIFT) & MT_TAG_MASK) != ID_AA64PFR1_MTE) {
156864 +       if (!(hwcaps2 & HWCAP2_MTE)) {
156865                 ksft_print_msg("FAIL: MTE features unavailable\n");
156866                 return KSFT_SKIP;
156867         }
156868 diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
156869 index 044bfdcf5b74..76a325862119 100644
156870 --- a/tools/testing/selftests/bpf/Makefile
156871 +++ b/tools/testing/selftests/bpf/Makefile
156872 @@ -221,7 +221,7 @@ $(HOST_BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile)                \
156873                     DESTDIR=$(HOST_SCRATCH_DIR)/ prefix= all install_headers
156874  endif
156876 -$(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) | $(BPFTOOL) $(INCLUDE_DIR)
156877 +$(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL) | $(INCLUDE_DIR)
156878  ifeq ($(VMLINUX_H),)
156879         $(call msg,GEN,,$@)
156880         $(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@
156881 @@ -346,7 +346,8 @@ $(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.o:                         \
156883  $(TRUNNER_BPF_SKELS): $(TRUNNER_OUTPUT)/%.skel.h:                      \
156884                       $(TRUNNER_OUTPUT)/%.o                             \
156885 -                     | $(BPFTOOL) $(TRUNNER_OUTPUT)
156886 +                     $(BPFTOOL)                                        \
156887 +                     | $(TRUNNER_OUTPUT)
156888         $$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
156889         $(Q)$$(BPFTOOL) gen skeleton $$< > $$@
156890  endif
156891 diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
156892 index 06eb956ff7bb..4b517d76257d 100644
156893 --- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
156894 +++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
156895 @@ -210,11 +210,6 @@ static int duration = 0;
156896         .bpf_obj_file = "test_core_reloc_existence.o",                  \
156897         .btf_src_file = "btf__core_reloc_" #name ".o"                   \
156899 -#define FIELD_EXISTS_ERR_CASE(name) {                                  \
156900 -       FIELD_EXISTS_CASE_COMMON(name),                                 \
156901 -       .fails = true,                                                  \
156904  #define BITFIELDS_CASE_COMMON(objfile, test_name_prefix,  name)                \
156905         .case_name = test_name_prefix#name,                             \
156906         .bpf_obj_file = objfile,                                        \
156907 @@ -222,7 +217,7 @@ static int duration = 0;
156909  #define BITFIELDS_CASE(name, ...) {                                    \
156910         BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.o",     \
156911 -                             "direct:", name),                         \
156912 +                             "probed:", name),                         \
156913         .input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__,     \
156914         .input_len = sizeof(struct core_reloc_##name),                  \
156915         .output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output)       \
156916 @@ -230,7 +225,7 @@ static int duration = 0;
156917         .output_len = sizeof(struct core_reloc_bitfields_output),       \
156918  }, {                                                                   \
156919         BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.o",     \
156920 -                             "probed:", name),                         \
156921 +                             "direct:", name),                         \
156922         .input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__,     \
156923         .input_len = sizeof(struct core_reloc_##name),                  \
156924         .output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output)       \
156925 @@ -550,8 +545,7 @@ static struct core_reloc_test_case test_cases[] = {
156926         ARRAYS_ERR_CASE(arrays___err_too_small),
156927         ARRAYS_ERR_CASE(arrays___err_too_shallow),
156928         ARRAYS_ERR_CASE(arrays___err_non_array),
156929 -       ARRAYS_ERR_CASE(arrays___err_wrong_val_type1),
156930 -       ARRAYS_ERR_CASE(arrays___err_wrong_val_type2),
156931 +       ARRAYS_ERR_CASE(arrays___err_wrong_val_type),
156932         ARRAYS_ERR_CASE(arrays___err_bad_zero_sz_arr),
156934         /* enum/ptr/int handling scenarios */
156935 @@ -642,13 +636,25 @@ static struct core_reloc_test_case test_cases[] = {
156936                 },
156937                 .output_len = sizeof(struct core_reloc_existence_output),
156938         },
156940 -       FIELD_EXISTS_ERR_CASE(existence__err_int_sz),
156941 -       FIELD_EXISTS_ERR_CASE(existence__err_int_type),
156942 -       FIELD_EXISTS_ERR_CASE(existence__err_int_kind),
156943 -       FIELD_EXISTS_ERR_CASE(existence__err_arr_kind),
156944 -       FIELD_EXISTS_ERR_CASE(existence__err_arr_value_type),
156945 -       FIELD_EXISTS_ERR_CASE(existence__err_struct_type),
156946 +       {
156947 +               FIELD_EXISTS_CASE_COMMON(existence___wrong_field_defs),
156948 +               .input = STRUCT_TO_CHAR_PTR(core_reloc_existence___wrong_field_defs) {
156949 +               },
156950 +               .input_len = sizeof(struct core_reloc_existence___wrong_field_defs),
156951 +               .output = STRUCT_TO_CHAR_PTR(core_reloc_existence_output) {
156952 +                       .a_exists = 0,
156953 +                       .b_exists = 0,
156954 +                       .c_exists = 0,
156955 +                       .arr_exists = 0,
156956 +                       .s_exists = 0,
156957 +                       .a_value = 0xff000001u,
156958 +                       .b_value = 0xff000002u,
156959 +                       .c_value = 0xff000003u,
156960 +                       .arr_value = 0xff000004u,
156961 +                       .s_value = 0xff000005u,
156962 +               },
156963 +               .output_len = sizeof(struct core_reloc_existence_output),
156964 +       },
156966         /* bitfield relocation checks */
156967         BITFIELDS_CASE(bitfields, {
156968 @@ -857,13 +863,20 @@ void test_core_reloc(void)
156969                           "prog '%s' not found\n", probe_name))
156970                         goto cleanup;
156973 +               if (test_case->btf_src_file) {
156974 +                       err = access(test_case->btf_src_file, R_OK);
156975 +                       if (!ASSERT_OK(err, "btf_src_file"))
156976 +                               goto cleanup;
156977 +               }
156979                 load_attr.obj = obj;
156980                 load_attr.log_level = 0;
156981                 load_attr.target_btf_path = test_case->btf_src_file;
156982                 err = bpf_object__load_xattr(&load_attr);
156983                 if (err) {
156984                         if (!test_case->fails)
156985 -                               CHECK(false, "obj_load", "failed to load prog '%s': %d\n", probe_name, err);
156986 +                               ASSERT_OK(err, "obj_load");
156987                         goto cleanup;
156988                 }
156990 @@ -902,10 +915,8 @@ void test_core_reloc(void)
156991                         goto cleanup;
156992                 }
156994 -               if (test_case->fails) {
156995 -                       CHECK(false, "obj_load_fail", "should fail to load prog '%s'\n", probe_name);
156996 +               if (!ASSERT_FALSE(test_case->fails, "obj_load_should_fail"))
156997                         goto cleanup;
156998 -               }
157000                 equal = memcmp(data->out, test_case->output,
157001                                test_case->output_len) == 0;
157002 diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c
157003 deleted file mode 100644
157004 index dd0ffa518f36..000000000000
157005 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c
157006 +++ /dev/null
157007 @@ -1,3 +0,0 @@
157008 -#include "core_reloc_types.h"
157010 -void f(struct core_reloc_existence___err_wrong_arr_kind x) {}
157011 diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c
157012 deleted file mode 100644
157013 index bc83372088ad..000000000000
157014 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c
157015 +++ /dev/null
157016 @@ -1,3 +0,0 @@
157017 -#include "core_reloc_types.h"
157019 -void f(struct core_reloc_existence___err_wrong_arr_value_type x) {}
157020 diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c
157021 deleted file mode 100644
157022 index 917bec41be08..000000000000
157023 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c
157024 +++ /dev/null
157025 @@ -1,3 +0,0 @@
157026 -#include "core_reloc_types.h"
157028 -void f(struct core_reloc_existence___err_wrong_int_kind x) {}
157029 diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c
157030 deleted file mode 100644
157031 index 6ec7e6ec1c91..000000000000
157032 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c
157033 +++ /dev/null
157034 @@ -1,3 +0,0 @@
157035 -#include "core_reloc_types.h"
157037 -void f(struct core_reloc_existence___err_wrong_int_sz x) {}
157038 diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c
157039 deleted file mode 100644
157040 index 7bbcacf2b0d1..000000000000
157041 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c
157042 +++ /dev/null
157043 @@ -1,3 +0,0 @@
157044 -#include "core_reloc_types.h"
157046 -void f(struct core_reloc_existence___err_wrong_int_type x) {}
157047 diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c
157048 deleted file mode 100644
157049 index f384dd38ec70..000000000000
157050 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c
157051 +++ /dev/null
157052 @@ -1,3 +0,0 @@
157053 -#include "core_reloc_types.h"
157055 -void f(struct core_reloc_existence___err_wrong_struct_type x) {}
157056 diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c
157057 new file mode 100644
157058 index 000000000000..d14b496190c3
157059 --- /dev/null
157060 +++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c
157061 @@ -0,0 +1,3 @@
157062 +#include "core_reloc_types.h"
157064 +void f(struct core_reloc_existence___wrong_field_defs x) {}
157065 diff --git a/tools/testing/selftests/bpf/progs/core_reloc_types.h b/tools/testing/selftests/bpf/progs/core_reloc_types.h
157066 index 9a2850850121..664eea1013aa 100644
157067 --- a/tools/testing/selftests/bpf/progs/core_reloc_types.h
157068 +++ b/tools/testing/selftests/bpf/progs/core_reloc_types.h
157069 @@ -700,27 +700,11 @@ struct core_reloc_existence___minimal {
157070         int a;
157073 -struct core_reloc_existence___err_wrong_int_sz {
157074 -       short a;
157077 -struct core_reloc_existence___err_wrong_int_type {
157078 +struct core_reloc_existence___wrong_field_defs {
157079 +       void *a;
157080         int b[1];
157083 -struct core_reloc_existence___err_wrong_int_kind {
157084         struct{ int x; } c;
157087 -struct core_reloc_existence___err_wrong_arr_kind {
157088         int arr;
157091 -struct core_reloc_existence___err_wrong_arr_value_type {
157092 -       short arr[1];
157095 -struct core_reloc_existence___err_wrong_struct_type {
157096         int s;
157099 diff --git a/tools/testing/selftests/bpf/verifier/array_access.c b/tools/testing/selftests/bpf/verifier/array_access.c
157100 index 1b138cd2b187..1b1c798e9248 100644
157101 --- a/tools/testing/selftests/bpf/verifier/array_access.c
157102 +++ b/tools/testing/selftests/bpf/verifier/array_access.c
157103 @@ -186,7 +186,7 @@
157104         },
157105         .fixup_map_hash_48b = { 3 },
157106         .errstr_unpriv = "R0 leaks addr",
157107 -       .errstr = "invalid access to map value, value_size=48 off=44 size=8",
157108 +       .errstr = "R0 unbounded memory access",
157109         .result_unpriv = REJECT,
157110         .result = REJECT,
157111         .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
157112 diff --git a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
157113 index 6f3a70df63bc..e00435753008 100644
157114 --- a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
157115 +++ b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh
157116 @@ -120,12 +120,13 @@ __mirror_gre_test()
157117         sleep 5
157119         for ((i = 0; i < count; ++i)); do
157120 +               local sip=$(mirror_gre_ipv6_addr 1 $i)::1
157121                 local dip=$(mirror_gre_ipv6_addr 1 $i)::2
157122                 local htun=h3-gt6-$i
157123                 local message
157125                 icmp6_capture_install $htun
157126 -               mirror_test v$h1 "" $dip $htun 100 10
157127 +               mirror_test v$h1 $sip $dip $htun 100 10
157128                 icmp6_capture_uninstall $htun
157129         done
157131 diff --git a/tools/testing/selftests/drivers/net/mlxsw/port_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/port_scale.sh
157132 index f813ffefc07e..65f43a7ce9c9 100644
157133 --- a/tools/testing/selftests/drivers/net/mlxsw/port_scale.sh
157134 +++ b/tools/testing/selftests/drivers/net/mlxsw/port_scale.sh
157135 @@ -55,10 +55,6 @@ port_test()
157136               | jq '.[][][] | select(.name=="physical_ports") |.["occ"]')
157138         [[ $occ -eq $max_ports ]]
157139 -       if [[ $should_fail -eq 0 ]]; then
157140 -               check_err $? "Mismatch ports number: Expected $max_ports, got $occ."
157141 -       else
157142 -               check_err_fail $should_fail $? "Reached more ports than expected"
157143 -       fi
157144 +       check_err_fail $should_fail $? "Attempt to create $max_ports ports (actual result $occ)"
157147 diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
157148 index b0cb1aaffdda..33ddd01689be 100644
157149 --- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
157150 +++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
157151 @@ -507,8 +507,8 @@ do_red_test()
157152         check_err $? "backlog $backlog / $limit Got $pct% marked packets, expected == 0."
157153         local diff=$((limit - backlog))
157154         pct=$((100 * diff / limit))
157155 -       ((0 <= pct && pct <= 5))
157156 -       check_err $? "backlog $backlog / $limit expected <= 5% distance"
157157 +       ((0 <= pct && pct <= 10))
157158 +       check_err $? "backlog $backlog / $limit expected <= 10% distance"
157159         log_test "TC $((vlan - 10)): RED backlog > limit"
157161         stop_traffic
157162 diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
157163 index cc0f07e72cf2..aa74be9f47c8 100644
157164 --- a/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
157165 +++ b/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
157166 @@ -98,11 +98,7 @@ __tc_flower_test()
157167                         jq -r '[ .[] | select(.kind == "flower") |
157168                         .options | .in_hw ]' | jq .[] | wc -l)
157169         [[ $((offload_count - 1)) -eq $count ]]
157170 -       if [[ $should_fail -eq 0 ]]; then
157171 -               check_err $? "Offload mismatch"
157172 -       else
157173 -               check_err_fail $should_fail $? "Offload more than expacted"
157174 -       fi
157175 +       check_err_fail $should_fail $? "Attempt to offload $count rules (actual result $((offload_count - 1)))"
157178  tc_flower_test()
157179 diff --git a/tools/testing/selftests/futex/functional/.gitignore b/tools/testing/selftests/futex/functional/.gitignore
157180 index 0efcd494daab..af7557e821da 100644
157181 --- a/tools/testing/selftests/futex/functional/.gitignore
157182 +++ b/tools/testing/selftests/futex/functional/.gitignore
157183 @@ -6,3 +6,6 @@ futex_wait_private_mapped_file
157184  futex_wait_timeout
157185  futex_wait_uninitialized_heap
157186  futex_wait_wouldblock
157187 +futex2_wait
157188 +futex2_waitv
157189 +futex2_requeue
157190 diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile
157191 index 23207829ec75..3ccb9ea58ddd 100644
157192 --- a/tools/testing/selftests/futex/functional/Makefile
157193 +++ b/tools/testing/selftests/futex/functional/Makefile
157194 @@ -1,10 +1,11 @@
157195  # SPDX-License-Identifier: GPL-2.0
157196 -INCLUDES := -I../include -I../../
157197 +INCLUDES := -I../include -I../../ -I../../../../../usr/include/
157198  CFLAGS := $(CFLAGS) -g -O2 -Wall -D_GNU_SOURCE -pthread $(INCLUDES)
157199  LDLIBS := -lpthread -lrt
157201  HEADERS := \
157202         ../include/futextest.h \
157203 +       ../include/futex2test.h \
157204         ../include/atomic.h \
157205         ../include/logging.h
157206  TEST_GEN_FILES := \
157207 @@ -14,7 +15,10 @@ TEST_GEN_FILES := \
157208         futex_requeue_pi_signal_restart \
157209         futex_requeue_pi_mismatched_ops \
157210         futex_wait_uninitialized_heap \
157211 -       futex_wait_private_mapped_file
157212 +       futex_wait_private_mapped_file \
157213 +       futex2_wait \
157214 +       futex2_waitv \
157215 +       futex2_requeue
157217  TEST_PROGS := run.sh
157219 diff --git a/tools/testing/selftests/futex/functional/futex2_requeue.c b/tools/testing/selftests/futex/functional/futex2_requeue.c
157220 new file mode 100644
157221 index 000000000000..1bc3704dc8c2
157222 --- /dev/null
157223 +++ b/tools/testing/selftests/futex/functional/futex2_requeue.c
157224 @@ -0,0 +1,164 @@
157225 +// SPDX-License-Identifier: GPL-2.0-or-later
157226 +/******************************************************************************
157228 + *   Copyright Collabora Ltd., 2021
157230 + * DESCRIPTION
157231 + *     Test requeue mechanism of futex2, using 32bit sized futexes.
157233 + * AUTHOR
157234 + *     André Almeida <andrealmeid@collabora.com>
157236 + * HISTORY
157237 + *      2021-Feb-5: Initial version by André <andrealmeid@collabora.com>
157239 + *****************************************************************************/
157241 +#include <errno.h>
157242 +#include <error.h>
157243 +#include <getopt.h>
157244 +#include <stdio.h>
157245 +#include <stdlib.h>
157246 +#include <string.h>
157247 +#include <time.h>
157248 +#include <pthread.h>
157249 +#include <sys/shm.h>
157250 +#include <limits.h>
157251 +#include "futex2test.h"
157252 +#include "logging.h"
157254 +#define TEST_NAME "futex2-wait"
157255 +#define timeout_ns  30000000
157256 +#define WAKE_WAIT_US 10000
157257 +volatile futex_t *f1;
157259 +void usage(char *prog)
157261 +       printf("Usage: %s\n", prog);
157262 +       printf("  -c    Use color\n");
157263 +       printf("  -h    Display this help message\n");
157264 +       printf("  -v L  Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n",
157265 +              VQUIET, VCRITICAL, VINFO);
157268 +void *waiterfn(void *arg)
157270 +       struct timespec64 to64;
157272 +       /* setting absolute timeout for futex2 */
157273 +       if (gettime64(CLOCK_MONOTONIC, &to64))
157274 +               error("gettime64 failed\n", errno);
157276 +       to64.tv_nsec += timeout_ns;
157278 +       if (to64.tv_nsec >= 1000000000) {
157279 +               to64.tv_sec++;
157280 +               to64.tv_nsec -= 1000000000;
157281 +       }
157283 +       if (futex2_wait(f1, *f1, FUTEX_32, &to64))
157284 +               printf("waiter failed errno %d\n", errno);
157286 +       return NULL;
157289 +int main(int argc, char *argv[])
157291 +       pthread_t waiter[10];
157292 +       int res, ret = RET_PASS;
157293 +       int c, i;
157294 +       volatile futex_t _f1 = 0;
157295 +       volatile futex_t f2 = 0;
157296 +       struct futex_requeue r1, r2;
157298 +       f1 = &_f1;
157300 +       r1.flags = FUTEX_32;
157301 +       r2.flags = FUTEX_32;
157303 +       r1.uaddr = f1;
157304 +       r2.uaddr = &f2;
157306 +       while ((c = getopt(argc, argv, "cht:v:")) != -1) {
157307 +               switch (c) {
157308 +               case 'c':
157309 +                       log_color(1);
157310 +                       break;
157311 +               case 'h':
157312 +                       usage(basename(argv[0]));
157313 +                       exit(0);
157314 +               case 'v':
157315 +                       log_verbosity(atoi(optarg));
157316 +                       break;
157317 +               default:
157318 +                       usage(basename(argv[0]));
157319 +                       exit(1);
157320 +               }
157321 +       }
157323 +       ksft_print_header();
157324 +       ksft_set_plan(2);
157325 +       ksft_print_msg("%s: Test FUTEX2_REQUEUE\n",
157326 +                      basename(argv[0]));
157328 +       /*
157329 +        * Requeue a waiter from f1 to f2, and wake f2.
157330 +        */
157331 +       if (pthread_create(&waiter[0], NULL, waiterfn, NULL))
157332 +               error("pthread_create failed\n", errno);
157334 +       usleep(WAKE_WAIT_US);
157336 +       res = futex2_requeue(&r1, &r2, 0, 1, 0, 0);
157337 +       if (res != 1) {
157338 +               ksft_test_result_fail("futex2_requeue private returned: %d %s\n",
157339 +                                     res ? errno : res,
157340 +                                     res ? strerror(errno) : "");
157341 +               ret = RET_FAIL;
157342 +       }
157345 +       info("Calling private futex2_wake on f2: %u @ %p with val=%u\n", f2, &f2, f2);
157346 +       res = futex2_wake(&f2, 1, FUTEX_32);
157347 +       if (res != 1) {
157348 +               ksft_test_result_fail("futex2_requeue private returned: %d %s\n",
157349 +                                     res ? errno : res,
157350 +                                     res ? strerror(errno) : "");
157351 +               ret = RET_FAIL;
157352 +       } else {
157353 +               ksft_test_result_pass("futex2_requeue simple succeeds\n");
157354 +       }
157357 +       /*
157358 +        * Create 10 waiters at f1. At futex_requeue, wake 3 and requeue 7.
157359 +        * At futex_wake, wake INT_MAX (should be exaclty 7).
157360 +        */
157361 +       for (i = 0; i < 10; i++) {
157362 +               if (pthread_create(&waiter[i], NULL, waiterfn, NULL))
157363 +                       error("pthread_create failed\n", errno);
157364 +       }
157366 +       usleep(WAKE_WAIT_US);
157368 +       res = futex2_requeue(&r1, &r2, 3, 7, 0, 0);
157369 +       if (res != 10) {
157370 +               ksft_test_result_fail("futex2_requeue private returned: %d %s\n",
157371 +                                     res ? errno : res,
157372 +                                     res ? strerror(errno) : "");
157373 +               ret = RET_FAIL;
157374 +       }
157376 +       res = futex2_wake(&f2, INT_MAX, FUTEX_32);
157377 +       if (res != 7) {
157378 +               ksft_test_result_fail("futex2_requeue private returned: %d %s\n",
157379 +                                     res ? errno : res,
157380 +                                     res ? strerror(errno) : "");
157381 +               ret = RET_FAIL;
157382 +       } else {
157383 +               ksft_test_result_pass("futex2_requeue succeeds\n");
157384 +       }
157386 +       ksft_print_cnts();
157387 +       return ret;
157389 diff --git a/tools/testing/selftests/futex/functional/futex2_wait.c b/tools/testing/selftests/futex/functional/futex2_wait.c
157390 new file mode 100644
157391 index 000000000000..4b5416585c79
157392 --- /dev/null
157393 +++ b/tools/testing/selftests/futex/functional/futex2_wait.c
157394 @@ -0,0 +1,209 @@
157395 +// SPDX-License-Identifier: GPL-2.0-or-later
157396 +/******************************************************************************
157398 + *   Copyright Collabora Ltd., 2021
157400 + * DESCRIPTION
157401 + *     Test wait/wake mechanism of futex2, using 32bit sized futexes.
157403 + * AUTHOR
157404 + *     André Almeida <andrealmeid@collabora.com>
157406 + * HISTORY
157407 + *      2021-Feb-5: Initial version by André <andrealmeid@collabora.com>
157409 + *****************************************************************************/
157411 +#include <errno.h>
157412 +#include <error.h>
157413 +#include <getopt.h>
157414 +#include <stdio.h>
157415 +#include <stdlib.h>
157416 +#include <string.h>
157417 +#include <time.h>
157418 +#include <pthread.h>
157419 +#include <sys/shm.h>
157420 +#include <sys/mman.h>
157421 +#include <fcntl.h>
157422 +#include <string.h>
157423 +#include "futex2test.h"
157424 +#include "logging.h"
157426 +#define TEST_NAME "futex2-wait"
157427 +#define timeout_ns  30000000
157428 +#define WAKE_WAIT_US 10000
157429 +#define SHM_PATH "futex2_shm_file"
157430 +futex_t *f1;
157432 +void usage(char *prog)
157434 +       printf("Usage: %s\n", prog);
157435 +       printf("  -c    Use color\n");
157436 +       printf("  -h    Display this help message\n");
157437 +       printf("  -v L  Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n",
157438 +              VQUIET, VCRITICAL, VINFO);
157441 +void *waiterfn(void *arg)
157443 +       struct timespec64 to64;
157444 +       unsigned int flags = 0;
157446 +       if (arg)
157447 +               flags = *((unsigned int *) arg);
157449 +       /* setting absolute timeout for futex2 */
157450 +       if (gettime64(CLOCK_MONOTONIC, &to64))
157451 +               error("gettime64 failed\n", errno);
157453 +       to64.tv_nsec += timeout_ns;
157455 +       if (to64.tv_nsec >= 1000000000) {
157456 +               to64.tv_sec++;
157457 +               to64.tv_nsec -= 1000000000;
157458 +       }
157460 +       if (futex2_wait(f1, *f1, FUTEX_32 | flags, &to64))
157461 +               printf("waiter failed errno %d\n", errno);
157463 +       return NULL;
157466 +void *waitershm(void *arg)
157468 +       futex2_wait(arg, 0, FUTEX_32 | FUTEX_SHARED_FLAG, NULL);
157470 +       return NULL;
157473 +int main(int argc, char *argv[])
157475 +       pthread_t waiter;
157476 +       unsigned int flags = FUTEX_SHARED_FLAG;
157477 +       int res, ret = RET_PASS;
157478 +       int c;
157479 +       futex_t f_private = 0;
157481 +       f1 = &f_private;
157483 +       while ((c = getopt(argc, argv, "cht:v:")) != -1) {
157484 +               switch (c) {
157485 +               case 'c':
157486 +                       log_color(1);
157487 +                       break;
157488 +               case 'h':
157489 +                       usage(basename(argv[0]));
157490 +                       exit(0);
157491 +               case 'v':
157492 +                       log_verbosity(atoi(optarg));
157493 +                       break;
157494 +               default:
157495 +                       usage(basename(argv[0]));
157496 +                       exit(1);
157497 +               }
157498 +       }
157500 +       ksft_print_header();
157501 +       ksft_set_plan(3);
157502 +       ksft_print_msg("%s: Test FUTEX2_WAIT\n",
157503 +                      basename(argv[0]));
157505 +       /* Testing a private futex */
157506 +       info("Calling private futex2_wait on f1: %u @ %p with val=%u\n", *f1, f1, *f1);
157508 +       if (pthread_create(&waiter, NULL, waiterfn, NULL))
157509 +               error("pthread_create failed\n", errno);
157511 +       usleep(WAKE_WAIT_US);
157513 +       info("Calling private futex2_wake on f1: %u @ %p with val=%u\n", *f1, f1, *f1);
157514 +       res = futex2_wake(f1, 1, FUTEX_32);
157515 +       if (res != 1) {
157516 +               ksft_test_result_fail("futex2_wake private returned: %d %s\n",
157517 +                                     res ? errno : res,
157518 +                                     res ? strerror(errno) : "");
157519 +               ret = RET_FAIL;
157520 +       } else {
157521 +               ksft_test_result_pass("futex2_wake private succeeds\n");
157522 +       }
157524 +       int shm_id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0666);
157526 +       if (shm_id < 0) {
157527 +               perror("shmget");
157528 +               exit(1);
157529 +       }
157531 +       /* Testing an anon page shared memory */
157532 +       unsigned int *shared_data = shmat(shm_id, NULL, 0);
157534 +       *shared_data = 0;
157535 +       f1 = shared_data;
157537 +       info("Calling shared futex2_wait on f1: %u @ %p with val=%u\n", *f1, f1, *f1);
157539 +       if (pthread_create(&waiter, NULL, waiterfn, &flags))
157540 +               error("pthread_create failed\n", errno);
157542 +       usleep(WAKE_WAIT_US);
157544 +       info("Calling shared futex2_wake on f1: %u @ %p with val=%u\n", *f1, f1, *f1);
157545 +       res = futex2_wake(f1, 1, FUTEX_32 | FUTEX_SHARED_FLAG);
157546 +       if (res != 1) {
157547 +               ksft_test_result_fail("futex2_wake shared (shmget) returned: %d %s\n",
157548 +                                     res ? errno : res,
157549 +                                     res ? strerror(errno) : "");
157550 +               ret = RET_FAIL;
157551 +       } else {
157552 +               ksft_test_result_pass("futex2_wake shared (shmget) succeeds\n");
157553 +       }
157555 +       shmdt(shared_data);
157557 +       /* Testing a file backed shared memory */
157558 +       void *shm;
157559 +       int fd, pid;
157561 +       f_private = 0;
157563 +       fd = open(SHM_PATH, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
157564 +       if (fd < 0) {
157565 +               perror("open");
157566 +               exit(1);
157567 +       }
157569 +       res = ftruncate(fd, sizeof(f_private));
157570 +       if (res) {
157571 +               perror("ftruncate");
157572 +               exit(1);
157573 +       }
157575 +       shm = mmap(NULL, sizeof(f_private), PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
157576 +       if (shm == MAP_FAILED) {
157577 +               perror("mmap");
157578 +               exit(1);
157579 +       }
157581 +       memcpy(shm, &f_private, sizeof(f_private));
157583 +       pthread_create(&waiter, NULL, waitershm, shm);
157585 +       usleep(WAKE_WAIT_US);
157587 +       res = futex2_wake(shm, 1, FUTEX_32 | FUTEX_SHARED_FLAG);
157588 +       if (res != 1) {
157589 +               ksft_test_result_fail("futex2_wake shared (mmap) returned: %d %s\n",
157590 +                                     res ? errno : res,
157591 +                                     res ? strerror(errno) : "");
157592 +               ret = RET_FAIL;
157593 +       } else {
157594 +               ksft_test_result_pass("futex2_wake shared (mmap) succeeds\n");
157595 +       }
157597 +       munmap(shm, sizeof(f_private));
157599 +       remove(SHM_PATH);
157601 +       ksft_print_cnts();
157602 +       return ret;
157604 diff --git a/tools/testing/selftests/futex/functional/futex2_waitv.c b/tools/testing/selftests/futex/functional/futex2_waitv.c
157605 new file mode 100644
157606 index 000000000000..2f81d296d95d
157607 --- /dev/null
157608 +++ b/tools/testing/selftests/futex/functional/futex2_waitv.c
157609 @@ -0,0 +1,157 @@
157610 +// SPDX-License-Identifier: GPL-2.0-or-later
157611 +/******************************************************************************
157613 + *   Copyright Collabora Ltd., 2021
157615 + * DESCRIPTION
157616 + *     Test waitv/wake mechanism of futex2, using 32bit sized futexes.
157618 + * AUTHOR
157619 + *     André Almeida <andrealmeid@collabora.com>
157621 + * HISTORY
157622 + *      2021-Feb-5: Initial version by André <andrealmeid@collabora.com>
157624 + *****************************************************************************/
157626 +#include <errno.h>
157627 +#include <error.h>
157628 +#include <getopt.h>
157629 +#include <stdio.h>
157630 +#include <stdlib.h>
157631 +#include <string.h>
157632 +#include <time.h>
157633 +#include <pthread.h>
157634 +#include <sys/shm.h>
157635 +#include "futex2test.h"
157636 +#include "logging.h"
157638 +#define TEST_NAME "futex2-wait"
157639 +#define timeout_ns  1000000000
157640 +#define WAKE_WAIT_US 10000
157641 +#define NR_FUTEXES 30
157642 +struct futex_waitv waitv[NR_FUTEXES];
157643 +u_int32_t futexes[NR_FUTEXES] = {0};
157645 +void usage(char *prog)
157647 +       printf("Usage: %s\n", prog);
157648 +       printf("  -c    Use color\n");
157649 +       printf("  -h    Display this help message\n");
157650 +       printf("  -v L  Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n",
157651 +              VQUIET, VCRITICAL, VINFO);
157654 +void *waiterfn(void *arg)
157656 +       struct timespec64 to64;
157657 +       int res;
157659 +       /* setting absolute timeout for futex2 */
157660 +       if (gettime64(CLOCK_MONOTONIC, &to64))
157661 +               error("gettime64 failed\n", errno);
157663 +       to64.tv_sec++;
157665 +       res = futex2_waitv(waitv, NR_FUTEXES, 0, &to64);
157666 +       if (res < 0) {
157667 +               ksft_test_result_fail("futex2_waitv private returned: %d %s\n",
157668 +                                     res ? errno : res,
157669 +                                     res ? strerror(errno) : "");
157670 +       } else if (res != NR_FUTEXES - 1) {
157671 +               ksft_test_result_fail("futex2_waitv private returned: %d %s\n",
157672 +                                     res ? errno : res,
157673 +                                     res ? strerror(errno) : "");
157674 +       }
157676 +       return NULL;
157679 +int main(int argc, char *argv[])
157681 +       pthread_t waiter;
157682 +       int res, ret = RET_PASS;
157683 +       int c, i;
157685 +       while ((c = getopt(argc, argv, "cht:v:")) != -1) {
157686 +               switch (c) {
157687 +               case 'c':
157688 +                       log_color(1);
157689 +                       break;
157690 +               case 'h':
157691 +                       usage(basename(argv[0]));
157692 +                       exit(0);
157693 +               case 'v':
157694 +                       log_verbosity(atoi(optarg));
157695 +                       break;
157696 +               default:
157697 +                       usage(basename(argv[0]));
157698 +                       exit(1);
157699 +               }
157700 +       }
157702 +       ksft_print_header();
157703 +       ksft_set_plan(2);
157704 +       ksft_print_msg("%s: Test FUTEX2_WAITV\n",
157705 +                      basename(argv[0]));
157707 +       for (i = 0; i < NR_FUTEXES; i++) {
157708 +               waitv[i].uaddr = &futexes[i];
157709 +               waitv[i].flags = FUTEX_32;
157710 +               waitv[i].val = 0;
157711 +       }
157713 +       /* Private waitv */
157714 +       if (pthread_create(&waiter, NULL, waiterfn, NULL))
157715 +               error("pthread_create failed\n", errno);
157717 +       usleep(WAKE_WAIT_US);
157719 +       res = futex2_wake(waitv[NR_FUTEXES - 1].uaddr, 1, FUTEX_32);
157720 +       if (res != 1) {
157721 +               ksft_test_result_fail("futex2_waitv private returned: %d %s\n",
157722 +                                     res ? errno : res,
157723 +                                     res ? strerror(errno) : "");
157724 +               ret = RET_FAIL;
157725 +       } else {
157726 +               ksft_test_result_pass("futex2_waitv private succeeds\n");
157727 +       }
157729 +       /* Shared waitv */
157730 +       for (i = 0; i < NR_FUTEXES; i++) {
157731 +               int shm_id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0666);
157733 +               if (shm_id < 0) {
157734 +                       perror("shmget");
157735 +                       exit(1);
157736 +               }
157738 +               unsigned int *shared_data = shmat(shm_id, NULL, 0);
157740 +               *shared_data = 0;
157741 +               waitv[i].uaddr = shared_data;
157742 +               waitv[i].flags = FUTEX_32 | FUTEX_SHARED_FLAG;
157743 +               waitv[i].val = 0;
157744 +       }
157746 +       if (pthread_create(&waiter, NULL, waiterfn, NULL))
157747 +               error("pthread_create failed\n", errno);
157749 +       usleep(WAKE_WAIT_US);
157751 +       res = futex2_wake(waitv[NR_FUTEXES - 1].uaddr, 1, FUTEX_32 | FUTEX_SHARED_FLAG);
157752 +       if (res != 1) {
157753 +               ksft_test_result_fail("futex2_waitv shared returned: %d %s\n",
157754 +                                     res ? errno : res,
157755 +                                     res ? strerror(errno) : "");
157756 +               ret = RET_FAIL;
157757 +       } else {
157758 +               ksft_test_result_pass("futex2_waitv shared succeeds\n");
157759 +       }
157761 +       for (i = 0; i < NR_FUTEXES; i++)
157762 +               shmdt(waitv[i].uaddr);
157764 +       ksft_print_cnts();
157765 +       return ret;
157767 diff --git a/tools/testing/selftests/futex/functional/futex_wait_timeout.c b/tools/testing/selftests/futex/functional/futex_wait_timeout.c
157768 index ee55e6d389a3..b4dffe9e3b44 100644
157769 --- a/tools/testing/selftests/futex/functional/futex_wait_timeout.c
157770 +++ b/tools/testing/selftests/futex/functional/futex_wait_timeout.c
157771 @@ -11,6 +11,7 @@
157772   *
157773   * HISTORY
157774   *      2009-Nov-6: Initial version by Darren Hart <dvhart@linux.intel.com>
157775 + *      2021-Feb-5: Add futex2 test by André <andrealmeid@collabora.com>
157776   *
157777   *****************************************************************************/
157779 @@ -20,7 +21,7 @@
157780  #include <stdlib.h>
157781  #include <string.h>
157782  #include <time.h>
157783 -#include "futextest.h"
157784 +#include "futex2test.h"
157785  #include "logging.h"
157787  #define TEST_NAME "futex-wait-timeout"
157788 @@ -40,7 +41,8 @@ void usage(char *prog)
157789  int main(int argc, char *argv[])
157791         futex_t f1 = FUTEX_INITIALIZER;
157792 -       struct timespec to;
157793 +       struct timespec to = {.tv_sec = 0, .tv_nsec = timeout_ns};
157794 +       struct timespec64 to64;
157795         int res, ret = RET_PASS;
157796         int c;
157798 @@ -65,22 +67,60 @@ int main(int argc, char *argv[])
157799         }
157801         ksft_print_header();
157802 -       ksft_set_plan(1);
157803 +       ksft_set_plan(3);
157804         ksft_print_msg("%s: Block on a futex and wait for timeout\n",
157805                basename(argv[0]));
157806         ksft_print_msg("\tArguments: timeout=%ldns\n", timeout_ns);
157808 -       /* initialize timeout */
157809 -       to.tv_sec = 0;
157810 -       to.tv_nsec = timeout_ns;
157812         info("Calling futex_wait on f1: %u @ %p\n", f1, &f1);
157813         res = futex_wait(&f1, f1, &to, FUTEX_PRIVATE_FLAG);
157814         if (!res || errno != ETIMEDOUT) {
157815 -               fail("futex_wait returned %d\n", ret < 0 ? errno : ret);
157816 +               ksft_test_result_fail("futex_wait returned %d\n", ret < 0 ? errno : ret);
157817 +               ret = RET_FAIL;
157818 +       } else {
157819 +               ksft_test_result_pass("futex_wait timeout succeeds\n");
157820 +       }
157822 +       /* setting absolute monotonic timeout for futex2 */
157823 +       if (gettime64(CLOCK_MONOTONIC, &to64))
157824 +               error("gettime64 failed\n", errno);
157826 +       to64.tv_nsec += timeout_ns;
157828 +       if (to64.tv_nsec >= 1000000000) {
157829 +               to64.tv_sec++;
157830 +               to64.tv_nsec -= 1000000000;
157831 +       }
157833 +       info("Calling futex2_wait on f1: %u @ %p\n", f1, &f1);
157834 +       res = futex2_wait(&f1, f1, FUTEX_32, &to64);
157835 +       if (!res || errno != ETIMEDOUT) {
157836 +               ksft_test_result_fail("futex2_wait monotonic returned %d\n", ret < 0 ? errno : ret);
157837 +               ret = RET_FAIL;
157838 +       } else {
157839 +               ksft_test_result_pass("futex2_wait monotonic timeout succeeds\n");
157840 +       }
157842 +       /* setting absolute realtime timeout for futex2 */
157843 +       if (gettime64(CLOCK_REALTIME, &to64))
157844 +               error("gettime64 failed\n", errno);
157846 +       to64.tv_nsec += timeout_ns;
157848 +       if (to64.tv_nsec >= 1000000000) {
157849 +               to64.tv_sec++;
157850 +               to64.tv_nsec -= 1000000000;
157851 +       }
157853 +       info("Calling futex2_wait on f1: %u @ %p\n", f1, &f1);
157854 +       res = futex2_wait(&f1, f1, FUTEX_32 | FUTEX_CLOCK_REALTIME, &to64);
157855 +       if (!res || errno != ETIMEDOUT) {
157856 +               ksft_test_result_fail("futex2_wait realtime returned %d\n", ret < 0 ? errno : ret);
157857                 ret = RET_FAIL;
157858 +       } else {
157859 +               ksft_test_result_pass("futex2_wait realtime timeout succeeds\n");
157860         }
157862 -       print_result(TEST_NAME, ret);
157863 +       ksft_print_cnts();
157864         return ret;
157866 diff --git a/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c b/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
157867 index 0ae390ff8164..ed3660090907 100644
157868 --- a/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
157869 +++ b/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
157870 @@ -12,6 +12,7 @@
157871   *
157872   * HISTORY
157873   *      2009-Nov-14: Initial version by Gowrishankar <gowrishankar.m@in.ibm.com>
157874 + *      2021-Feb-5: Add futex2 test by André <andrealmeid@collabora.com>
157875   *
157876   *****************************************************************************/
157878 @@ -21,7 +22,7 @@
157879  #include <stdlib.h>
157880  #include <string.h>
157881  #include <time.h>
157882 -#include "futextest.h"
157883 +#include "futex2test.h"
157884  #include "logging.h"
157886  #define TEST_NAME "futex-wait-wouldblock"
157887 @@ -39,6 +40,7 @@ void usage(char *prog)
157888  int main(int argc, char *argv[])
157890         struct timespec to = {.tv_sec = 0, .tv_nsec = timeout_ns};
157891 +       struct timespec64 to64;
157892         futex_t f1 = FUTEX_INITIALIZER;
157893         int res, ret = RET_PASS;
157894         int c;
157895 @@ -61,18 +63,41 @@ int main(int argc, char *argv[])
157896         }
157898         ksft_print_header();
157899 -       ksft_set_plan(1);
157900 +       ksft_set_plan(2);
157901         ksft_print_msg("%s: Test the unexpected futex value in FUTEX_WAIT\n",
157902                basename(argv[0]));
157904         info("Calling futex_wait on f1: %u @ %p with val=%u\n", f1, &f1, f1+1);
157905         res = futex_wait(&f1, f1+1, &to, FUTEX_PRIVATE_FLAG);
157906         if (!res || errno != EWOULDBLOCK) {
157907 -               fail("futex_wait returned: %d %s\n",
157908 +               ksft_test_result_fail("futex_wait returned: %d %s\n",
157909                      res ? errno : res, res ? strerror(errno) : "");
157910                 ret = RET_FAIL;
157911 +       } else {
157912 +               ksft_test_result_pass("futex_wait wouldblock succeeds\n");
157913         }
157915 -       print_result(TEST_NAME, ret);
157916 +       /* setting absolute timeout for futex2 */
157917 +       if (gettime64(CLOCK_MONOTONIC, &to64))
157918 +               error("gettime64 failed\n", errno);
157920 +       to64.tv_nsec += timeout_ns;
157922 +       if (to64.tv_nsec >= 1000000000) {
157923 +               to64.tv_sec++;
157924 +               to64.tv_nsec -= 1000000000;
157925 +       }
157927 +       info("Calling futex2_wait on f1: %u @ %p with val=%u\n", f1, &f1, f1+1);
157928 +       res = futex2_wait(&f1, f1+1, FUTEX_32, &to64);
157929 +       if (!res || errno != EWOULDBLOCK) {
157930 +               ksft_test_result_fail("futex2_wait returned: %d %s\n",
157931 +                    res ? errno : res, res ? strerror(errno) : "");
157932 +               ret = RET_FAIL;
157933 +       } else {
157934 +               ksft_test_result_pass("futex2_wait wouldblock succeeds\n");
157935 +       }
157937 +       ksft_print_cnts();
157938         return ret;
157940 diff --git a/tools/testing/selftests/futex/functional/run.sh b/tools/testing/selftests/futex/functional/run.sh
157941 index 1acb6ace1680..18b3883d7236 100755
157942 --- a/tools/testing/selftests/futex/functional/run.sh
157943 +++ b/tools/testing/selftests/futex/functional/run.sh
157944 @@ -73,3 +73,9 @@ echo
157945  echo
157946  ./futex_wait_uninitialized_heap $COLOR
157947  ./futex_wait_private_mapped_file $COLOR
157949 +echo
157950 +./futex2_wait $COLOR
157952 +echo
157953 +./futex2_waitv $COLOR
157954 diff --git a/tools/testing/selftests/futex/include/futex2test.h b/tools/testing/selftests/futex/include/futex2test.h
157955 new file mode 100644
157956 index 000000000000..e2635006b1a9
157957 --- /dev/null
157958 +++ b/tools/testing/selftests/futex/include/futex2test.h
157959 @@ -0,0 +1,121 @@
157960 +/* SPDX-License-Identifier: GPL-2.0-or-later */
157961 +/******************************************************************************
157963 + *   Copyright Collabora Ltd., 2021
157965 + * DESCRIPTION
157966 + *     Futex2 library addons for old futex library
157968 + * AUTHOR
157969 + *     André Almeida <andrealmeid@collabora.com>
157971 + * HISTORY
157972 + *      2021-Feb-5: Initial version by André <andrealmeid@collabora.com>
157974 + *****************************************************************************/
157975 +#include "futextest.h"
157976 +#include <stdio.h>
157978 +#define NSEC_PER_SEC   1000000000L
157980 +#ifndef FUTEX_8
157981 +# define FUTEX_8       0
157982 +#endif
157983 +#ifndef FUTEX_16
157984 +# define FUTEX_16      1
157985 +#endif
157986 +#ifndef FUTEX_32
157987 +# define FUTEX_32      2
157988 +#endif
157990 +#ifndef FUTEX_SHARED_FLAG
157991 +#define FUTEX_SHARED_FLAG 8
157992 +#endif
157994 +#ifndef FUTEX_WAITV_MAX
157995 +#define FUTEX_WAITV_MAX 128
157996 +struct futex_waitv {
157997 +       void *uaddr;
157998 +       unsigned int val;
157999 +       unsigned int flags;
158001 +#endif
158004 + * - Y2038 section for 32-bit applications -
158006 + * Remove this when glibc is ready for y2038. Then, always compile with
158007 + * `-DTIME_BITS=64` or `-D__USE_TIME_BITS64`. glibc will provide both
158008 + * timespec64 and clock_gettime64 so we won't need to define here.
158009 + */
158010 +#if defined(__i386__) || __TIMESIZE == 32
158011 +# define NR_gettime __NR_clock_gettime64
158012 +#else
158013 +# define NR_gettime __NR_clock_gettime
158014 +#endif
158016 +struct timespec64 {
158017 +       long long tv_sec;       /* seconds */
158018 +       long long tv_nsec;      /* nanoseconds */
158021 +int gettime64(clock_t clockid, struct timespec64 *tv)
158023 +       return syscall(NR_gettime, clockid, tv);
158026 + * - End of Y2038 section -
158027 + */
158030 + * futex2_wait - If (*uaddr == val), wait at uaddr until timo
158031 + * @uaddr: User address to wait on
158032 + * @val:   Expected value at uaddr, return if is not equal
158033 + * @flags: Operation flags
158034 + * @timo:  Optional timeout for operation
158035 + */
158036 +static inline int futex2_wait(volatile void *uaddr, unsigned long val,
158037 +                             unsigned long flags, struct timespec64 *timo)
158039 +       return syscall(__NR_futex_wait, uaddr, val, flags, timo);
158043 + * futex2_wake - Wake a number of waiters at uaddr
158044 + * @uaddr: Address to wake
158045 + * @nr:    Number of waiters to wake
158046 + * @flags: Operation flags
158047 + */
158048 +static inline int futex2_wake(volatile void *uaddr, unsigned int nr, unsigned long flags)
158050 +       return syscall(__NR_futex_wake, uaddr, nr, flags);
158054 + * futex2_waitv - Wait at multiple futexes, wake on any
158055 + * @waiters:    Array of waiters
158056 + * @nr_waiters: Length of waiters array
158057 + * @flags: Operation flags
158058 + * @timo:  Optional timeout for operation
158059 + */
158060 +static inline int futex2_waitv(volatile struct futex_waitv *waiters, unsigned long nr_waiters,
158061 +                             unsigned long flags, struct timespec64 *timo)
158063 +       return syscall(__NR_futex_waitv, waiters, nr_waiters, flags, timo);
158067 + * futex2_requeue - Wake futexes at uaddr1 and requeue from uaddr1 to uaddr2
158068 + * @uaddr1:     Original address to wake and requeue from
158069 + * @uaddr2:     Address to requeue to
158070 + * @nr_wake:    Number of futexes to wake at uaddr1 before requeuing
158071 + * @nr_requeue: Number of futexes to requeue from uaddr1 to uaddr2
158072 + * @cmpval:     If (uaddr1->uaddr != cmpval), return immediatally
158073 + * @flgas:      Operation flags
158074 + */
158075 +static inline int futex2_requeue(struct futex_requeue *uaddr1, struct futex_requeue *uaddr2,
158076 +                                unsigned int nr_wake, unsigned int nr_requeue,
158077 +                                unsigned int cmpval, unsigned long flags)
158079 +       return syscall(__NR_futex_requeue, uaddr1, uaddr2, nr_wake, nr_requeue, cmpval, flags);
158081 diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
158082 index bb2752d78fe3..81edbd23d371 100644
158083 --- a/tools/testing/selftests/kvm/dirty_log_test.c
158084 +++ b/tools/testing/selftests/kvm/dirty_log_test.c
158085 @@ -17,6 +17,7 @@
158086  #include <linux/bitmap.h>
158087  #include <linux/bitops.h>
158088  #include <asm/barrier.h>
158089 +#include <linux/atomic.h>
158091  #include "kvm_util.h"
158092  #include "test_util.h"
158093 @@ -137,12 +138,20 @@ static uint64_t host_clear_count;
158094  static uint64_t host_track_next_count;
158096  /* Whether dirty ring reset is requested, or finished */
158097 -static sem_t dirty_ring_vcpu_stop;
158098 -static sem_t dirty_ring_vcpu_cont;
158099 +static sem_t sem_vcpu_stop;
158100 +static sem_t sem_vcpu_cont;
158102 + * This is only set by main thread, and only cleared by vcpu thread.  It is
158103 + * used to request vcpu thread to stop at the next GUEST_SYNC, since GUEST_SYNC
158104 + * is the only place that we'll guarantee both "dirty bit" and "dirty data"
158105 + * will match.  E.g., SIG_IPI won't guarantee that if the vcpu is interrupted
158106 + * after setting dirty bit but before the data is written.
158107 + */
158108 +static atomic_t vcpu_sync_stop_requested;
158110   * This is updated by the vcpu thread to tell the host whether it's a
158111   * ring-full event.  It should only be read until a sem_wait() of
158112 - * dirty_ring_vcpu_stop and before vcpu continues to run.
158113 + * sem_vcpu_stop and before vcpu continues to run.
158114   */
158115  static bool dirty_ring_vcpu_ring_full;
158117 @@ -234,6 +243,17 @@ static void clear_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
158118         kvm_vm_clear_dirty_log(vm, slot, bitmap, 0, num_pages);
158121 +/* Should only be called after a GUEST_SYNC */
158122 +static void vcpu_handle_sync_stop(void)
158124 +       if (atomic_read(&vcpu_sync_stop_requested)) {
158125 +               /* It means main thread is sleeping waiting */
158126 +               atomic_set(&vcpu_sync_stop_requested, false);
158127 +               sem_post(&sem_vcpu_stop);
158128 +               sem_wait_until(&sem_vcpu_cont);
158129 +       }
158132  static void default_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
158134         struct kvm_run *run = vcpu_state(vm, VCPU_ID);
158135 @@ -244,6 +264,8 @@ static void default_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
158136         TEST_ASSERT(get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC,
158137                     "Invalid guest sync status: exit_reason=%s\n",
158138                     exit_reason_str(run->exit_reason));
158140 +       vcpu_handle_sync_stop();
158143  static bool dirty_ring_supported(void)
158144 @@ -301,13 +323,13 @@ static void dirty_ring_wait_vcpu(void)
158146         /* This makes sure that hardware PML cache flushed */
158147         vcpu_kick();
158148 -       sem_wait_until(&dirty_ring_vcpu_stop);
158149 +       sem_wait_until(&sem_vcpu_stop);
158152  static void dirty_ring_continue_vcpu(void)
158154         pr_info("Notifying vcpu to continue\n");
158155 -       sem_post(&dirty_ring_vcpu_cont);
158156 +       sem_post(&sem_vcpu_cont);
158159  static void dirty_ring_collect_dirty_pages(struct kvm_vm *vm, int slot,
158160 @@ -361,11 +383,11 @@ static void dirty_ring_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
158161                 /* Update the flag first before pause */
158162                 WRITE_ONCE(dirty_ring_vcpu_ring_full,
158163                            run->exit_reason == KVM_EXIT_DIRTY_RING_FULL);
158164 -               sem_post(&dirty_ring_vcpu_stop);
158165 +               sem_post(&sem_vcpu_stop);
158166                 pr_info("vcpu stops because %s...\n",
158167                         dirty_ring_vcpu_ring_full ?
158168                         "dirty ring is full" : "vcpu is kicked out");
158169 -               sem_wait_until(&dirty_ring_vcpu_cont);
158170 +               sem_wait_until(&sem_vcpu_cont);
158171                 pr_info("vcpu continues now.\n");
158172         } else {
158173                 TEST_ASSERT(false, "Invalid guest sync status: "
158174 @@ -377,7 +399,7 @@ static void dirty_ring_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
158175  static void dirty_ring_before_vcpu_join(void)
158177         /* Kick another round of vcpu just to make sure it will quit */
158178 -       sem_post(&dirty_ring_vcpu_cont);
158179 +       sem_post(&sem_vcpu_cont);
158182  struct log_mode {
158183 @@ -505,9 +527,8 @@ static void *vcpu_worker(void *data)
158184          */
158185         sigmask->len = 8;
158186         pthread_sigmask(0, NULL, sigset);
158187 +       sigdelset(sigset, SIG_IPI);
158188         vcpu_ioctl(vm, VCPU_ID, KVM_SET_SIGNAL_MASK, sigmask);
158189 -       sigaddset(sigset, SIG_IPI);
158190 -       pthread_sigmask(SIG_BLOCK, sigset, NULL);
158192         sigemptyset(sigset);
158193         sigaddset(sigset, SIG_IPI);
158194 @@ -768,7 +789,25 @@ static void run_test(enum vm_guest_mode mode, void *arg)
158195                 usleep(p->interval * 1000);
158196                 log_mode_collect_dirty_pages(vm, TEST_MEM_SLOT_INDEX,
158197                                              bmap, host_num_pages);
158199 +               /*
158200 +                * See vcpu_sync_stop_requested definition for details on why
158201 +                * we need to stop vcpu when verify data.
158202 +                */
158203 +               atomic_set(&vcpu_sync_stop_requested, true);
158204 +               sem_wait_until(&sem_vcpu_stop);
158205 +               /*
158206 +                * NOTE: for dirty ring, it's possible that we didn't stop at
158207 +                * GUEST_SYNC but instead we stopped because ring is full;
158208 +                * that's okay too because ring full means we're only missing
158209 +                * the flush of the last page, and since we handle the last
158210 +                * page specially verification will succeed anyway.
158211 +                */
158212 +               assert(host_log_mode == LOG_MODE_DIRTY_RING ||
158213 +                      atomic_read(&vcpu_sync_stop_requested) == false);
158214                 vm_dirty_log_verify(mode, bmap);
158215 +               sem_post(&sem_vcpu_cont);
158217                 iteration++;
158218                 sync_global_to_guest(vm, iteration);
158219         }
158220 @@ -818,9 +857,10 @@ int main(int argc, char *argv[])
158221                 .interval = TEST_HOST_LOOP_INTERVAL,
158222         };
158223         int opt, i;
158224 +       sigset_t sigset;
158226 -       sem_init(&dirty_ring_vcpu_stop, 0, 0);
158227 -       sem_init(&dirty_ring_vcpu_cont, 0, 0);
158228 +       sem_init(&sem_vcpu_stop, 0, 0);
158229 +       sem_init(&sem_vcpu_cont, 0, 0);
158231         guest_modes_append_default();
158233 @@ -876,6 +916,11 @@ int main(int argc, char *argv[])
158235         srandom(time(0));
158237 +       /* Ensure that vCPU threads start with SIG_IPI blocked.  */
158238 +       sigemptyset(&sigset);
158239 +       sigaddset(&sigset, SIG_IPI);
158240 +       pthread_sigmask(SIG_BLOCK, &sigset, NULL);
158242         if (host_log_mode_option == LOG_MODE_ALL) {
158243                 /* Run each log mode */
158244                 for (i = 0; i < LOG_MODE_NUM; i++) {
158245 diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
158246 index a5ce26d548e4..0af84ad48aa7 100644
158247 --- a/tools/testing/selftests/lib.mk
158248 +++ b/tools/testing/selftests/lib.mk
158249 @@ -1,6 +1,10 @@
158250  # This mimics the top-level Makefile. We do it explicitly here so that this
158251  # Makefile can operate with or without the kbuild infrastructure.
158252 +ifneq ($(LLVM),)
158253 +CC := clang
158254 +else
158255  CC := $(CROSS_COMPILE)gcc
158256 +endif
158258  ifeq (0,$(MAKELEVEL))
158259      ifeq ($(OUTPUT),)
158260 @@ -74,7 +78,8 @@ ifdef building_out_of_srctree
158261                 rsync -aq $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(OUTPUT); \
158262         fi
158263         @if [ "X$(TEST_PROGS)" != "X" ]; then \
158264 -               $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(OUTPUT)/$(TEST_PROGS)) ; \
158265 +               $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) \
158266 +                                 $(addprefix $(OUTPUT)/,$(TEST_PROGS))) ; \
158267         else \
158268                 $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS)); \
158269         fi
158270 diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
158271 index c02291e9841e..880e3ab9d088 100755
158272 --- a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
158273 +++ b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
158274 @@ -271,7 +271,7 @@ test_span_gre_fdb_roaming()
158276         while ((RET == 0)); do
158277                 bridge fdb del dev $swp3 $h3mac vlan 555 master 2>/dev/null
158278 -               bridge fdb add dev $swp2 $h3mac vlan 555 master
158279 +               bridge fdb add dev $swp2 $h3mac vlan 555 master static
158280                 sleep 1
158281                 fail_test_span_gre_dir $tundev ingress
158283 diff --git a/tools/testing/selftests/net/forwarding/mirror_lib.sh b/tools/testing/selftests/net/forwarding/mirror_lib.sh
158284 index 13db1cb50e57..6406cd76a19d 100644
158285 --- a/tools/testing/selftests/net/forwarding/mirror_lib.sh
158286 +++ b/tools/testing/selftests/net/forwarding/mirror_lib.sh
158287 @@ -20,6 +20,13 @@ mirror_uninstall()
158288         tc filter del dev $swp1 $direction pref 1000
158291 +is_ipv6()
158293 +       local addr=$1; shift
158295 +       [[ -z ${addr//[0-9a-fA-F:]/} ]]
158298  mirror_test()
158300         local vrf_name=$1; shift
158301 @@ -29,9 +36,17 @@ mirror_test()
158302         local pref=$1; shift
158303         local expect=$1; shift
158305 +       if is_ipv6 $dip; then
158306 +               local proto=-6
158307 +               local type="icmp6 type=128" # Echo request.
158308 +       else
158309 +               local proto=
158310 +               local type="icmp echoreq"
158311 +       fi
158313         local t0=$(tc_rule_stats_get $dev $pref)
158314 -       $MZ $vrf_name ${sip:+-A $sip} -B $dip -a own -b bc -q \
158315 -           -c 10 -d 100msec -t icmp type=8
158316 +       $MZ $proto $vrf_name ${sip:+-A $sip} -B $dip -a own -b bc -q \
158317 +           -c 10 -d 100msec -t $type
158318         sleep 0.5
158319         local t1=$(tc_rule_stats_get $dev $pref)
158320         local delta=$((t1 - t0))
158321 diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh
158322 index 39edce4f541c..2674ba20d524 100755
158323 --- a/tools/testing/selftests/net/mptcp/diag.sh
158324 +++ b/tools/testing/selftests/net/mptcp/diag.sh
158325 @@ -5,8 +5,9 @@ rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
158326  ns="ns1-$rndh"
158327  ksft_skip=4
158328  test_cnt=1
158329 +timeout_poll=100
158330 +timeout_test=$((timeout_poll * 2 + 1))
158331  ret=0
158332 -pids=()
158334  flush_pids()
158336 @@ -14,18 +15,14 @@ flush_pids()
158337         # give it some time
158338         sleep 1.1
158340 -       for pid in ${pids[@]}; do
158341 -               [ -d /proc/$pid ] && kill -SIGUSR1 $pid >/dev/null 2>&1
158342 -       done
158343 -       pids=()
158344 +       ip netns pids "${ns}" | xargs --no-run-if-empty kill -SIGUSR1 &>/dev/null
158347  cleanup()
158349 +       ip netns pids "${ns}" | xargs --no-run-if-empty kill -SIGKILL &>/dev/null
158351         ip netns del $ns
158352 -       for pid in ${pids[@]}; do
158353 -               [ -d /proc/$pid ] && kill -9 $pid >/dev/null 2>&1
158354 -       done
158357  ip -Version > /dev/null 2>&1
158358 @@ -79,39 +76,57 @@ trap cleanup EXIT
158359  ip netns add $ns
158360  ip -n $ns link set dev lo up
158362 -echo "a" | ip netns exec $ns ./mptcp_connect -p 10000 -l 0.0.0.0 -t 100 >/dev/null &
158363 +echo "a" | \
158364 +       timeout ${timeout_test} \
158365 +               ip netns exec $ns \
158366 +                       ./mptcp_connect -p 10000 -l -t ${timeout_poll} \
158367 +                               0.0.0.0 >/dev/null &
158368  sleep 0.1
158369 -pids[0]=$!
158370  chk_msk_nr 0 "no msk on netns creation"
158372 -echo "b" | ip netns exec $ns ./mptcp_connect -p 10000 127.0.0.1 -j -t 100 >/dev/null &
158373 +echo "b" | \
158374 +       timeout ${timeout_test} \
158375 +               ip netns exec $ns \
158376 +                       ./mptcp_connect -p 10000 -j -t ${timeout_poll} \
158377 +                               127.0.0.1 >/dev/null &
158378  sleep 0.1
158379 -pids[1]=$!
158380  chk_msk_nr 2 "after MPC handshake "
158381  chk_msk_remote_key_nr 2 "....chk remote_key"
158382  chk_msk_fallback_nr 0 "....chk no fallback"
158383  flush_pids
158386 -echo "a" | ip netns exec $ns ./mptcp_connect -p 10001 -s TCP -l 0.0.0.0 -t 100 >/dev/null &
158387 -pids[0]=$!
158388 +echo "a" | \
158389 +       timeout ${timeout_test} \
158390 +               ip netns exec $ns \
158391 +                       ./mptcp_connect -p 10001 -l -s TCP -t ${timeout_poll} \
158392 +                               0.0.0.0 >/dev/null &
158393  sleep 0.1
158394 -echo "b" | ip netns exec $ns ./mptcp_connect -p 10001 127.0.0.1 -j -t 100 >/dev/null &
158395 -pids[1]=$!
158396 +echo "b" | \
158397 +       timeout ${timeout_test} \
158398 +               ip netns exec $ns \
158399 +                       ./mptcp_connect -p 10001 -j -t ${timeout_poll} \
158400 +                               127.0.0.1 >/dev/null &
158401  sleep 0.1
158402  chk_msk_fallback_nr 1 "check fallback"
158403  flush_pids
158405  NR_CLIENTS=100
158406  for I in `seq 1 $NR_CLIENTS`; do
158407 -       echo "a" | ip netns exec $ns ./mptcp_connect -p $((I+10001)) -l 0.0.0.0 -t 100 -w 10 >/dev/null  &
158408 -       pids[$((I*2))]=$!
158409 +       echo "a" | \
158410 +               timeout ${timeout_test} \
158411 +                       ip netns exec $ns \
158412 +                               ./mptcp_connect -p $((I+10001)) -l -w 10 \
158413 +                                       -t ${timeout_poll} 0.0.0.0 >/dev/null &
158414  done
158415  sleep 0.1
158417  for I in `seq 1 $NR_CLIENTS`; do
158418 -       echo "b" | ip netns exec $ns ./mptcp_connect -p $((I+10001)) 127.0.0.1 -t 100 -w 10 >/dev/null &
158419 -       pids[$((I*2 + 1))]=$!
158420 +       echo "b" | \
158421 +               timeout ${timeout_test} \
158422 +                       ip netns exec $ns \
158423 +                               ./mptcp_connect -p $((I+10001)) -w 10 \
158424 +                                       -t ${timeout_poll} 127.0.0.1 >/dev/null &
158425  done
158426  sleep 1.5
158428 diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
158429 index 10a030b53b23..65b3b983efc2 100755
158430 --- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
158431 +++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
158432 @@ -11,7 +11,8 @@ cin=""
158433  cout=""
158434  ksft_skip=4
158435  capture=false
158436 -timeout=30
158437 +timeout_poll=30
158438 +timeout_test=$((timeout_poll * 2 + 1))
158439  ipv6=true
158440  ethtool_random_on=true
158441  tc_delay="$((RANDOM%50))"
158442 @@ -273,7 +274,7 @@ check_mptcp_disabled()
158443         ip netns exec ${disabled_ns} sysctl -q net.mptcp.enabled=0
158445         local err=0
158446 -       LANG=C ip netns exec ${disabled_ns} ./mptcp_connect -t $timeout -p 10000 -s MPTCP 127.0.0.1 < "$cin" 2>&1 | \
158447 +       LANG=C ip netns exec ${disabled_ns} ./mptcp_connect -p 10000 -s MPTCP 127.0.0.1 < "$cin" 2>&1 | \
158448                 grep -q "^socket: Protocol not available$" && err=1
158449         ip netns delete ${disabled_ns}
158451 @@ -430,14 +431,20 @@ do_transfer()
158452         local stat_cookietx_last=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesSent")
158453         local stat_cookierx_last=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesRecv")
158455 -       ip netns exec ${listener_ns} ./mptcp_connect -t $timeout -l -p $port -s ${srv_proto} $extra_args $local_addr < "$sin" > "$sout" &
158456 +       timeout ${timeout_test} \
158457 +               ip netns exec ${listener_ns} \
158458 +                       ./mptcp_connect -t ${timeout_poll} -l -p $port -s ${srv_proto} \
158459 +                               $extra_args $local_addr < "$sin" > "$sout" &
158460         local spid=$!
158462         wait_local_port_listen "${listener_ns}" "${port}"
158464         local start
158465         start=$(date +%s%3N)
158466 -       ip netns exec ${connector_ns} ./mptcp_connect -t $timeout -p $port -s ${cl_proto} $extra_args $connect_addr < "$cin" > "$cout" &
158467 +       timeout ${timeout_test} \
158468 +               ip netns exec ${connector_ns} \
158469 +                       ./mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
158470 +                               $extra_args $connect_addr < "$cin" > "$cout" &
158471         local cpid=$!
158473         wait $cpid
158474 diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
158475 index ad32240fbfda..43ed99de7734 100755
158476 --- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
158477 +++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
158478 @@ -8,7 +8,8 @@ cin=""
158479  cinsent=""
158480  cout=""
158481  ksft_skip=4
158482 -timeout=30
158483 +timeout_poll=30
158484 +timeout_test=$((timeout_poll * 2 + 1))
158485  mptcp_connect=""
158486  capture=0
158487  do_all_tests=1
158488 @@ -245,17 +246,26 @@ do_transfer()
158489                 local_addr="0.0.0.0"
158490         fi
158492 -       ip netns exec ${listener_ns} $mptcp_connect -t $timeout -l -p $port \
158493 -               -s ${srv_proto} ${local_addr} < "$sin" > "$sout" &
158494 +       timeout ${timeout_test} \
158495 +               ip netns exec ${listener_ns} \
158496 +                       $mptcp_connect -t ${timeout_poll} -l -p $port -s ${srv_proto} \
158497 +                               ${local_addr} < "$sin" > "$sout" &
158498         spid=$!
158500         sleep 1
158502         if [ "$test_link_fail" -eq 0 ];then
158503 -               ip netns exec ${connector_ns} $mptcp_connect -t $timeout -p $port -s ${cl_proto} $connect_addr < "$cin" > "$cout" &
158504 +               timeout ${timeout_test} \
158505 +                       ip netns exec ${connector_ns} \
158506 +                               $mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
158507 +                                       $connect_addr < "$cin" > "$cout" &
158508         else
158509 -               ( cat "$cin" ; sleep 2; link_failure $listener_ns ; cat "$cin" ) | tee "$cinsent" | \
158510 -               ip netns exec ${connector_ns} $mptcp_connect -t $timeout -p $port -s ${cl_proto} $connect_addr > "$cout" &
158511 +               ( cat "$cin" ; sleep 2; link_failure $listener_ns ; cat "$cin" ) | \
158512 +                       tee "$cinsent" | \
158513 +                       timeout ${timeout_test} \
158514 +                               ip netns exec ${connector_ns} \
158515 +                                       $mptcp_connect -t ${timeout_poll} -p $port -s ${cl_proto} \
158516 +                                               $connect_addr > "$cout" &
158517         fi
158518         cpid=$!
158520 diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh
158521 index f039ee57eb3c..3aeef3bcb101 100755
158522 --- a/tools/testing/selftests/net/mptcp/simult_flows.sh
158523 +++ b/tools/testing/selftests/net/mptcp/simult_flows.sh
158524 @@ -7,7 +7,8 @@ ns2="ns2-$rndh"
158525  ns3="ns3-$rndh"
158526  capture=false
158527  ksft_skip=4
158528 -timeout=30
158529 +timeout_poll=30
158530 +timeout_test=$((timeout_poll * 2 + 1))
158531  test_cnt=1
158532  ret=0
158533  bail=0
158534 @@ -157,14 +158,20 @@ do_transfer()
158535                 sleep 1
158536         fi
158538 -       ip netns exec ${ns3} ./mptcp_connect -jt $timeout -l -p $port 0.0.0.0 < "$sin" > "$sout" &
158539 +       timeout ${timeout_test} \
158540 +               ip netns exec ${ns3} \
158541 +                       ./mptcp_connect -jt ${timeout_poll} -l -p $port \
158542 +                               0.0.0.0 < "$sin" > "$sout" &
158543         local spid=$!
158545         wait_local_port_listen "${ns3}" "${port}"
158547         local start
158548         start=$(date +%s%3N)
158549 -       ip netns exec ${ns1} ./mptcp_connect -jt $timeout -p $port 10.0.3.3 < "$cin" > "$cout" &
158550 +       timeout ${timeout_test} \
158551 +               ip netns exec ${ns1} \
158552 +                       ./mptcp_connect -jt ${timeout_poll} -p $port \
158553 +                               10.0.3.3 < "$cin" > "$cout" &
158554         local cpid=$!
158556         wait $cpid
158557 diff --git a/tools/testing/selftests/powerpc/security/entry_flush.c b/tools/testing/selftests/powerpc/security/entry_flush.c
158558 index 78cf914fa321..68ce377b205e 100644
158559 --- a/tools/testing/selftests/powerpc/security/entry_flush.c
158560 +++ b/tools/testing/selftests/powerpc/security/entry_flush.c
158561 @@ -53,7 +53,7 @@ int entry_flush_test(void)
158563         entry_flush = entry_flush_orig;
158565 -       fd = perf_event_open_counter(PERF_TYPE_RAW, /* L1d miss */ 0x400f0, -1);
158566 +       fd = perf_event_open_counter(PERF_TYPE_HW_CACHE, PERF_L1D_READ_MISS_CONFIG, -1);
158567         FAIL_IF(fd < 0);
158569         p = (char *)memalign(zero_size, CACHELINE_SIZE);
158570 diff --git a/tools/testing/selftests/powerpc/security/flush_utils.h b/tools/testing/selftests/powerpc/security/flush_utils.h
158571 index 07a5eb301466..7a3d60292916 100644
158572 --- a/tools/testing/selftests/powerpc/security/flush_utils.h
158573 +++ b/tools/testing/selftests/powerpc/security/flush_utils.h
158574 @@ -9,6 +9,10 @@
158576  #define CACHELINE_SIZE 128
158578 +#define PERF_L1D_READ_MISS_CONFIG      ((PERF_COUNT_HW_CACHE_L1D) |            \
158579 +                                       (PERF_COUNT_HW_CACHE_OP_READ << 8) |    \
158580 +                                       (PERF_COUNT_HW_CACHE_RESULT_MISS << 16))
158582  void syscall_loop(char *p, unsigned long iterations,
158583                   unsigned long zero_size);
158585 diff --git a/tools/testing/selftests/powerpc/security/rfi_flush.c b/tools/testing/selftests/powerpc/security/rfi_flush.c
158586 index 7565fd786640..f73484a6470f 100644
158587 --- a/tools/testing/selftests/powerpc/security/rfi_flush.c
158588 +++ b/tools/testing/selftests/powerpc/security/rfi_flush.c
158589 @@ -54,7 +54,7 @@ int rfi_flush_test(void)
158591         rfi_flush = rfi_flush_orig;
158593 -       fd = perf_event_open_counter(PERF_TYPE_RAW, /* L1d miss */ 0x400f0, -1);
158594 +       fd = perf_event_open_counter(PERF_TYPE_HW_CACHE, PERF_L1D_READ_MISS_CONFIG, -1);
158595         FAIL_IF(fd < 0);
158597         p = (char *)memalign(zero_size, CACHELINE_SIZE);
158598 diff --git a/tools/testing/selftests/resctrl/Makefile b/tools/testing/selftests/resctrl/Makefile
158599 index d585cc1948cc..6bcee2ec91a9 100644
158600 --- a/tools/testing/selftests/resctrl/Makefile
158601 +++ b/tools/testing/selftests/resctrl/Makefile
158602 @@ -1,5 +1,5 @@
158603  CC = $(CROSS_COMPILE)gcc
158604 -CFLAGS = -g -Wall
158605 +CFLAGS = -g -Wall -O2 -D_FORTIFY_SOURCE=2
158606  SRCS=$(wildcard *.c)
158607  OBJS=$(SRCS:.c=.o)
158609 diff --git a/tools/testing/selftests/resctrl/cache.c b/tools/testing/selftests/resctrl/cache.c
158610 index 38dbf4962e33..5922cc1b0386 100644
158611 --- a/tools/testing/selftests/resctrl/cache.c
158612 +++ b/tools/testing/selftests/resctrl/cache.c
158613 @@ -182,7 +182,7 @@ int measure_cache_vals(struct resctrl_val_param *param, int bm_pid)
158614         /*
158615          * Measure cache miss from perf.
158616          */
158617 -       if (!strcmp(param->resctrl_val, "cat")) {
158618 +       if (!strncmp(param->resctrl_val, CAT_STR, sizeof(CAT_STR))) {
158619                 ret = get_llc_perf(&llc_perf_miss);
158620                 if (ret < 0)
158621                         return ret;
158622 @@ -192,7 +192,7 @@ int measure_cache_vals(struct resctrl_val_param *param, int bm_pid)
158623         /*
158624          * Measure llc occupancy from resctrl.
158625          */
158626 -       if (!strcmp(param->resctrl_val, "cqm")) {
158627 +       if (!strncmp(param->resctrl_val, CQM_STR, sizeof(CQM_STR))) {
158628                 ret = get_llc_occu_resctrl(&llc_occu_resc);
158629                 if (ret < 0)
158630                         return ret;
158631 @@ -234,7 +234,7 @@ int cat_val(struct resctrl_val_param *param)
158632         if (ret)
158633                 return ret;
158635 -       if ((strcmp(resctrl_val, "cat") == 0)) {
158636 +       if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
158637                 ret = initialize_llc_perf();
158638                 if (ret)
158639                         return ret;
158640 @@ -242,7 +242,7 @@ int cat_val(struct resctrl_val_param *param)
158642         /* Test runs until the callback setup() tells the test to stop. */
158643         while (1) {
158644 -               if (strcmp(resctrl_val, "cat") == 0) {
158645 +               if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
158646                         ret = param->setup(1, param);
158647                         if (ret) {
158648                                 ret = 0;
158649 diff --git a/tools/testing/selftests/resctrl/cat_test.c b/tools/testing/selftests/resctrl/cat_test.c
158650 index 5da43767b973..20823725daca 100644
158651 --- a/tools/testing/selftests/resctrl/cat_test.c
158652 +++ b/tools/testing/selftests/resctrl/cat_test.c
158653 @@ -17,10 +17,10 @@
158654  #define MAX_DIFF_PERCENT       4
158655  #define MAX_DIFF               1000000
158657 -int count_of_bits;
158658 -char cbm_mask[256];
158659 -unsigned long long_mask;
158660 -unsigned long cache_size;
158661 +static int count_of_bits;
158662 +static char cbm_mask[256];
158663 +static unsigned long long_mask;
158664 +static unsigned long cache_size;
158667   * Change schemata. Write schemata to specified
158668 @@ -136,7 +136,7 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
158669                 return -1;
158671         /* Get default cbm mask for L3/L2 cache */
158672 -       ret = get_cbm_mask(cache_type);
158673 +       ret = get_cbm_mask(cache_type, cbm_mask);
158674         if (ret)
158675                 return ret;
158677 @@ -164,7 +164,7 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
158678                 return -1;
158680         struct resctrl_val_param param = {
158681 -               .resctrl_val    = "cat",
158682 +               .resctrl_val    = CAT_STR,
158683                 .cpu_no         = cpu_no,
158684                 .mum_resctrlfs  = 0,
158685                 .setup          = cat_setup,
158686 diff --git a/tools/testing/selftests/resctrl/cqm_test.c b/tools/testing/selftests/resctrl/cqm_test.c
158687 index c8756152bd61..271752e9ef5b 100644
158688 --- a/tools/testing/selftests/resctrl/cqm_test.c
158689 +++ b/tools/testing/selftests/resctrl/cqm_test.c
158690 @@ -16,10 +16,10 @@
158691  #define MAX_DIFF               2000000
158692  #define MAX_DIFF_PERCENT       15
158694 -int count_of_bits;
158695 -char cbm_mask[256];
158696 -unsigned long long_mask;
158697 -unsigned long cache_size;
158698 +static int count_of_bits;
158699 +static char cbm_mask[256];
158700 +static unsigned long long_mask;
158701 +static unsigned long cache_size;
158703  static int cqm_setup(int num, ...)
158705 @@ -86,7 +86,7 @@ static int check_results(struct resctrl_val_param *param, int no_of_bits)
158706                 return errno;
158707         }
158709 -       while (fgets(temp, 1024, fp)) {
158710 +       while (fgets(temp, sizeof(temp), fp)) {
158711                 char *token = strtok(temp, ":\t");
158712                 int fields = 0;
158714 @@ -125,7 +125,7 @@ int cqm_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
158715         if (!validate_resctrl_feature_request("cqm"))
158716                 return -1;
158718 -       ret = get_cbm_mask("L3");
158719 +       ret = get_cbm_mask("L3", cbm_mask);
158720         if (ret)
158721                 return ret;
158723 @@ -145,7 +145,7 @@ int cqm_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
158724         }
158726         struct resctrl_val_param param = {
158727 -               .resctrl_val    = "cqm",
158728 +               .resctrl_val    = CQM_STR,
158729                 .ctrlgrp        = "c1",
158730                 .mongrp         = "m1",
158731                 .cpu_no         = cpu_no,
158732 diff --git a/tools/testing/selftests/resctrl/fill_buf.c b/tools/testing/selftests/resctrl/fill_buf.c
158733 index 79c611c99a3d..51e5cf22632f 100644
158734 --- a/tools/testing/selftests/resctrl/fill_buf.c
158735 +++ b/tools/testing/selftests/resctrl/fill_buf.c
158736 @@ -115,7 +115,7 @@ static int fill_cache_read(unsigned char *start_ptr, unsigned char *end_ptr,
158738         while (1) {
158739                 ret = fill_one_span_read(start_ptr, end_ptr);
158740 -               if (!strcmp(resctrl_val, "cat"))
158741 +               if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)))
158742                         break;
158743         }
158745 @@ -134,7 +134,7 @@ static int fill_cache_write(unsigned char *start_ptr, unsigned char *end_ptr,
158747         while (1) {
158748                 fill_one_span_write(start_ptr, end_ptr);
158749 -               if (!strcmp(resctrl_val, "cat"))
158750 +               if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)))
158751                         break;
158752         }
158754 diff --git a/tools/testing/selftests/resctrl/mba_test.c b/tools/testing/selftests/resctrl/mba_test.c
158755 index 7bf8eaa6204b..6449fbd96096 100644
158756 --- a/tools/testing/selftests/resctrl/mba_test.c
158757 +++ b/tools/testing/selftests/resctrl/mba_test.c
158758 @@ -141,7 +141,7 @@ void mba_test_cleanup(void)
158759  int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd)
158761         struct resctrl_val_param param = {
158762 -               .resctrl_val    = "mba",
158763 +               .resctrl_val    = MBA_STR,
158764                 .ctrlgrp        = "c1",
158765                 .mongrp         = "m1",
158766                 .cpu_no         = cpu_no,
158767 diff --git a/tools/testing/selftests/resctrl/mbm_test.c b/tools/testing/selftests/resctrl/mbm_test.c
158768 index 4700f7453f81..ec6cfe01c9c2 100644
158769 --- a/tools/testing/selftests/resctrl/mbm_test.c
158770 +++ b/tools/testing/selftests/resctrl/mbm_test.c
158771 @@ -114,7 +114,7 @@ void mbm_test_cleanup(void)
158772  int mbm_bw_change(int span, int cpu_no, char *bw_report, char **benchmark_cmd)
158774         struct resctrl_val_param param = {
158775 -               .resctrl_val    = "mbm",
158776 +               .resctrl_val    = MBM_STR,
158777                 .ctrlgrp        = "c1",
158778                 .mongrp         = "m1",
158779                 .span           = span,
158780 diff --git a/tools/testing/selftests/resctrl/resctrl.h b/tools/testing/selftests/resctrl/resctrl.h
158781 index 39bf59c6b9c5..9dcc96e1ad3d 100644
158782 --- a/tools/testing/selftests/resctrl/resctrl.h
158783 +++ b/tools/testing/selftests/resctrl/resctrl.h
158784 @@ -28,6 +28,10 @@
158785  #define RESCTRL_PATH           "/sys/fs/resctrl"
158786  #define PHYS_ID_PATH           "/sys/devices/system/cpu/cpu"
158787  #define CBM_MASK_PATH          "/sys/fs/resctrl/info"
158788 +#define L3_PATH                        "/sys/fs/resctrl/info/L3"
158789 +#define MB_PATH                        "/sys/fs/resctrl/info/MB"
158790 +#define L3_MON_PATH            "/sys/fs/resctrl/info/L3_MON"
158791 +#define L3_MON_FEATURES_PATH   "/sys/fs/resctrl/info/L3_MON/mon_features"
158793  #define PARENT_EXIT(err_msg)                   \
158794         do {                                    \
158795 @@ -62,11 +66,16 @@ struct resctrl_val_param {
158796         int             (*setup)(int num, ...);
158799 -pid_t bm_pid, ppid;
158800 -int tests_run;
158801 +#define MBM_STR                        "mbm"
158802 +#define MBA_STR                        "mba"
158803 +#define CQM_STR                        "cqm"
158804 +#define CAT_STR                        "cat"
158806 -char llc_occup_path[1024];
158807 -bool is_amd;
158808 +extern pid_t bm_pid, ppid;
158809 +extern int tests_run;
158811 +extern char llc_occup_path[1024];
158812 +extern bool is_amd;
158814  bool check_resctrlfs_support(void);
158815  int filter_dmesg(void);
158816 @@ -74,7 +83,7 @@ int remount_resctrlfs(bool mum_resctrlfs);
158817  int get_resource_id(int cpu_no, int *resource_id);
158818  int umount_resctrlfs(void);
158819  int validate_bw_report_request(char *bw_report);
158820 -bool validate_resctrl_feature_request(char *resctrl_val);
158821 +bool validate_resctrl_feature_request(const char *resctrl_val);
158822  char *fgrep(FILE *inf, const char *str);
158823  int taskset_benchmark(pid_t bm_pid, int cpu_no);
158824  void run_benchmark(int signum, siginfo_t *info, void *ucontext);
158825 @@ -92,7 +101,7 @@ void tests_cleanup(void);
158826  void mbm_test_cleanup(void);
158827  int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd);
158828  void mba_test_cleanup(void);
158829 -int get_cbm_mask(char *cache_type);
158830 +int get_cbm_mask(char *cache_type, char *cbm_mask);
158831  int get_cache_size(int cpu_no, char *cache_type, unsigned long *cache_size);
158832  void ctrlc_handler(int signum, siginfo_t *info, void *ptr);
158833  int cat_val(struct resctrl_val_param *param);
158834 diff --git a/tools/testing/selftests/resctrl/resctrl_tests.c b/tools/testing/selftests/resctrl/resctrl_tests.c
158835 index 425cc85ac883..ac2269610aa9 100644
158836 --- a/tools/testing/selftests/resctrl/resctrl_tests.c
158837 +++ b/tools/testing/selftests/resctrl/resctrl_tests.c
158838 @@ -73,7 +73,7 @@ int main(int argc, char **argv)
158839                 }
158840         }
158842 -       while ((c = getopt(argc_new, argv, "ht:b:")) != -1) {
158843 +       while ((c = getopt(argc_new, argv, "ht:b:n:p:")) != -1) {
158844                 char *token;
158846                 switch (c) {
158847 @@ -85,13 +85,13 @@ int main(int argc, char **argv)
158848                         cqm_test = false;
158849                         cat_test = false;
158850                         while (token) {
158851 -                               if (!strcmp(token, "mbm")) {
158852 +                               if (!strncmp(token, MBM_STR, sizeof(MBM_STR))) {
158853                                         mbm_test = true;
158854 -                               } else if (!strcmp(token, "mba")) {
158855 +                               } else if (!strncmp(token, MBA_STR, sizeof(MBA_STR))) {
158856                                         mba_test = true;
158857 -                               } else if (!strcmp(token, "cqm")) {
158858 +                               } else if (!strncmp(token, CQM_STR, sizeof(CQM_STR))) {
158859                                         cqm_test = true;
158860 -                               } else if (!strcmp(token, "cat")) {
158861 +                               } else if (!strncmp(token, CAT_STR, sizeof(CAT_STR))) {
158862                                         cat_test = true;
158863                                 } else {
158864                                         printf("invalid argument\n");
158865 @@ -161,7 +161,7 @@ int main(int argc, char **argv)
158866         if (!is_amd && mbm_test) {
158867                 printf("# Starting MBM BW change ...\n");
158868                 if (!has_ben)
158869 -                       sprintf(benchmark_cmd[5], "%s", "mba");
158870 +                       sprintf(benchmark_cmd[5], "%s", MBA_STR);
158871                 res = mbm_bw_change(span, cpu_no, bw_report, benchmark_cmd);
158872                 printf("%sok MBM: bw change\n", res ? "not " : "");
158873                 mbm_test_cleanup();
158874 @@ -181,7 +181,7 @@ int main(int argc, char **argv)
158875         if (cqm_test) {
158876                 printf("# Starting CQM test ...\n");
158877                 if (!has_ben)
158878 -                       sprintf(benchmark_cmd[5], "%s", "cqm");
158879 +                       sprintf(benchmark_cmd[5], "%s", CQM_STR);
158880                 res = cqm_resctrl_val(cpu_no, no_of_bits, benchmark_cmd);
158881                 printf("%sok CQM: test\n", res ? "not " : "");
158882                 cqm_test_cleanup();
158883 diff --git a/tools/testing/selftests/resctrl/resctrl_val.c b/tools/testing/selftests/resctrl/resctrl_val.c
158884 index 520fea3606d1..8df557894059 100644
158885 --- a/tools/testing/selftests/resctrl/resctrl_val.c
158886 +++ b/tools/testing/selftests/resctrl/resctrl_val.c
158887 @@ -221,8 +221,8 @@ static int read_from_imc_dir(char *imc_dir, int count)
158888   */
158889  static int num_of_imcs(void)
158891 +       char imc_dir[512], *temp;
158892         unsigned int count = 0;
158893 -       char imc_dir[512];
158894         struct dirent *ep;
158895         int ret;
158896         DIR *dp;
158897 @@ -230,7 +230,25 @@ static int num_of_imcs(void)
158898         dp = opendir(DYN_PMU_PATH);
158899         if (dp) {
158900                 while ((ep = readdir(dp))) {
158901 -                       if (strstr(ep->d_name, UNCORE_IMC)) {
158902 +                       temp = strstr(ep->d_name, UNCORE_IMC);
158903 +                       if (!temp)
158904 +                               continue;
158906 +                       /*
158907 +                        * imc counters are named as "uncore_imc_<n>", hence
158908 +                        * increment the pointer to point to <n>. Note that
158909 +                        * sizeof(UNCORE_IMC) would count for null character as
158910 +                        * well and hence the last underscore character in
158911 +                        * uncore_imc'_' need not be counted.
158912 +                        */
158913 +                       temp = temp + sizeof(UNCORE_IMC);
158915 +                       /*
158916 +                        * Some directories under "DYN_PMU_PATH" could have
158917 +                        * names like "uncore_imc_free_running", hence, check if
158918 +                        * first character is a numerical digit or not.
158919 +                        */
158920 +                       if (temp[0] >= '0' && temp[0] <= '9') {
158921                                 sprintf(imc_dir, "%s/%s/", DYN_PMU_PATH,
158922                                         ep->d_name);
158923                                 ret = read_from_imc_dir(imc_dir, count);
158924 @@ -282,9 +300,9 @@ static int initialize_mem_bw_imc(void)
158925   * Memory B/W utilized by a process on a socket can be calculated using
158926   * iMC counters. Perf events are used to read these counters.
158927   *
158928 - * Return: >= 0 on success. < 0 on failure.
158929 + * Return: = 0 on success. < 0 on failure.
158930   */
158931 -static float get_mem_bw_imc(int cpu_no, char *bw_report)
158932 +static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc)
158934         float reads, writes, of_mul_read, of_mul_write;
158935         int imc, j, ret;
158936 @@ -355,13 +373,18 @@ static float get_mem_bw_imc(int cpu_no, char *bw_report)
158937                 close(imc_counters_config[imc][WRITE].fd);
158938         }
158940 -       if (strcmp(bw_report, "reads") == 0)
158941 -               return reads;
158942 +       if (strcmp(bw_report, "reads") == 0) {
158943 +               *bw_imc = reads;
158944 +               return 0;
158945 +       }
158947 -       if (strcmp(bw_report, "writes") == 0)
158948 -               return writes;
158949 +       if (strcmp(bw_report, "writes") == 0) {
158950 +               *bw_imc = writes;
158951 +               return 0;
158952 +       }
158954 -       return (reads + writes);
158955 +       *bw_imc = reads + writes;
158956 +       return 0;
158959  void set_mbm_path(const char *ctrlgrp, const char *mongrp, int resource_id)
158960 @@ -397,10 +420,10 @@ static void initialize_mem_bw_resctrl(const char *ctrlgrp, const char *mongrp,
158961                 return;
158962         }
158964 -       if (strcmp(resctrl_val, "mbm") == 0)
158965 +       if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)))
158966                 set_mbm_path(ctrlgrp, mongrp, resource_id);
158968 -       if ((strcmp(resctrl_val, "mba") == 0)) {
158969 +       if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
158970                 if (ctrlgrp)
158971                         sprintf(mbm_total_path, CON_MBM_LOCAL_BYTES_PATH,
158972                                 RESCTRL_PATH, ctrlgrp, resource_id);
158973 @@ -420,9 +443,8 @@ static void initialize_mem_bw_resctrl(const char *ctrlgrp, const char *mongrp,
158974   * 1. If con_mon grp is given, then read from it
158975   * 2. If con_mon grp is not given, then read from root con_mon grp
158976   */
158977 -static unsigned long get_mem_bw_resctrl(void)
158978 +static int get_mem_bw_resctrl(unsigned long *mbm_total)
158980 -       unsigned long mbm_total = 0;
158981         FILE *fp;
158983         fp = fopen(mbm_total_path, "r");
158984 @@ -431,7 +453,7 @@ static unsigned long get_mem_bw_resctrl(void)
158986                 return -1;
158987         }
158988 -       if (fscanf(fp, "%lu", &mbm_total) <= 0) {
158989 +       if (fscanf(fp, "%lu", mbm_total) <= 0) {
158990                 perror("Could not get mbm local bytes");
158991                 fclose(fp);
158993 @@ -439,7 +461,7 @@ static unsigned long get_mem_bw_resctrl(void)
158994         }
158995         fclose(fp);
158997 -       return mbm_total;
158998 +       return 0;
159001  pid_t bm_pid, ppid;
159002 @@ -524,14 +546,15 @@ static void initialize_llc_occu_resctrl(const char *ctrlgrp, const char *mongrp,
159003                 return;
159004         }
159006 -       if (strcmp(resctrl_val, "cqm") == 0)
159007 +       if (!strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
159008                 set_cqm_path(ctrlgrp, mongrp, resource_id);
159011  static int
159012  measure_vals(struct resctrl_val_param *param, unsigned long *bw_resc_start)
159014 -       unsigned long bw_imc, bw_resc, bw_resc_end;
159015 +       unsigned long bw_resc, bw_resc_end;
159016 +       float bw_imc;
159017         int ret;
159019         /*
159020 @@ -541,13 +564,13 @@ measure_vals(struct resctrl_val_param *param, unsigned long *bw_resc_start)
159021          * Compare the two values to validate resctrl value.
159022          * It takes 1sec to measure the data.
159023          */
159024 -       bw_imc = get_mem_bw_imc(param->cpu_no, param->bw_report);
159025 -       if (bw_imc <= 0)
159026 -               return bw_imc;
159027 +       ret = get_mem_bw_imc(param->cpu_no, param->bw_report, &bw_imc);
159028 +       if (ret < 0)
159029 +               return ret;
159031 -       bw_resc_end = get_mem_bw_resctrl();
159032 -       if (bw_resc_end <= 0)
159033 -               return bw_resc_end;
159034 +       ret = get_mem_bw_resctrl(&bw_resc_end);
159035 +       if (ret < 0)
159036 +               return ret;
159038         bw_resc = (bw_resc_end - *bw_resc_start) / MB;
159039         ret = print_results_bw(param->filename, bm_pid, bw_imc, bw_resc);
159040 @@ -579,8 +602,8 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
159041         if (strcmp(param->filename, "") == 0)
159042                 sprintf(param->filename, "stdio");
159044 -       if ((strcmp(resctrl_val, "mba")) == 0 ||
159045 -           (strcmp(resctrl_val, "mbm")) == 0) {
159046 +       if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)) ||
159047 +           !strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
159048                 ret = validate_bw_report_request(param->bw_report);
159049                 if (ret)
159050                         return ret;
159051 @@ -674,15 +697,15 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
159052         if (ret)
159053                 goto out;
159055 -       if ((strcmp(resctrl_val, "mbm") == 0) ||
159056 -           (strcmp(resctrl_val, "mba") == 0)) {
159057 +       if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
159058 +           !strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
159059                 ret = initialize_mem_bw_imc();
159060                 if (ret)
159061                         goto out;
159063                 initialize_mem_bw_resctrl(param->ctrlgrp, param->mongrp,
159064                                           param->cpu_no, resctrl_val);
159065 -       } else if (strcmp(resctrl_val, "cqm") == 0)
159066 +       } else if (!strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
159067                 initialize_llc_occu_resctrl(param->ctrlgrp, param->mongrp,
159068                                             param->cpu_no, resctrl_val);
159070 @@ -710,8 +733,8 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
159072         /* Test runs until the callback setup() tells the test to stop. */
159073         while (1) {
159074 -               if ((strcmp(resctrl_val, "mbm") == 0) ||
159075 -                   (strcmp(resctrl_val, "mba") == 0)) {
159076 +               if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
159077 +                   !strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
159078                         ret = param->setup(1, param);
159079                         if (ret) {
159080                                 ret = 0;
159081 @@ -721,7 +744,7 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
159082                         ret = measure_vals(param, &bw_resc_start);
159083                         if (ret)
159084                                 break;
159085 -               } else if (strcmp(resctrl_val, "cqm") == 0) {
159086 +               } else if (!strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR))) {
159087                         ret = param->setup(1, param);
159088                         if (ret) {
159089                                 ret = 0;
159090 diff --git a/tools/testing/selftests/resctrl/resctrlfs.c b/tools/testing/selftests/resctrl/resctrlfs.c
159091 index 19c0ec4045a4..b57170f53861 100644
159092 --- a/tools/testing/selftests/resctrl/resctrlfs.c
159093 +++ b/tools/testing/selftests/resctrl/resctrlfs.c
159094 @@ -49,8 +49,6 @@ static int find_resctrl_mount(char *buffer)
159095         return -ENOENT;
159098 -char cbm_mask[256];
159101   * remount_resctrlfs - Remount resctrl FS at /sys/fs/resctrl
159102   * @mum_resctrlfs:     Should the resctrl FS be remounted?
159103 @@ -205,16 +203,18 @@ int get_cache_size(int cpu_no, char *cache_type, unsigned long *cache_size)
159105   * get_cbm_mask - Get cbm mask for given cache
159106   * @cache_type:        Cache level L2/L3
159108 - * Mask is stored in cbm_mask which is global variable.
159109 + * @cbm_mask:  cbm_mask returned as a string
159110   *
159111   * Return: = 0 on success, < 0 on failure.
159112   */
159113 -int get_cbm_mask(char *cache_type)
159114 +int get_cbm_mask(char *cache_type, char *cbm_mask)
159116         char cbm_mask_path[1024];
159117         FILE *fp;
159119 +       if (!cbm_mask)
159120 +               return -1;
159122         sprintf(cbm_mask_path, "%s/%s/cbm_mask", CBM_MASK_PATH, cache_type);
159124         fp = fopen(cbm_mask_path, "r");
159125 @@ -334,7 +334,7 @@ void run_benchmark(int signum, siginfo_t *info, void *ucontext)
159126                 operation = atoi(benchmark_cmd[4]);
159127                 sprintf(resctrl_val, "%s", benchmark_cmd[5]);
159129 -               if (strcmp(resctrl_val, "cqm") != 0)
159130 +               if (strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
159131                         buffer_span = span * MB;
159132                 else
159133                         buffer_span = span;
159134 @@ -459,8 +459,8 @@ int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
159135                 goto out;
159137         /* Create mon grp and write pid into it for "mbm" and "cqm" test */
159138 -       if ((strcmp(resctrl_val, "cqm") == 0) ||
159139 -           (strcmp(resctrl_val, "mbm") == 0)) {
159140 +       if (!strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)) ||
159141 +           !strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
159142                 if (strlen(mongrp)) {
159143                         sprintf(monitorgroup_p, "%s/mon_groups", controlgroup);
159144                         sprintf(monitorgroup, "%s/%s", monitorgroup_p, mongrp);
159145 @@ -505,9 +505,9 @@ int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, char *resctrl_val)
159146         int resource_id, ret = 0;
159147         FILE *fp;
159149 -       if ((strcmp(resctrl_val, "mba") != 0) &&
159150 -           (strcmp(resctrl_val, "cat") != 0) &&
159151 -           (strcmp(resctrl_val, "cqm") != 0))
159152 +       if (strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)) &&
159153 +           strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)) &&
159154 +           strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
159155                 return -ENOENT;
159157         if (!schemata) {
159158 @@ -528,9 +528,10 @@ int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, char *resctrl_val)
159159         else
159160                 sprintf(controlgroup, "%s/schemata", RESCTRL_PATH);
159162 -       if (!strcmp(resctrl_val, "cat") || !strcmp(resctrl_val, "cqm"))
159163 +       if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)) ||
159164 +           !strncmp(resctrl_val, CQM_STR, sizeof(CQM_STR)))
159165                 sprintf(schema, "%s%d%c%s", "L3:", resource_id, '=', schemata);
159166 -       if (strcmp(resctrl_val, "mba") == 0)
159167 +       if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)))
159168                 sprintf(schema, "%s%d%c%s", "MB:", resource_id, '=', schemata);
159170         fp = fopen(controlgroup, "w");
159171 @@ -615,26 +616,56 @@ char *fgrep(FILE *inf, const char *str)
159172   * validate_resctrl_feature_request - Check if requested feature is valid.
159173   * @resctrl_val:       Requested feature
159174   *
159175 - * Return: 0 on success, non-zero on failure
159176 + * Return: True if the feature is supported, else false
159177   */
159178 -bool validate_resctrl_feature_request(char *resctrl_val)
159179 +bool validate_resctrl_feature_request(const char *resctrl_val)
159181 -       FILE *inf = fopen("/proc/cpuinfo", "r");
159182 +       struct stat statbuf;
159183         bool found = false;
159184         char *res;
159185 +       FILE *inf;
159187 -       if (!inf)
159188 +       if (!resctrl_val)
159189                 return false;
159191 -       res = fgrep(inf, "flags");
159193 -       if (res) {
159194 -               char *s = strchr(res, ':');
159195 +       if (remount_resctrlfs(false))
159196 +               return false;
159198 -               found = s && !strstr(s, resctrl_val);
159199 -               free(res);
159200 +       if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
159201 +               if (!stat(L3_PATH, &statbuf))
159202 +                       return true;
159203 +       } else if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
159204 +               if (!stat(MB_PATH, &statbuf))
159205 +                       return true;
159206 +       } else if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
159207 +                  !strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
159208 +               if (!stat(L3_MON_PATH, &statbuf)) {
159209 +                       inf = fopen(L3_MON_FEATURES_PATH, "r");
159210 +                       if (!inf)
159211 +                               return false;
159213 +                       if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
159214 +                               res = fgrep(inf, "llc_occupancy");
159215 +                               if (res) {
159216 +                                       found = true;
159217 +                                       free(res);
159218 +                               }
159219 +                       }
159221 +                       if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
159222 +                               res = fgrep(inf, "mbm_total_bytes");
159223 +                               if (res) {
159224 +                                       free(res);
159225 +                                       res = fgrep(inf, "mbm_local_bytes");
159226 +                                       if (res) {
159227 +                                               found = true;
159228 +                                               free(res);
159229 +                                       }
159230 +                               }
159231 +                       }
159232 +                       fclose(inf);
159233 +               }
159234         }
159235 -       fclose(inf);
159237         return found;
159239 diff --git a/tools/testing/selftests/x86/thunks_32.S b/tools/testing/selftests/x86/thunks_32.S
159240 index a71d92da8f46..f3f56e681e9f 100644
159241 --- a/tools/testing/selftests/x86/thunks_32.S
159242 +++ b/tools/testing/selftests/x86/thunks_32.S
159243 @@ -45,3 +45,5 @@ call64_from_32:
159244         ret
159246  .size call64_from_32, .-call64_from_32
159248 +.section .note.GNU-stack,"",%progbits
159249 diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
159250 index 62bd908ecd58..f08f5e82460b 100644
159251 --- a/virt/kvm/coalesced_mmio.c
159252 +++ b/virt/kvm/coalesced_mmio.c
159253 @@ -174,21 +174,36 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
159254                                            struct kvm_coalesced_mmio_zone *zone)
159256         struct kvm_coalesced_mmio_dev *dev, *tmp;
159257 +       int r;
159259         if (zone->pio != 1 && zone->pio != 0)
159260                 return -EINVAL;
159262         mutex_lock(&kvm->slots_lock);
159264 -       list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
159265 +       list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list) {
159266                 if (zone->pio == dev->zone.pio &&
159267                     coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
159268 -                       kvm_io_bus_unregister_dev(kvm,
159269 +                       r = kvm_io_bus_unregister_dev(kvm,
159270                                 zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
159271                         kvm_iodevice_destructor(&dev->dev);
159273 +                       /*
159274 +                        * On failure, unregister destroys all devices on the
159275 +                        * bus _except_ the target device, i.e. coalesced_zones
159276 +                        * has been modified.  No need to restart the walk as
159277 +                        * there aren't any zones left.
159278 +                        */
159279 +                       if (r)
159280 +                               break;
159281                 }
159282 +       }
159284         mutex_unlock(&kvm->slots_lock);
159286 +       /*
159287 +        * Ignore the result of kvm_io_bus_unregister_dev(), from userspace's
159288 +        * perspective, the coalesced MMIO is most definitely unregistered.
159289 +        */
159290         return 0;
159292 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
159293 index 383df23514b9..5cabc6c748db 100644
159294 --- a/virt/kvm/kvm_main.c
159295 +++ b/virt/kvm/kvm_main.c
159296 @@ -2758,8 +2758,8 @@ static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
159297         if (val < grow_start)
159298                 val = grow_start;
159300 -       if (val > halt_poll_ns)
159301 -               val = halt_poll_ns;
159302 +       if (val > vcpu->kvm->max_halt_poll_ns)
159303 +               val = vcpu->kvm->max_halt_poll_ns;
159305         vcpu->halt_poll_ns = val;
159306  out:
159307 @@ -2838,7 +2838,8 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
159308                                 goto out;
159309                         }
159310                         poll_end = cur = ktime_get();
159311 -               } while (single_task_running() && ktime_before(cur, stop));
159312 +               } while (single_task_running() && !need_resched() &&
159313 +                        ktime_before(cur, stop));
159314         }
159316         prepare_to_rcuwait(&vcpu->wait);
159317 @@ -4486,15 +4487,15 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
159320  /* Caller must hold slots_lock. */
159321 -void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
159322 -                              struct kvm_io_device *dev)
159323 +int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
159324 +                             struct kvm_io_device *dev)
159326         int i, j;
159327         struct kvm_io_bus *new_bus, *bus;
159329         bus = kvm_get_bus(kvm, bus_idx);
159330         if (!bus)
159331 -               return;
159332 +               return 0;
159334         for (i = 0; i < bus->dev_count; i++)
159335                 if (bus->range[i].dev == dev) {
159336 @@ -4502,7 +4503,7 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
159337                 }
159339         if (i == bus->dev_count)
159340 -               return;
159341 +               return 0;
159343         new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
159344                           GFP_KERNEL_ACCOUNT);
159345 @@ -4511,7 +4512,13 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
159346                 new_bus->dev_count--;
159347                 memcpy(new_bus->range + i, bus->range + i + 1,
159348                                 flex_array_size(new_bus, range, new_bus->dev_count - i));
159349 -       } else {
159350 +       }
159352 +       rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
159353 +       synchronize_srcu_expedited(&kvm->srcu);
159355 +       /* Destroy the old bus _after_ installing the (null) bus. */
159356 +       if (!new_bus) {
159357                 pr_err("kvm: failed to shrink bus, removing it completely\n");
159358                 for (j = 0; j < bus->dev_count; j++) {
159359                         if (j == i)
159360 @@ -4520,10 +4527,8 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
159361                 }
159362         }
159364 -       rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
159365 -       synchronize_srcu_expedited(&kvm->srcu);
159366         kfree(bus);
159367 -       return;
159368 +       return new_bus ? 0 : -ENOMEM;
159371  struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,